1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
|
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)file.h 8.3 (Berkeley) 1/9/95
* $FreeBSD$
*/
#ifndef _SYS_FILE_H_
#define _SYS_FILE_H_
#ifndef _KERNEL
#include <sys/types.h> /* XXX */
#include <sys/fcntl.h>
#include <rtems/bsd/sys/unistd.h>
#else
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <vm/vm.h>
struct filedesc;
struct stat;
struct thread;
struct uio;
struct knote;
struct vnode;
#endif /* _KERNEL */
#define DTYPE_NONE 0 /* not yet initialized */
#define DTYPE_VNODE 1 /* file */
#define DTYPE_SOCKET 2 /* communications endpoint */
#define DTYPE_PIPE 3 /* pipe */
#define DTYPE_FIFO 4 /* fifo (named pipe) */
#define DTYPE_KQUEUE 5 /* event queue */
#define DTYPE_CRYPTO 6 /* crypto */
#define DTYPE_MQUEUE 7 /* posix message queue */
#define DTYPE_SHM 8 /* swap-backed shared memory */
#define DTYPE_SEM 9 /* posix semaphore */
#define DTYPE_PTS 10 /* pseudo teletype master device */
#define DTYPE_DEV 11 /* Device specific fd type */
#define DTYPE_PROCDESC 12 /* process descriptor */
#define DTYPE_LINUXEFD 13 /* emulation eventfd type */
#define DTYPE_LINUXTFD 14 /* emulation timerfd type */
#ifdef _KERNEL
struct file;
struct filecaps;
struct kaiocb;
struct kinfo_file;
struct ucred;
#define FOF_OFFSET 0x01 /* Use the offset in uio argument */
#define FOF_NOLOCK 0x02 /* Do not take FOFFSET_LOCK */
#define FOF_NEXTOFF 0x04 /* Also update f_nextoff */
#define FOF_NOUPDATE 0x10 /* Do not update f_offset */
off_t foffset_lock(struct file *fp, int flags);
void foffset_lock_uio(struct file *fp, struct uio *uio, int flags);
void foffset_unlock(struct file *fp, off_t val, int flags);
void foffset_unlock_uio(struct file *fp, struct uio *uio, int flags);
/*
 * Read fp's current file offset without acquiring the f_offset sleep
 * lock (FOF_NOLOCK, see above).
 */
static inline off_t
foffset_get(struct file *fp)
{
	off_t offset;

	offset = foffset_lock(fp, FOF_NOLOCK);
	return (offset);
}
/*
 * Signatures of the per-type file operations.  Each descriptor type
 * (vnode, socket, pipe, ...) supplies an implementation of these in its
 * struct fileops (below).  All return 0 on success or an errno value.
 */
/* Read from / write to the object (used for both fo_read and fo_write). */
typedef int fo_rdwr_t(struct file *fp, struct uio *uio,
		    struct ucred *active_cred, int flags,
		    struct thread *td);
/* Truncate the object to the given length. */
typedef int fo_truncate_t(struct file *fp, off_t length,
		    struct ucred *active_cred, struct thread *td);
/* Perform an ioctl command; data points to the in/out argument. */
typedef int fo_ioctl_t(struct file *fp, u_long com, void *data,
		    struct ucred *active_cred, struct thread *td);
/* Poll for the given event mask; returns the ready events. */
typedef int fo_poll_t(struct file *fp, int events,
		    struct ucred *active_cred, struct thread *td);
/* Attach a kevent knote to the object. */
typedef int fo_kqfilter_t(struct file *fp, struct knote *kn);
/* Fill in *sb with the object's stat information. */
typedef int fo_stat_t(struct file *fp, struct stat *sb,
		    struct ucred *active_cred, struct thread *td);
/* Release the object's resources on last close. */
typedef int fo_close_t(struct file *fp, struct thread *td);
/* Change the object's mode. */
typedef int fo_chmod_t(struct file *fp, mode_t mode,
		    struct ucred *active_cred, struct thread *td);
/* Change the object's owner and group. */
typedef int fo_chown_t(struct file *fp, uid_t uid, gid_t gid,
		    struct ucred *active_cred, struct thread *td);
/* sendfile(2) support: transmit nbytes from offset to sockfd. */
typedef int fo_sendfile_t(struct file *fp, int sockfd, struct uio *hdr_uio,
		    struct uio *trl_uio, off_t offset, size_t nbytes,
		    off_t *sent, int flags, struct thread *td);
/* lseek(2) support. */
typedef int fo_seek_t(struct file *fp, off_t offset, int whence,
		    struct thread *td);
/* Export descriptor state into a struct kinfo_file (for sysctl). */
typedef int fo_fill_kinfo_t(struct file *fp, struct kinfo_file *kif,
		    struct filedesc *fdp);
/* mmap(2) support; may be left NULL (see fo_mmap() wrapper below). */
typedef int fo_mmap_t(struct file *fp, vm_map_t map, vm_offset_t *addr,
		    vm_size_t size, vm_prot_t prot, vm_prot_t cap_maxprot,
		    int flags, vm_ooffset_t foff, struct thread *td);
/* Queue an asynchronous I/O request on the object. */
typedef int fo_aio_queue_t(struct file *fp, struct kaiocb *job);
/* Type of the fo_flags member (DFLAG_* values). */
typedef int fo_flags_t;
/*
 * Method table shared by all descriptors of one type.  fo_mmap may be
 * NULL (the fo_mmap() wrapper below checks and returns ENODEV);
 * unsupported operations are typically pointed at the invfo_* stubs
 * declared later in this header.
 */
struct fileops {
	fo_rdwr_t	*fo_read;
	fo_rdwr_t	*fo_write;
	fo_truncate_t	*fo_truncate;
	fo_ioctl_t	*fo_ioctl;
	fo_poll_t	*fo_poll;
	fo_kqfilter_t	*fo_kqfilter;
	fo_stat_t	*fo_stat;
	fo_close_t	*fo_close;
	fo_chmod_t	*fo_chmod;
	fo_chown_t	*fo_chown;
	fo_sendfile_t	*fo_sendfile;
	fo_seek_t	*fo_seek;
	fo_fill_kinfo_t	*fo_fill_kinfo;
	fo_mmap_t	*fo_mmap;
	fo_aio_queue_t	*fo_aio_queue;
	fo_flags_t	fo_flags;	/* DFLAG_* below */
};
#define DFLAG_PASSABLE 0x01 /* may be passed via unix sockets. */
#define DFLAG_SEEKABLE 0x02 /* seekable / nonsequential */
#endif /* _KERNEL */
#if defined(_KERNEL) || defined(_WANT_FILE)
#ifdef __rtems__
#include <rtems/libio_.h>
#include <sys/fcntl.h>
#endif /* __rtems__ */
/*
* Kernel descriptor table.
* One entry for each open kernel vnode and socket.
*
* Below is the list of locks that protects members in struct file.
*
* (a) f_vnode lock required (shared allows both reads and writes)
* (f) protected with mtx_lock(mtx_pool_find(fp))
* (d) cdevpriv_mtx
* none not locked
*/
/*
 * File-range advice attached to a vnode-backed file via f_advice
 * (presumably recorded by posix_fadvise(2) — the FADV_* values come
 * from <sys/fcntl.h>).  All members are protected by the file's mutex
 * pool lock, lock class (f) above.
 */
struct fadvise_info {
	int		fa_advice;	/* (f) FADV_* type. */
	off_t		fa_start;	/* (f) Region start. */
	off_t		fa_end;		/* (f) Region end. */
};
/*
 * Per-open-file kernel state.  In the RTEMS build the structure is
 * nothing but an overlay of the libio control block (rtems_libio_t),
 * so a rtems_libio_t pointer and a struct file pointer are
 * interchangeable — see rtems_bsd_iop_to_fp() below.
 */
struct file {
#ifndef __rtems__
	void		*f_data;	/* file descriptor specific data */
	struct fileops	*f_ops;		/* File operations */
	struct ucred	*f_cred;	/* associated credentials. */
	struct vnode	*f_vnode;	/* NULL or applicable vnode */
	short		f_type;		/* descriptor type (DTYPE_*) */
	short		f_vnread_flags; /* (f) Sleep lock for f_offset */
	volatile u_int	f_flag;		/* see fcntl.h */
	volatile u_int	f_count;	/* reference count */
	/*
	 *  DTYPE_VNODE specific fields.
	 */
	/* C11 anonymous union: only one member is meaningful per type. */
	union {
		int16_t	f_seqcount;	/* (a) Count of sequential accesses. */
		int	f_pipegen;
	};
	off_t		f_nextoff;	/* next expected read/write offset. */
	union {
		struct cdev_privdata *fvn_cdevpriv;
					/* (d) Private data for the cdev. */
		struct fadvise_info *fvn_advice;
	} f_vnun;
	/*
	 *  DFLAG_SEEKABLE specific fields
	 */
	/* Current offset; serialized by the f_vnread_flags sleep lock
	 * (see foffset_lock()/foffset_unlock() above). */
	off_t		f_offset;
	/*
	 * Mandatory Access control information.
	 */
	void		*f_label;	/* Place-holder for MAC label. */
#else /* __rtems__ */
	rtems_libio_t	f_io;		/* the underlying libio object */
#endif /* __rtems__ */
};
#ifdef __rtems__
#define f_data f_io.pathinfo.node_access_2
#define f_cdevpriv f_io.data1
/*
 * Convert a libio control block pointer to its struct file view.  In
 * the RTEMS build struct file contains only the rtems_libio_t member
 * f_io, so the two pointers refer to the same object.
 */
static inline struct file *
rtems_bsd_iop_to_fp(rtems_libio_t *iop)
{
	return ((struct file *)iop);
}
static inline struct file *
rtems_bsd_fd_to_fp(int fd)
{
return rtems_bsd_iop_to_fp(&rtems_libio_iops[fd]);
}
static inline int
rtems_bsd_fp_to_fd(struct file *fp)
{
return fp - rtems_bsd_iop_to_fp(&rtems_libio_iops[0]);
}
/*
 * Extract the FreeBSD-specific per-file data from a filesystem location.
 * Mirrors the f_data macro above, which maps f_data onto
 * pathinfo.node_access_2.
 */
static inline void *
rtems_bsd_loc_to_f_data(const rtems_filesystem_location_info_t *loc)
{
	return (loc->node_access_2);
}
/*
 * Translate FreeBSD kernel open flags (FREAD/FWRITE/FNONBLOCK) into the
 * corresponding RTEMS libio flag bits.  Unrelated bits are dropped.
 */
static inline uint32_t
rtems_bsd_fflag_to_libio_flags(u_int fflag)
{
	uint32_t libio_flags = 0;

	if ((fflag & FREAD) == FREAD)
		libio_flags |= LIBIO_FLAGS_READ;
	if ((fflag & FWRITE) == FWRITE)
		libio_flags |= LIBIO_FLAGS_WRITE;
	if ((fflag & FNONBLOCK) == FNONBLOCK)
		libio_flags |= LIBIO_FLAGS_NO_DELAY;

	return (libio_flags);
}
/*
 * Inverse of rtems_bsd_fflag_to_libio_flags(): translate RTEMS libio
 * flag bits back into FreeBSD kernel open flags.
 */
static inline u_int
rtems_bsd_libio_flags_to_fflag(uint32_t libio_flags)
{
	u_int fflag = 0;

	if ((libio_flags & LIBIO_FLAGS_READ) == LIBIO_FLAGS_READ)
		fflag |= FREAD;
	if ((libio_flags & LIBIO_FLAGS_WRITE) == LIBIO_FLAGS_WRITE)
		fflag |= FWRITE;
	if ((libio_flags & LIBIO_FLAGS_NO_DELAY) == LIBIO_FLAGS_NO_DELAY)
		fflag |= FNONBLOCK;

	return (fflag);
}
/*
 * Map a FreeBSD-style error number to the POSIX return convention used
 * by RTEMS libio handlers: 0 stays 0; any other value is stored in
 * errno and -1 is returned (the rtems_set_errno_and_return_minus_one()
 * macro performs the return).
 *
 * Note: the original spelled the declarator "static int inline"; the
 * idiomatic (and universally expected) order is "static inline int".
 */
static inline int
rtems_bsd_error_to_status_and_errno(int error)
{
	if (error == 0) {
		return 0;
	}

	/* No "else" needed: the branch above returns unconditionally. */
	rtems_set_errno_and_return_minus_one(error);
}
#endif /* __rtems__ */
#ifndef __rtems__
#define f_cdevpriv f_vnun.fvn_cdevpriv
#define f_advice f_vnun.fvn_advice
#endif /* __rtems__ */
#define FOFFSET_LOCKED 0x1
#define FOFFSET_LOCK_WAITING 0x2
#define FDEVFS_VNODE 0x4
#endif /* _KERNEL || _WANT_FILE */
/*
* Userland version of struct file, for sysctl
*/
struct xfile {
	ksize_t	xf_size;	/* size of struct xfile */
	pid_t	xf_pid;		/* owning process */
	uid_t	xf_uid;		/* effective uid of owning process */
	int	xf_fd;		/* descriptor number */
	int	_xf_int_pad1;	/* explicit padding; keeps layout stable */
	kvaddr_t xf_file;	/* address of struct file */
	short	xf_type;	/* descriptor type */
	short	_xf_short_pad1;	/* explicit padding */
	int	xf_count;	/* reference count */
	int	xf_msgcount;	/* references from message queue */
	int	_xf_int_pad2;	/* explicit padding */
	off_t	xf_offset;	/* file offset */
	kvaddr_t xf_data;	/* file descriptor specific data */
	kvaddr_t xf_vnode;	/* vnode pointer */
	u_int	xf_flag;	/* flags (see fcntl.h) */
	int	_xf_int_pad3;	/* explicit padding */
	int64_t	_xf_int64_pad[6]; /* reserved for future extension */
};
#ifdef _KERNEL
extern struct fileops vnops;
extern struct fileops badfileops;
#ifndef __rtems__
extern struct fileops socketops;
#else /* __rtems__ */
extern const rtems_filesystem_file_handlers_r socketops;
#endif /* __rtems__ */
extern int maxfiles; /* kernel limit on number of open files */
extern int maxfilesperproc; /* per process limit on number of open files */
extern volatile int openfiles; /* actual number of open files */
#ifndef __rtems__
int fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp);
#else /* __rtems__ */
struct file *rtems_bsd_get_file(int fd);
/*
 * RTEMS backend for the fget() macro below: resolve a descriptor number
 * to its struct file.  Always stores the lookup result (possibly NULL)
 * into *fpp; returns 0 on success or EBADF if fd is not open.
 */
static inline int
rtems_bsd_do_fget(int fd, struct file **fpp)
{
	struct file *fp = rtems_bsd_get_file(fd);

	*fpp = fp;
	return (fp == NULL ? EBADF : 0);
}
#define fget(td, fd, rights, fpp) rtems_bsd_do_fget(fd, fpp)
#endif /* __rtems__ */
int fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp,
u_char *maxprotp, struct file **fpp);
int fget_read(struct thread *td, int fd, cap_rights_t *rightsp,
struct file **fpp);
int fget_write(struct thread *td, int fd, cap_rights_t *rightsp,
struct file **fpp);
int fget_fcntl(struct thread *td, int fd, cap_rights_t *rightsp,
int needfcntl, struct file **fpp);
int _fdrop(struct file *fp, struct thread *td);
#ifndef __rtems__
fo_rdwr_t invfo_rdwr;
fo_truncate_t invfo_truncate;
fo_ioctl_t invfo_ioctl;
fo_poll_t invfo_poll;
fo_kqfilter_t invfo_kqfilter;
fo_chmod_t invfo_chmod;
fo_chown_t invfo_chown;
fo_sendfile_t invfo_sendfile;
fo_sendfile_t vn_sendfile;
fo_seek_t vn_seek;
fo_fill_kinfo_t vn_fill_kinfo;
int vn_fill_kinfo_vnode(struct vnode *vp, struct kinfo_file *kif);
#else /* __rtems__ */
int rtems_bsd_soo_kqfilter(rtems_libio_t *iop, struct knote *kn);
#endif /* __rtems__ */
#ifndef __rtems__
void finit(struct file *, u_int, short, void *, struct fileops *);
#else /* __rtems__ */
/*
 * RTEMS variant of finit(): initialize an open file object with its
 * access flags, private data and handler table, then mark the iop open.
 * The FreeBSD descriptor type is not tracked by libio and is ignored.
 */
static inline void
finit(struct file *fp, u_int fflag, short type, void *data,
    const rtems_filesystem_file_handlers_r *ops)
{
	(void)type;	/* libio has no per-descriptor type field */
	fp->f_data = data;
	fp->f_io.pathinfo.handlers = ops;
	/* Translate FREAD/FWRITE/FNONBLOCK and set LIBIO_FLAGS_OPEN
	 * atomically via the libio flag setter. */
	rtems_libio_iop_flags_set(&fp->f_io, LIBIO_FLAGS_OPEN |
	    rtems_bsd_fflag_to_libio_flags(fflag));
}
#endif /* __rtems__ */
int fgetvp(struct thread *td, int fd, cap_rights_t *rightsp,
struct vnode **vpp);
int fgetvp_exec(struct thread *td, int fd, cap_rights_t *rightsp,
struct vnode **vpp);
int fgetvp_rights(struct thread *td, int fd, cap_rights_t *needrightsp,
struct filecaps *havecaps, struct vnode **vpp);
int fgetvp_read(struct thread *td, int fd, cap_rights_t *rightsp,
struct vnode **vpp);
int fgetvp_write(struct thread *td, int fd, cap_rights_t *rightsp,
struct vnode **vpp);
/*
 * Helper for the fdrop() macro below: the "reference count still
 * positive" branch evaluates to this no-op so both arms of the
 * conditional expression have type int.
 */
static __inline int
_fnoop(void)
{
	return 0;
}
#ifndef __rtems__
/*
 * Acquire an additional reference on fp.  Returns the result of the
 * checked refcount acquire — false indicates the acquire failed (see
 * refcount(9)), in which case the caller must not use the reference;
 * hence the __result_use_check annotation.
 */
static __inline __result_use_check bool
fhold(struct file *fp)
{
	return (refcount_acquire_checked(&fp->f_count));
}
#endif /* __rtems__ */
#ifndef __rtems__
#define fdrop(fp, td) \
(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td)) : _fnoop())
#else /* __rtems__ */
/*
 * RTEMS backend for the fdrop() macro: release one libio reference on
 * the underlying iop.  Unlike FreeBSD's fdrop(), this returns nothing —
 * cleanup policy is handled by libio (see the WARNING comment below).
 */
static inline void
rtems_bsd_fdrop(struct file *fp)
{
	rtems_libio_iop_drop(&fp->f_io);
}
/*
* WARNING: fdalloc() and falloc_caps() do not increment the reference count of
* the file descriptor in contrast to FreeBSD. We must not call the fdrop()
* corresponding to a fdalloc() or falloc_caps(). The reason for this is that
* FreeBSD performs a lazy cleanup once the reference count reaches zero.
* RTEMS uses the reference count to determine if a cleanup is allowed.
*/
#define fdrop(fp, td) rtems_bsd_fdrop(fp)
#endif /* __rtems__ */
#ifndef __rtems__
static __inline fo_rdwr_t fo_read;
static __inline fo_rdwr_t fo_write;
static __inline fo_truncate_t fo_truncate;
static __inline fo_ioctl_t fo_ioctl;
static __inline fo_poll_t fo_poll;
static __inline fo_kqfilter_t fo_kqfilter;
static __inline fo_stat_t fo_stat;
static __inline fo_close_t fo_close;
static __inline fo_chmod_t fo_chmod;
static __inline fo_chown_t fo_chown;
static __inline fo_sendfile_t fo_sendfile;
/* Dispatch a read through the backing object's fo_read method. */
static __inline int
fo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	return ((*fp->f_ops->fo_read)(fp, uio, active_cred, flags, td));
}

/* Dispatch a write through the backing object's fo_write method. */
static __inline int
fo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	return ((*fp->f_ops->fo_write)(fp, uio, active_cred, flags, td));
}

/* Truncate the backing object to length via its fo_truncate method. */
static __inline int
fo_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	return ((*fp->f_ops->fo_truncate)(fp, length, active_cred, td));
}
#endif /* __rtems__ */
/*
 * Dispatch an ioctl to the backing object.  The RTEMS variant invokes
 * the libio ioctl handler, which signals failure by returning non-zero
 * and setting errno; that errno value is translated back into the
 * FreeBSD convention of returning an error number.
 */
static __inline int
fo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
#ifndef __rtems__
	return ((*fp->f_ops->fo_ioctl)(fp, com, data, active_cred, td));
#else /* __rtems__ */
	int rv;

	(void) active_cred;
	(void) td;
	/* Clear errno first so a stale value is never reported.
	 * NOTE(review): if a handler returns non-zero without setting
	 * errno, this reports success (0) — presumably all handlers set
	 * errno on failure; verify against the libio handler contract. */
	errno = 0;
	rv = ((*fp->f_io.pathinfo.handlers->ioctl_h)(&fp->f_io, com, data));
	if (rv == 0) {
		return (0);
	} else {
		return (errno);
	}
#endif /* __rtems__ */
}
/*
 * Poll the backing object for the given event mask; returns the subset
 * of events that are ready.  The RTEMS variant forwards to the libio
 * poll handler and ignores the credential/thread arguments.
 */
static __inline int
fo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
#ifndef __rtems__
	return ((*fp->f_ops->fo_poll)(fp, events, active_cred, td));
#else /* __rtems__ */
	(void) active_cred;
	(void) td;
	return ((*fp->f_io.pathinfo.handlers->poll_h)(&fp->f_io, events));
#endif /* __rtems__ */
}
#ifndef __rtems__
/* Fill *sb with the backing object's stat data via its fo_stat method. */
static __inline int
fo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
    struct thread *td)
{
	return ((*fp->f_ops->fo_stat)(fp, sb, active_cred, td));
}

/* Invoke the backing object's close method (last-close cleanup). */
static __inline int
fo_close(struct file *fp, struct thread *td)
{
	return ((*fp->f_ops->fo_close)(fp, td));
}
#endif /* __rtems__ */
/*
 * Attach a kevent knote to the backing object.  The RTEMS variant
 * forwards to the libio kqfilter handler.
 */
static __inline int
fo_kqfilter(struct file *fp, struct knote *kn)
{
#ifndef __rtems__
	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
#else /* __rtems__ */
	return ((*fp->f_io.pathinfo.handlers->kqfilter_h)(&fp->f_io, kn));
#endif /* __rtems__ */
}
#ifndef __rtems__
/* Change the backing object's mode via its fo_chmod method. */
static __inline int
fo_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	return ((*fp->f_ops->fo_chmod)(fp, mode, active_cred, td));
}

/* Change the backing object's owner/group via its fo_chown method. */
static __inline int
fo_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	return ((*fp->f_ops->fo_chown)(fp, uid, gid, active_cred, td));
}

/* sendfile(2) dispatch: transmit nbytes starting at offset to sockfd. */
static __inline int
fo_sendfile(struct file *fp, int sockfd, struct uio *hdr_uio,
    struct uio *trl_uio, off_t offset, size_t nbytes, off_t *sent, int flags,
    struct thread *td)
{
	return ((*fp->f_ops->fo_sendfile)(fp, sockfd, hdr_uio, trl_uio, offset,
	    nbytes, sent, flags, td));
}

/* lseek(2) dispatch. */
static __inline int
fo_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	return ((*fp->f_ops->fo_seek)(fp, offset, whence, td));
}

/* Export descriptor state to *kif (sysctl support). */
static __inline int
fo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	return ((*fp->f_ops->fo_fill_kinfo)(fp, kif, fdp));
}

/*
 * mmap(2) dispatch.  fo_mmap is the one optional fileops entry: a NULL
 * method means the object cannot be mapped and ENODEV is returned.
 */
static __inline int
fo_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	if (fp->f_ops->fo_mmap == NULL)
		return (ENODEV);
	return ((*fp->f_ops->fo_mmap)(fp, map, addr, size, prot, cap_maxprot,
	    flags, foff, td));
}

/* Queue an asynchronous I/O request on the backing object. */
static __inline int
fo_aio_queue(struct file *fp, struct kaiocb *job)
{
	return ((*fp->f_ops->fo_aio_queue)(fp, job));
}
#endif /* __rtems__ */
#endif /* _KERNEL */
#endif /* !_SYS_FILE_H_ */
|