1
1

build: Move libevent to a 3rd-party package

With Open MPI 5.0, the decision was made to stop building
3rd-party packages, such as Libevent, HWLOC, PMIx, and PRRTE as
MCA components and instead 1) start relying on external libraries
whenever possible and 2) Open MPI builds the 3rd party
libraries (if needed) as independent libraries, rather than
linked into libopen-pal.

This patch moves libevent from an MCA framework to a stand-alone
library built outside of OPAL.  A wrapper in opal/util is provided
to minimize the unnecessary changes in the rest of the code.  When
using the internal Libevent, it will be installed as a stand-alone
libevent.a, instead of bundled in OPAL.  Any pre-installed version
of Libevent at or after 2.0.21 is preferred over the internal
version.

Signed-off-by: Brian Barrett <bbarrett@amazon.com>
Этот коммит содержится в:
Brian Barrett 2020-04-04 23:14:00 +00:00
родитель 389b4b3a78
Коммит 9ffac85650
233 изменённых файлов: 637 добавлений и 76357 удалений

6
.gitignore поставляемый
Просмотреть файл

@ -90,6 +90,12 @@ ltsugar.m4
ltversion.m4
ltoptions.m4
# Libevent is included as a tarball. Ignore any expanded Libevent
# tarballs, since they are not included in git. Do not ignore the
# tarballs themselves, as those are artifacts we will store in git.
3rd-party/libevent-*
!3rd-party/libevent-*.tar.*
config/project_list.m4
config/autogen_found_items.m4
config/opal_get_version.sh

Двоичные данные
3rd-party/libevent-2.0.22-stable.tar.gz поставляемый Обычный файл

Двоичный файл не отображается.

Просмотреть файл

@ -75,6 +75,9 @@ my $ompi_autoconf_search = "autoconf";
my $ompi_automake_search = "automake";
my $ompi_libtoolize_search = "libtoolize;glibtoolize";
# version of libevent we ship
my $libevent_version="2.0.22-stable";
# One-time setup
my $username;
my $hostname;
@ -1443,6 +1446,21 @@ dnl 3rd-party package information\n";
# these are fairly one-off, so we did not try to do anything
# generic. Sorry :).
verbose "=== Libevent\n";
if ("libevent" ~~ @disabled_3rdparty_packages) {
verbose "--- Libevent disabled\n";
} else {
my $libevent_directory = "libevent-" . $libevent_version;
my $libevent_tarball = $libevent_directory . ".tar.gz";
if (! -f "3rd-party/" . $libevent_tarball) {
my_die("Could not find libevent tarball\n");
}
$m4 .= "m4_define([package_libevent], [1])\n";
$m4 .= "m4_define([libevent_tarball], [" . $libevent_tarball . "])\n";
$m4 .= "m4_define([libevent_directory], [" . $libevent_directory . "])\n";
verbose "--- Libevent enabled (" . $libevent_version . ")\n";
}
$m4 .= "\n";
#---------------------------------------------------------------------------

Просмотреть файл

@ -16,6 +16,8 @@
# reserved.
# Copyright (c) 2009 Oak Ridge National Labs. All rights reserved.
# Copyright (c) 2019-2020 Intel, Inc. All rights reserved.
# Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
# All Rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -24,7 +26,7 @@
#
AC_DEFUN([OMPI_SETUP_PRRTE],[
OPAL_VAR_SCOPE_PUSH([opal_prrte_save_CPPFLAGS opal_prrte_save_CFLAGS opal_prrte_save_LDFLAGS opal_prrte_save_LIBS opal_prrte_args opal_prrte_save_enable_dlopen opal_prrte_save_enable_mca_dso opal_prrte_save_enable_mca_static opal_prrte_extra_libs opal_prrte_extra_ltlibs opal_prrte_extra_ldflags opal_prrte_save_with_libevent opal_prrte_save_with_hwloc opal_prrte_save_with_pmix])
OPAL_VAR_SCOPE_PUSH([opal_prrte_save_CPPFLAGS opal_prrte_save_CFLAGS opal_prrte_save_LDFLAGS opal_prrte_save_LIBS opal_prrte_args opal_prrte_save_enable_dlopen opal_prrte_save_enable_mca_dso opal_prrte_save_enable_mca_static opal_prrte_extra_libs opal_prrte_extra_ltlibs opal_prrte_extra_ldflags opal_prrte_save_with_hwloc opal_prrte_save_with_pmix])
opal_prrte_save_CFLAGS=$CFLAGS
opal_prrte_save_CPPFLAGS=$CPPFLAGS
@ -33,7 +35,6 @@ AC_DEFUN([OMPI_SETUP_PRRTE],[
opal_prrte_save_enable_dlopen=enable_dlopen
opal_prrte_save_enable_mca_dso=enable_mca_dso
opal_prrte_save_enable_mca_static=enable_mca_static
opal_prrte_save_with_libevent=with_libevent
opal_prrte_save_with_hwloc=with_hwloc
opal_prrte_save_with_pmix=with_pmix
@ -65,15 +66,15 @@ AC_DEFUN([OMPI_SETUP_PRRTE],[
opal_prrte_extra_libs=$OMPI_TOP_BUILDDIR/opal/libopen-pal.la
opal_prrte_extra_ltlibs=$OMPI_TOP_BUILDDIR/opal/libopen-pal.la
if test -z $with_libevent || test "$with_libevent" = "internal" || test "$with_libevent" = "yes"; then
opal_prrte_libevent_arg="--with-libevent-header=$OMPI_TOP_SRCDIR/opal/mca/event/event.h"
else
if test "$with_libevent" = "external"; then
opal_prrte_libevent_arg="--with-libevent"
else
opal_prrte_libevent_arg="--with-libevent=$with_libevent"
fi
fi
AS_IF([test "$opal_libevent_mode" = "internal"],
[opal_prrte_extra_libs="$opal_prrte_extra_libs $opal_libevent_LIBS"
opal_prrte_extra_ltlibs="$opal_prrte_extra_ltlibs $opal_libevent_LIBS"
AS_IF([test ! -z "$opal_libevent_header"]
[opal_prrte_libevent_arg="--with-libevent-header=$opal_libevent_header"])],
[opal_prrte_libevent_arg="--with-libevent=$with_libevent"
AS_IF([test ! -z "$with_libevent_libdir"],
[opal_prrte_libevent_arg="$opal_prrte_libevent_arg --with-libevent-libdir=$with_libevent_libdir"])])
if test -z $with_hwloc || test "$with_hwloc" = "internal" || test "$with_hwloc" = "yes"; then
opal_prrte_hwloc_arg="--with-hwloc-header=$OMPI_TOP_SRCDIR/opal/mca/hwloc/hwloc-internal.h"

219
config/opal_config_libevent.m4 Обычный файл
Просмотреть файл

@ -0,0 +1,219 @@
dnl -*- autoconf -*-
dnl
dnl Copyright (c) 2009-2018 Cisco Systems, Inc. All rights reserved
dnl Copyright (c) 2013 Los Alamos National Security, LLC. All rights reserved.
dnl Copyright (c) 2015-2018 Research Organization for Information Science
dnl and Technology (RIST). All rights reserved.
dnl Copyright (c) 2020 Amazon.com, Inc. or its affiliates. All Rights
dnl reserved.
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
dnl Check for / configure libevent package. Prefer finding an
dnl external libevent, build our internal one if required. If we can
dnl not find an external libevent and the internal one fails to
dnl configure, abort.
dnl
dnl This macro will change the environment in the following way:
dnl
dnl * opal_libevent_header [legacy] - will be set if building
dnl internally, to the header file that should be included for
dnl embedded builds. This is used by PRRTE, but should not
dnl be used by new code.
dnl * opal_libevent_mode - either external or internal. If internal,
dnl --with-libevent should be ignored by other packages
dnl * opal_libevent_CPPFLAGS - the C Preprocessor flags necessary to
dnl run the preprocessor on a file which relies on Libevent
dnl headers. This will be folded into the global CPPFLAGS,
dnl so most people should never need this.
dnl * opal_libevent_LDFLAGS - the linker flags necessary to run the
dnl linker on a file which relies on Libevent libraries. This
dnl will be folded into the global LDFLAGS, so most people
dnl should never need this.
dnl * opal_libevent_LIBS - the libraries necessary to link source which
dnl uses Libevent. Cannot be added to LIBS yet, because then
dnl other execution tests later in configure (there are sadly
dnl some) would fail if the path in LDFLAGS was not added to
dnl LD_LIBRARY_PATH.
dnl * CPPFLAGS, LDFLAGS - Updated opal_libevent_CPPFLAGS and
dnl opal_libevent_LDFLAGS.
AC_DEFUN([OPAL_CONFIG_LIBEVENT], [
OPAL_VAR_SCOPE_PUSH([internal_libevent_happy external_libevent_happy])
opal_show_subtitle "Configuring Libevent"
OPAL_3RDPARTY_WITH([libevent], [libevent], [package_libevent])
opal_libevent_header=""
# unless internal specifically requested by the user, try to find
# an external that works.
external_libevent_happy=0
AS_IF([test "$opal_libevent_mode" != "internal"],
[_OPAL_CONFIG_LIBEVENT_EXTERNAL(
[external_libevent_happy=1
opal_libevent_mode="external"],
[external_libevent_happy=0
AS_IF([test "$opal_libevent_mode" = "external"],
[AC_MSG_ERROR([External libevent requested but not found.])])])])
internal_libevent_happy=0
m4_ifdef([package_libevent],
[AS_IF([test "$external_libevent_happy" = "0"],
[_OPAL_CONFIG_LIBEVENT_INTERNAL([internal_libevent_happy=1
opal_libevent_mode="internal"])])])
AS_IF([test "$external_libevent_happy" = "0" -a "$internal_libevent_happy" = "0"],
[AC_MSG_ERROR([Could not find viable libevent build.])])
# this will work even if there is no libevent package included,
# because libevent_tarball and libevent_directory will evaluate to
# an empty string. These are relative to the 3rd-party/
# directory.
OPAL_3RDPARTY_EXTRA_DIST="$OPAL_3RDPARTY_EXTRA_DIST libevent_tarball"
OPAL_3RDPARTY_DISTCLEAN_DIRS="$OPAL_3RDPARTY_DISTCLEAN_DIRS libevent_directory"
AC_SUBST(opal_libevent_LIBS)
OPAL_SUMMARY_ADD([[Miscellaneous]],[[libevent]],[libevent], [$opal_libevent_mode])
OPAL_VAR_SCOPE_POP
])
dnl _OPAL_CONFIG_LIBEVENT_EXTERNAL(action-if-happy, action-if-not-happy)
dnl
dnl only safe to call from OPAL_CONFIG_LIBEVENT, assumes variables
dnl from there are set.
AC_DEFUN([_OPAL_CONFIG_LIBEVENT_EXTERNAL], [
OPAL_VAR_SCOPE_PUSH([opal_libevent_CPPFLAGS_save opal_libevent_LDFLAGS_save opal_libevent_LIBS_save opal_libevent_external_support])
opal_libevent_CPPFLAGS_save=$CPPFLAGS
opal_libevent_LDFLAGS_save=$LDFLAGS
opal_libevent_LIBS_save=$LIBS
AS_IF([test ! -z "$with_libevent_libdir"],
[OPAL_CHECK_WITHDIR([libevent-libdir], [$with_libevent_libdir],
[libevent.*])])
OPAL_CHECK_PACKAGE([opal_libevent],
[event2/event.h],
[event],
[event_config_new],
[-levent_pthreads],
[$with_libevent],
[$with_libevent_libdir],
[opal_libevent_external_support=yes],
[opal_libevent_external_support=no])
# need these set for the tests below. If things fail, will undo at the end.
CPPFLAGS="$opal_libevent_CPPFLAGS_save $opal_libevent_CPPFLAGS"
LDFLAGS="$opal_libevent_LDFLAGS_save $opal_libevent_LDFLAGS"
LIBS="$opal_libevent_LIBS_save $opal_libevent_LIBS"
# Ensure that this libevent has the symbol
# "evthread_set_lock_callbacks", which will only exist if
# libevent was configured with thread support.
AS_IF([test "$opal_libevent_external_support" = "yes"],
[AC_CHECK_LIB([event], [evthread_set_lock_callbacks],
[],
[AC_MSG_WARN([External libevent does not have thread support])
AC_MSG_WARN([Open MPI requires libevent to be compiled with])
AC_MSG_WARN([thread support enabled])
opal_libevent_external_support=no])])
AS_IF([test "$opal_libevent_external_support" = "yes"],
[AC_CHECK_LIB([event_pthreads], [evthread_use_pthreads],
[],
[AC_MSG_WARN([External libevent does not have thread support])
AC_MSG_WARN([Open MPI requires libevent to be compiled with])
AC_MSG_WARN([thread support enabled])
opal_libevent_external_support=no])])
# Open MPI used to fall back to the internal libevent if the
# installed version was older than the internal version. This
# isn't what we want, because we really want to prefer external
# versions. Pin the "oldest supported" external version to
# 2.0.21, which we know works from testing on RHEL7.
AS_IF([test "$opal_libevent_external_support" = "yes"],
[AC_MSG_CHECKING([if external libevent version is 2.0.21 or greater])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <event2/event.h>]],
[[
#if defined(_EVENT_NUMERIC_VERSION) && _EVENT_NUMERIC_VERSION < 0x02001500
#error "libevent API version is less than 0x02001500"
#elif defined(EVENT__NUMERIC_VERSION) && EVENT__NUMERIC_VERSION < 0x02001500
#error "libevent API version is less than 0x02001500"
#endif
]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_WARN([external libevent version is too old (2.0.21 or later required)])
opal_libevent_external_support=no])])
LDFLAGS="$opal_libevent_LDFLAGS_save"
LIBS="$opal_libevent_LIBS_save"
AS_IF([test "$opal_libevent_external_support" = "yes"],
[$1],
[CPPFLAGS="$opal_libevent_CPPFLAGS_save"
$2])
OPAL_VAR_SCOPE_POP
])
dnl _OPAL_CONFIG_LIBEVENT_INTERNAL(action-if-happy, action-if-not-happy)
dnl
dnl Configure the packaged libevent. Only needs to be run if the
dnl external libevent is not going to be used. Assumes that if
dnl this function is called, that success means the internal package
dnl will be used.
AC_DEFUN([_OPAL_CONFIG_LIBEVENT_INTERNAL], [
OPAL_VAR_SCOPE_PUSH(subconfig_happy subconfig_prefix internal_libevent_location)
AS_IF([test ! -z $prefix], [subconfig_prefix="--prefix=$prefix"])
# Note: To update the version of libevent shipped, update the
# constant in autogen.pl.
OPAL_EXPAND_TARBALL([3rd-party/libevent_tarball], [3rd-party/libevent_directory], [configure])
# We disable the GCC warnings because 1) we're not developers of
# Libevent, so we will never actually fix said warnings and 2)
# some of the warning options cause failures with compilers that
# fake being GCC (I'm looking at you, PGI).
PAC_CONFIG_SUBDIR_ARGS([3rd-party/libevent_directory],
[--disable-dns --disable-http --disable-rpc --disable-openssl --enable-thread-support --disable-evport --disable-gcc-warnings],
[], [subconfig_happy=1], [subconfig_happy=0])
AS_IF([test "$subconfig_happy" = "1"],
[internal_libevent_location="3rd-party/libevent_directory"
# note: because we only ship/commit a tarball (and not the source
# directory), the source is always expanded in the builddir, so we
# only need to add a -I to the builddir.
CPPFLAGS="$CPPFLAGS -I$OMPI_TOP_BUILDDIR/$internal_libevent_location -I$OMPI_TOP_BUILDDIR/$internal_libevent_location/include"
# No need to update LDFLAGS, because they will install into
# our tree and in the mean time are referenced by their .la
# files.
opal_libevent_LIBS="$OMPI_TOP_BUILDDIR/$internal_libevent_location/libevent.la $OMPI_TOP_BUILDDIR/$internal_libevent_location/libevent_pthreads.la"
opal_libevent_header="$OMPI_TOP_BUILDDIR/$internal_libevent_location/event.h"
# no need to add to DIST_SUBDIRS, because we only ship the
# tarball. This is relative to the 3rd-party/ directory.
OPAL_3RDPARTY_SUBDIRS="$OPAL_3RDPARTY_SUBDIRS libevent_directory"
# The tarball as configured can not be used for compile
# tests, because libevent uses a Makefile rule rather than
# Autoconf to generate their config file (sigh). Force
# generation of that file now, so that other 3rd party
# packages can run compile tests.
AC_MSG_NOTICE([Generating Libevent's event-config.h])
(cd $OMPI_TOP_BUILDDIR/$internal_libevent_location/ ; ${MAKE-make} include/event2/event-config.h)
AS_IF([test $? -ne 0], [AC_MSG_ERROR([Could not generate event-config.h.])])
$1], [$2])
OPAL_VAR_SCOPE_POP
])

Просмотреть файл

@ -1108,6 +1108,8 @@ OPAL_3RDPARY_DIST_SUBDIRS=
OPAL_3RDPARY_EXTRA_DIST=
OPAL_3RDPARY_DISTCLEAN_DIRS=
OPAL_CONFIG_LIBEVENT
AC_SUBST(OPAL_3RDPARTY_SUBDIRS)
AC_SUBST(OPAL_3RDPARTY_DIST_SUBDIRS)
AC_SUBST(OPAL_3RDPARTY_EXTRA_DIST)

Просмотреть файл

@ -11,6 +11,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -35,7 +37,7 @@
#endif
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "ompi/class/ompi_free_list.h"
#include "ompi/mca/btl/btl.h"
#include "ompi/mca/btl/base/base.h"

Просмотреть файл

@ -12,7 +12,7 @@
* Copyright (c) 2007-2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2009 Oak Ridge National Laboratory
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -50,7 +50,7 @@
#include <limits.h>
#include "ompi/constants.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/if.h"
#include "opal/util/output.h"
#include "opal/util/argv.h"

Просмотреть файл

@ -10,6 +10,8 @@
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -21,7 +23,7 @@
#define MCA_BTL_TCP_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_tcp2_frag.h"
#include "btl_tcp2.h"
BEGIN_C_DECLS

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -21,7 +23,7 @@
*/
#include "ompi_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/base/base.h"
#include "ompi/mca/bml/bml.h"
#include "bml_r2.h"

Просмотреть файл

@ -7,6 +7,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,7 +24,7 @@
#endif /* HAVE_UNIST_H */
#include "opal/class/opal_bitmap.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/opal_environ.h"
#include "ompi/mca/mca.h"
#include "opal/mca/base/base.h"

Просмотреть файл

@ -9,7 +9,7 @@
* Copyright (c) 2012-2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2020 Intel, Inc. All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -29,7 +29,7 @@
#include "opal/dss/dss.h"
#include "opal/runtime/opal_cr.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/util/printf.h"

Просмотреть файл

@ -14,6 +14,8 @@
* Copyright (c) 2014-2020 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -23,7 +25,7 @@
#include "ompi_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/mca/pmix/pmix-internal.h"

Просмотреть файл

@ -16,7 +16,7 @@
* Copyright (c) 2013-2020 Intel, Inc. All rights reserved.
* Copyright (c) 2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,7 +26,7 @@
#include "ompi_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/util/show_help.h"
#include "opal/util/opal_environ.h"

Просмотреть файл

@ -11,6 +11,8 @@
* All rights reserved.
* Copyright (c) 2006 QLogic Corporation. All rights reserved.
* Copyright (c) 2015-2020 Intel, Inc. All rights reserved
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,7 +24,7 @@
#define MCA_MTL_PSM2_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "ompi/mca/mtl/mtl.h"
#include "mtl_psm2.h"

Просмотреть файл

@ -12,6 +12,8 @@
* Copyright (c) 2013 Sandia National Laboratories. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,7 +24,7 @@
#include "ompi_config.h"
#include "pml_cm.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "ompi/mca/mtl/mtl.h"
#include "ompi/mca/mtl/base/base.h"
#include "ompi/mca/pml/base/pml_base_bsend.h"

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -23,7 +25,7 @@
#include "ompi_config.h"
#include "opal/runtime/opal.h"
#include "opal/util/output.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/base/base.h"
#include "mpi.h"

Просмотреть файл

@ -8,6 +8,8 @@
* reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -17,7 +19,7 @@
#include "ompi_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "pml_example.h"
static int mca_pml_example_component_register(void);

Просмотреть файл

@ -18,6 +18,8 @@
* All rights reserved.
* Copyright (c) 2018 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,7 +28,7 @@
*/
#include "ompi_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "mpi.h"
#include "ompi/runtime/params.h"
#include "ompi/mca/pml/pml.h"

Просмотреть файл

@ -12,7 +12,7 @@
* All rights reserved.
* Copyright (c) 2012 The University of Wisconsin-La Crosse. All rights
* reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -42,7 +42,7 @@
#include <sys/stat.h> /* for mkfifo */
#endif /* HAVE_SYS_STAT_H */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/util/printf.h"
#include "opal/mca/crs/crs.h"

Просмотреть файл

@ -23,6 +23,8 @@
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
* Copyright (c) 2019 Triad National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -45,7 +47,7 @@
#include <netdb.h>
#endif
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/runtime/opal_progress.h"
#include "opal/mca/base/base.h"

Просмотреть файл

@ -24,6 +24,8 @@
*
* Copyright (c) 2016-2017 IBM Corporation. All rights reserved.
* Copyright (c) 2018 FUJITSU LIMITED. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -54,7 +56,7 @@
#include "opal/util/stacktrace.h"
#include "opal/util/show_help.h"
#include "opal/runtime/opal.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/allocator/base/base.h"
#include "opal/mca/rcache/base/base.h"
#include "opal/mca/rcache/rcache.h"

Просмотреть файл

@ -13,6 +13,8 @@
# Copyright (c) 2015-2016 Intel, Inc. All rights reserved.
# Copyright (c) 2016 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
# Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
# All Rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
@ -51,9 +53,15 @@ lib@OPAL_LIB_PREFIX@open_pal_la_LIBADD = \
datatype/libdatatype.la \
mca/base/libmca_base.la \
util/libopalutil.la \
$(MCA_opal_FRAMEWORK_LIBS)
lib@OPAL_LIB_PREFIX@open_pal_la_DEPENDENCIES = $(lib@OPAL_LIB_PREFIX@open_pal_la_LIBADD)
lib@OPAL_LIB_PREFIX@open_pal_la_LDFLAGS = -version-info $(libopen_pal_so_version)
$(MCA_opal_FRAMEWORK_LIBS) \
$(opal_libevent_LIBS)
lib@OPAL_LIB_PREFIX@open_pal_la_DEPENDENCIES = \
datatype/libdatatype.la \
mca/base/libmca_base.la \
util/libopalutil.la \
$(MCA_opal_FRAMEWORK_LIBS)
lib@OPAL_LIB_PREFIX@open_pal_la_LDFLAGS = -version-info $(libopen_pal_so_version) \
$(opal_libevent_LDFLAGS)
# included subdirectory Makefile.am's and appended-to variables
headers =

Просмотреть файл

@ -2,6 +2,8 @@
* Copyright (c) 2012-2016 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved
* Copyright (c) 2015 Intel, Inc. All rights reserved
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -14,7 +16,7 @@
#include <stdio.h>
#include <stddef.h>
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/class/opal_hotel.h"

Просмотреть файл

@ -2,6 +2,8 @@
* Copyright (c) 2012-2016 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC. All rights reserved
* Copyright (c) 2015 Intel, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -53,9 +55,11 @@
#include "opal_config.h"
#include "opal/constants.h"
#include "opal/util/output.h"
#include "opal/prefetch.h"
#include "opal/class/opal_object.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
BEGIN_C_DECLS

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2015-2018 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2018-2019 Intel, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -30,7 +32,7 @@
#include <string.h>
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/btl.h"
#include "opal/mca/btl/base/base.h"
#include "opal/mca/mpool/mpool.h"

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2017-2018 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2018 Intel, Inc, All rights reserved
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -24,7 +26,7 @@
#define MCA_BTL_OFI_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_ofi.h"

Просмотреть файл

@ -16,6 +16,8 @@
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Google, LLC. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -25,7 +27,7 @@
#include "opal_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/mca/pmix/pmix-internal.h"
#include "opal/util/show_help.h"

Просмотреть файл

@ -15,7 +15,7 @@
* and Technology (RIST). All rights reserved.
* Copyright (c) 2014-2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2019 Amazon.com, Inc. or its affiliates. All Rights
* Copyright (c) 2019-2020 Amazon.com, Inc. or its affiliates. All Rights
* reserved.
* $COPYRIGHT$
*
@ -44,7 +44,7 @@
#endif
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/class/opal_free_list.h"
#include "opal/mca/btl/btl.h"
#include "opal/mca/btl/base/base.h"

Просмотреть файл

@ -19,7 +19,7 @@
* Copyright (c) 2014-2019 Intel, Inc. All rights reserved.
* Copyright (c) 2014-2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018-2019 Amazon.com, Inc. or its affiliates. All Rights
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights
* reserved.
* $COPYRIGHT$
*
@ -60,7 +60,7 @@
#include <sys/time.h>
#endif
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/ethtool.h"
#include "opal/util/if.h"
#include "opal/util/output.h"

Просмотреть файл

@ -15,7 +15,7 @@
* Copyright (c) 2014 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2020 Google, LLC. All rights reserved.
* $COPYRIGHT$
*
@ -53,7 +53,7 @@
#endif /* HAVE_SYS_TIME_H */
#include <time.h>
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/net.h"
#include "opal/util/show_help.h"
#include "opal/util/proc.h"

Просмотреть файл

@ -9,6 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -20,7 +22,7 @@
#define MCA_BTL_TCP_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_tcp_frag.h"
#include "btl_tcp.h"
BEGIN_C_DECLS

Просмотреть файл

@ -13,6 +13,8 @@
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Google, LLC. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -30,7 +32,7 @@
#include <string.h>
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/btl.h"
#include "opal/mca/btl/base/base.h"
#include "opal/mca/mpool/mpool.h"

Просмотреть файл

@ -12,6 +12,8 @@
* All rights reserved.
* Copyright (c) 2014 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -22,7 +24,7 @@
#include "opal_config.h"
#include "opal/constants.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/btl.h"
#include "opal/mca/mpool/base/base.h"
#include "opal/mca/btl/base/base.h"

Просмотреть файл

@ -9,6 +9,8 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -20,7 +22,7 @@
#define MCA_BTL_TEMPLATE_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_template_frag.h"
#include "btl_template.h"
BEGIN_C_DECLS

Просмотреть файл

@ -14,6 +14,8 @@
* reserved.
* Copyright (c) 2019 Google, LLC. All rights reserved.
* Copyright (c) 2019 Intel, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -31,7 +33,7 @@
#include <string.h>
/* Open MPI includes */
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/base/base.h"
#include "opal/mca/mpool/mpool.h"
#include "opal/mca/btl/base/btl_base_error.h"

Просмотреть файл

@ -12,6 +12,8 @@
* All rights reserved.
* Copyright (c) 2017-2018 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -23,7 +25,7 @@
#define MCA_BTL_UCT_ENDPOINT_H
#include "opal/class/opal_list.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_uct.h"
BEGIN_C_DECLS

Просмотреть файл

@ -15,6 +15,8 @@
* Copyright (c) 2011-2019 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2015-2016 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -34,7 +36,7 @@
#include "opal/util/alfg.h"
#include "opal/class/opal_hash_table.h"
#include "opal/class/opal_hash_table.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/btl/btl.h"
#include "opal/mca/btl/base/btl_base_error.h"

Просмотреть файл

@ -2,7 +2,7 @@
* Copyright (c) 2014-2016 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -23,7 +23,7 @@
#include "opal_stdint.h"
#include "opal/mca/threads/mutex.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/show_help.h"
#include "opal/types.h"
#include "opal/util/output.h"

Просмотреть файл

@ -1,8 +1,8 @@
/*
* Copyright (c) 2014-2016 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2015 Research Organization for Information Science
* Copyright (c) 2018-2020 Amazon.com, Inc. or its affiliates. All Rights reserved.
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,7 +26,7 @@
#include "opal_stdint.h"
#include "opal/mca/threads/mutex.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/util/output.h"
#include "opal/util/fd.h"
#include "opal/util/string_copy.h"

Просмотреть файл

@ -12,6 +12,8 @@
* Copyright (c) 2006 Sandia National Laboratories. All rights
* reserved.
* Copyright (c) 2013-2017 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -26,7 +28,7 @@
#include "opal/class/opal_list.h"
#include "opal/class/opal_hotel.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "btl_usnic.h"

Просмотреть файл

@ -1,5 +1,7 @@
/*
* Copyright (c) 2013-2017 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -18,7 +20,7 @@
#include <sys/time.h>
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
/**

Просмотреть файл

@ -14,6 +14,8 @@
* Copyright (c) 2009 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010-2015 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2020 Amazon.com, Inc. or its affiliates.
* All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
@ -28,7 +30,7 @@
#include "opal_config.h"
#include "opal/mca/event/event.h"
#include "opal/util/event.h"
#include "opal/mca/shmem/shmem.h"
#include "opal/mca/mpool/mpool.h"

Просмотреть файл

@ -1,27 +0,0 @@
#
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# main library setup
noinst_LTLIBRARIES = libmca_event.la
libmca_event_la_SOURCES =
# local files
headers = event.h
libmca_event_la_SOURCES += $(headers)
# Conditionally install the header files
if WANT_INSTALL_HEADERS
opaldir = $(opalincludedir)/$(subdir)
nobase_opal_HEADERS = $(headers)
endif
include base/Makefile.am
distclean-local:
rm -f base/static-components.h

Просмотреть файл

@ -1,26 +0,0 @@
* create a new subdirectory opal/mca/event/libeventxxxx
* copy these files from the current component to the new directory
autogen.subdirs
configure.m4
libeventxxxx_component.c
libeventxxxx_module.c
libeventxxxx.h
Makefile.am
opal_check_visibility.m4
* update the libevent version numbers in the above files by doing a global replace
* expand the libevent tarball into the new directory under a subdirectory named "libevent"
* copy libeventxxxx/libevent/opal_rename.h to the new libeventyyyy/libevent and change the version number in the symbol renames
* edit the following files per their counterparts in the current version
libevent/configure.ac
libevent/Makefile.am
libevent/include/event2/event.h
libevent/include/event2/thread.h
libevent/include/event2/util.h
libevent/log-internal.h
libevent/log.c
libevent/event.c (#ifdef lines in eventops area)

Просмотреть файл

@ -1,15 +0,0 @@
#
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
headers += \
base/base.h
libmca_event_la_SOURCES += \
base/event_base_frame.c

Просмотреть файл

@ -1,71 +0,0 @@
Last updated: 15 Sep 2010
How to update the Libevent embedded in OPAL
-------------------------------------------
OPAL requires some modification of the Libevent build system in order
to properly operate. In addition, OPAL accesses the Libevent functions
through a set of wrappers - this is done for three reasons:
1. Hide the Libevent functions. Some applications directly call
libevent APIs and expect to operate against a locally installed
library. Since the library used by OPAL may differ in version, and
to avoid linker errors for multiply-defined symbols, it is
important that the libevent functions included in OPAL be "hidden"
from external view. Thus, OPAL's internal copy of libevent is built
with visibility set to "hidden" and all access from the OPAL code
base is done through "opal_xxx" wrapper API calls.
In those cases where the system is built against a compiler that
doesn't support visibility, conflicts can (unfortunately)
arise. However, since only a very few applications would be
affected, and since most compilers support visibility, we do not
worry about this possibility.
2. Correct some deficiencies in the distributed Libevent configuration
tests. Specifically, the distributed tests for kqueue and epoll
support provide erroneous results on some platforms (as determined
by our empirical testing). OPAL therefore provides enhanced tests
to correctly assess those environments.
3. Enable greater flexibility in configuring Libevent for the specific
environment. In particular, OPAL has no need of Libevent's dns,
http, and rpc events, so configuration options to remove that code
from Libevent have been added.
The procedure for updating Libevent has been greatly simplified
compared to prior versions in the OPAL code base by replacing
file-by-file edits with configuration logic. Thus, updating the
included libevent code can generally be accomplished by:
1. create a new opal/mca/event component for the updated version, using
a name "libeventxxx", where xxx = libevent version. For example,
libevent 2.0.7 => component libevent207
2. create a subdirectory "libevent" in the new component and unpack the
new libevent code tarball into it.
3. copy the configure.m4, autogen.subdirs, component.c, moduule.c,
Makefile.am, and .h files from a prior version to the new component.
You will need to customize them for the new version, but they can
serve as a good template. In many cases, you will just have to update
the component name.
4. edit libevent/configure.in to add OMPI specific options and modes.
Use the corresponding file from a prior version as a guide. The
necessary changes are marked with "OMPI" comments. These changes
have been pushed upstream to libevent, and so edits may no longer
be required in the new version.
5. Modify libevent/Makefile.am. Here again, you should use the file from
a prior version as an example. Hopefully, you can just use the file
without change - otherwise, the changes will have to be done by hand.
Required changes reflect the need for OMPI to turn "off" unused
subsystems such as http. These changes have been pushed upstream to
libevent, and so edits may no longer be required in the new version.
6. in your new component Makefile.am, note that the libevent headers
are listed by name when WITH_INSTALL_HEADERS is given. This is required
to support the OMPI --with-devel-headers configure option. Please review
the list and update it to include all libevent headers for the new
version.

Просмотреть файл

@ -1,34 +0,0 @@
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2012 Los Alamos National Security, LLC.
* All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#ifndef OPAL_EVENT_BASE_H
#define OPAL_EVENT_BASE_H
#include "opal_config.h"
#include "opal/class/opal_pointer_array.h"
#include "opal/mca/base/base.h"
#include "opal/mca/event/event.h"
/*
* Global functions for MCA overall event open and close
*/
BEGIN_C_DECLS
/**
* Event framework
*/
OPAL_DECLSPEC extern mca_base_framework_t opal_event_base_framework;
END_C_DECLS
#endif /* OPAL_BASE_EVENT_H */

Просмотреть файл

@ -1,110 +0,0 @@
/*
* Copyright (c) 2010-2015 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014-2015 Intel, Inc. All rights reserved.
* Copyright (c) 2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "opal_config.h"
#include "opal/constants.h"
#include "opal/util/output.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
#include "opal/mca/event/event.h"
#include "opal/mca/event/base/base.h"
/*
* The following file was created by configure. It contains extern
* statements and the definition of an array of pointers to each
* component's public mca_base_component_t struct.
*/
#include "opal/mca/event/base/static-components.h"
/**
* Initialize the event MCA framework
*
* @retval OPAL_SUCCESS Upon success
* @retval OPAL_ERROR Upon failure
*
* This must be the first function invoked in the event MCA
* framework. It initializes the event MCA framework, finds
* and opens event components, etc.
*
* This function is invoked during opal_init().
*
* This function fills in the internal global variable
* opal_event_base_components_opened, which is a list of all
* event components that were successfully opened. This
* variable should \em only be used by other event base
* functions -- it is not considered a public interface member --
* and is only mentioned here for completeness.
*/
static int opal_event_base_open(mca_base_open_flag_t flags);
static int opal_event_base_close(void);
/* Use default register and close function */
MCA_BASE_FRAMEWORK_DECLARE(opal, event, NULL, NULL, opal_event_base_open,
opal_event_base_close, mca_event_base_static_components,
0);
/*
* Globals
*/
opal_event_base_t *opal_sync_event_base=NULL;
static int opal_event_base_open(mca_base_open_flag_t flags)
{
int rc = OPAL_SUCCESS;
/* Silence compiler warning */
(void) flags;
/* no need to open the components since we only use one */
/* init the lib */
if (OPAL_SUCCESS != (rc = opal_event_init())) {
return rc;
}
/* Declare our intent to use threads */
opal_event_use_threads();
/* get our event base */
if (NULL == (opal_sync_event_base = opal_event_base_create())) {
return OPAL_ERROR;
}
/* set the number of priorities */
if (0 < OPAL_EVENT_NUM_PRI) {
opal_event_base_priority_init(opal_sync_event_base, OPAL_EVENT_NUM_PRI);
}
return rc;
}
static int opal_event_base_close(void)
{
int rc;
/* cleanup components even though they are statically opened */
if (OPAL_SUCCESS != (rc =mca_base_framework_components_close (&opal_event_base_framework,
NULL))) {
return rc;
}
opal_event_base_free(opal_sync_event_base);
/* finalize the lib */
return opal_event_finalize();
}

Просмотреть файл

@ -1,7 +0,0 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner:project
status: maintenance

Просмотреть файл

@ -1,96 +0,0 @@
dnl -*- shell-script -*-
dnl
dnl Copyright (c) 2010-2018 Cisco Systems, Inc. All rights reserved
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
dnl There will only be one component used in this framework, and it will
dnl be selected at configure time by priority. Components must set
dnl their priorities in their configure.m4 files. They must also set
dnl the shell variable $opal_event_base_include to a header file name
dnl (relative to opal/mca/event) that will be included in
dnl opal/mca/event/event.h.
dnl We only want one winning component (vs. STOP_AT_FIRST_PRIORITY,
dnl which will allow all components of the same priority who succeed to
dnl win)
m4_define(MCA_opal_event_CONFIGURE_MODE, STOP_AT_FIRST)
dnl
dnl Setup --with-libevent and --with-libevent-libdir
dnl
AC_DEFUN([MCA_opal_event_SETUP],[
AC_ARG_WITH([libevent],
[AC_HELP_STRING([--with-libevent=DIR],
[Search for libevent headers and libraries in DIR. Should only be used if an external copy of libevent is being used.])])
# Bozo check
AS_IF([test "$with_libevent" = "no"],
[AC_MSG_WARN([It is not possible to configure Open MPI --without-libevent])
AC_MSG_ERROR([Cannot continue])])
AS_IF([test "$with_libevent" = "yes"],
[with_libevent=])
AC_ARG_WITH([libevent-libdir],
[AC_HELP_STRING([--with-libevent-libdir=DIR],
[Search for libevent libraries in DIR. Should only be used if an external copy of libevent is being used.])])
# Make sure the user didn't specify --with-libevent=internal and
# --with-libevent-libdir=whatever (because you can only specify
# --with-libevent-libdir when external libevent is being used).
AS_IF([test "$with_libevent" = "internal" && test -n "$with_libevent_libdir"],
[AC_MSG_WARN([Both --with-libevent=internal and --with-libevent-libdir=DIR])
AC_MSG_WARN([were specified, which does not make sense.])
AC_MSG_ERROR([Cannot continue])])
])
AC_DEFUN([MCA_opal_event_CONFIG],[
opal_event_base_include=
MCA_opal_event_SETUP
# configure all the components
MCA_CONFIGURE_FRAMEWORK($1, $2, 1)
# We must have found exactly 1 static component, or we can't
# continue. STOP_AT_FIRST will guarantee that we find at most
# one. We need to check here that we found *at least* one.
AS_IF([test "$MCA_opal_event_STATIC_COMPONENTS" = ""],
[AC_MSG_WARN([Did not find a suitable static opal event component])
AC_MSG_ERROR([Cannot continue])])
# We need to find out what the underlying component set
# HAVE_WORKING_EVENTOPS was set to. This is for systems that
# don't have working poll(), etc. (e.g., Cray) -- we still need an
# event component (for timers, etc.), but we don't have working
# event ops. Ensure that it was set by the component.
echo " "
AC_MSG_CHECKING([if have working event ops for the event framework])
AS_IF([test "$OPAL_HAVE_WORKING_EVENTOPS" = ""],
[AC_MSG_RESULT([unknown])
AC_MSG_WARN([Event component did not set OPAL_HAVE_WORKING_EVENTOPS])
AC_MSG_ERROR([Cannot continue])],
[AS_IF([test "$OPAL_HAVE_WORKING_EVENTOPS" = "1"],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])])
AC_DEFINE_UNQUOTED(OPAL_HAVE_WORKING_EVENTOPS,
[$OPAL_HAVE_WORKING_EVENTOPS],
[Whether our event component has working event operations or not (if not, then assumedly it only has working timers and signals)])
# The winning component will have told us where their header file
# is located
AC_MSG_CHECKING([for winning event component header file])
AS_IF([test "$opal_event_base_include" = ""],
[AC_MSG_RESULT([missing])
AC_MSG_WARN([Missing implementation header])
AC_MSG_ERROR([Cannot continue])])
AC_DEFINE_UNQUOTED([MCA_event_IMPLEMENTATION_HEADER],
["opal/mca/event/$opal_event_base_include"],
[Header to include for event implementation])
AC_MSG_RESULT([$opal_event_base_include])
])dnl

Просмотреть файл

@ -1,78 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014-2015 Intel, Inc. All rights reserved.
* Copyright (c) 2015 Los Alamos National Security, LLC. All rights
* reserved.
*
* $COPYRIGHT$
*
* Additional copyrights may follow
*/
#ifndef OPAL_MCA_EVENT_H
#define OPAL_MCA_EVENT_H
#include "opal_config.h"
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <stdint.h>
#include <stdarg.h>
#include "opal/class/opal_pointer_array.h"
#include "opal/mca/mca.h"
#include "opal/mca/base/base.h"
BEGIN_C_DECLS
/* set the number of event priority levels */
#define OPAL_EVENT_NUM_PRI 8
#define OPAL_EV_ERROR_PRI 0
#define OPAL_EV_MSG_HI_PRI 1
#define OPAL_EV_SYS_HI_PRI 2
#define OPAL_EV_MSG_LO_PRI 3
#define OPAL_EV_SYS_LO_PRI 4
#define OPAL_EV_INFO_HI_PRI 5
#define OPAL_EV_INFO_LO_PRI 6
#define OPAL_EV_LOWEST_PRI 7
#define OPAL_EVENT_SIGNAL(ev) opal_event_get_signal(ev)
#define OPAL_TIMEOUT_DEFAULT {1, 0}
/**
* Structure for event components.
*/
struct opal_event_base_component_2_0_0_t {
/** MCA base component */
mca_base_component_t base_version;
/** MCA base data */
mca_base_component_data_t base_data;
};
/**
* Convenience typedef
*/
typedef struct opal_event_base_component_2_0_0_t opal_event_base_component_2_0_0_t;
typedef struct opal_event_base_component_2_0_0_t opal_event_component_t;
/**
* Macro for use in components that are of type event
*/
#define OPAL_EVENT_BASE_VERSION_2_0_0 \
OPAL_MCA_BASE_VERSION_2_1_0("event", 2, 0, 0)
END_C_DECLS
/* include implementation to call */
#include MCA_event_IMPLEMENTATION_HEADER
#endif /* OPAL_EVENT_H_ */

25
opal/mca/event/external/Makefile.am поставляемый
Просмотреть файл

@ -1,25 +0,0 @@
#
# Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights reserved.
# Copyright (c) 2016 Intel, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# We only ever build this component statically
noinst_LTLIBRARIES = libmca_event_external.la
libmca_event_external_la_SOURCES = \
external.h \
event_external_component.c \
event_external_module.c
libmca_event_external_la_CPPFLAGS = $(opal_event_external_CPPFLAGS)
#Conditionally install the header files
if WANT_INSTALL_HEADERS
opaldir = $(opalincludedir)/$(subdir)
nobase_opal_HEADERS = external.h
endif

227
opal/mca/event/external/configure.m4 поставляемый
Просмотреть файл

@ -1,227 +0,0 @@
# -*- shell-script -*-
#
# Copyright (c) 2009-2018 Cisco Systems, Inc. All rights reserved
# Copyright (c) 2013 Los Alamos National Security, LLC. All rights reserved.
# Copyright (c) 2015-2018 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
#
# Copyright (c) 2017-2018 Intel, Inc. All rights reserved.
# Copyright (c) 2020 IBM Corporation. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
#
# Priority
#
AC_DEFUN([MCA_opal_event_external_PRIORITY], [90])
#
# Force this component to compile in static-only mode
#
AC_DEFUN([MCA_opal_event_external_COMPILE_MODE], [
AC_MSG_CHECKING([for MCA component $2:$3 compile mode])
$4="static"
AC_MSG_RESULT([$$4])
])
# MCA_event_external_POST_CONFIG()
# ---------------------------------
AC_DEFUN([MCA_opal_event_external_POST_CONFIG],[
# If we won, then do all the rest of the setup
AS_IF([test "$1" = "1"],
[AC_DEFINE_UNQUOTED([EVENT_EXTERNAL_EVENT_VERSION],
[external],
[Version of event])
# Set this variable so that the framework m4 knows what
# file to include in opal/mca/event/event.h
opal_event_external_basedir=opal/mca/event/external
opal_event_base_include="external/external.h"
# Add some stuff to CPPFLAGS so that the rest of the source
# tree can be built
CPPFLAGS="$CPPFLAGS $opal_event_external_CPPFLAGS"
LDFLAGS="$LDFLAGS $opal_event_external_LDFLAGS"
LIBS="$LIBS $opal_event_external_LIBS"
])
])dnl
# MCA_event_external_CONFIG([action-if-found], [action-if-not-found])
# --------------------------------------------------------------------
AC_DEFUN([MCA_opal_event_external_CONFIG],[
AC_CONFIG_FILES([opal/mca/event/external/Makefile])
OPAL_VAR_SCOPE_PUSH([opal_event_external_CPPFLAGS_save opal_event_external_CFLAGS_save opal_event_external_LDFLAGS_save opal_event_external_LIBS_save opal_event_dir opal_event_summary_msg])
opal_event_summary_msg="internal"
# Check the value of $with_libevent_libdir. This macro safely
# handles "yes", "no", blank, and directory name values.
OPAL_CHECK_WITHDIR([libevent-libdir], [$with_libevent_libdir],
[libevent.*])
# Did the user want us to check for libevent in a specific location?
AC_MSG_CHECKING([for external libevent in])
AS_IF([test -n "$with_libevent" && \
test "$with_libevent" != "external" && \
test "$with_libevent" != "internal" && \
test "$with_libevent" != "yes" && \
test "$with_libevent" != "no"],
[opal_event_dir=$with_libevent
AC_MSG_RESULT([$opal_event_dir])
OPAL_CHECK_WITHDIR([libevent], [$opal_event_dir],
[include/event2/event.h])
AS_IF([test -z "$with_libevent_libdir" || test "$with_libevent_libdir" = "yes"],
[AC_MSG_CHECKING([for $with_libevent/lib64])
AS_IF([test -d "$with_libevent/lib64"],
[opal_event_libdir_found=yes
AC_MSG_RESULT([found])],
[opal_event_libdir_found=no
AC_MSG_RESULT([not found])])
AS_IF([test "$opal_event_libdir_found" = "yes"],
[opal_event_libdir="$with_libevent/lib64"],
[AC_MSG_CHECKING([for $with_libevent/lib])
AS_IF([test -d "$with_libevent/lib"],
[AC_MSG_RESULT([found])
opal_event_libdir="$with_libevent/lib"],
[AC_MSG_RESULT([not found])
AC_MSG_WARN([Library directories were not found:])
AC_MSG_WARN([ $with_libevent/lib64])
AC_MSG_WARN([ $with_libevent/lib])
AC_MSG_WARN([Please use --with-libevent-libdir to identify it.])
AC_MSG_ERROR([Cannot continue])])])])],
[AC_MSG_RESULT([(default search paths)])])
AS_IF([test ! -z "$with_libevent_libdir" && test "$with_libevent_libdir" != "yes"],
[opal_event_libdir="$with_libevent_libdir"])
AS_IF([test "$with_libevent" != "internal"],
[opal_event_external_CPPFLAGS_save=$CPPFLAGS
opal_event_external_CFLAGS_save=$CFLAGS
opal_event_external_LDFLAGS_save=$LDFLAGS
opal_event_external_LIBS_save=$LIBS
OPAL_CHECK_PACKAGE([opal_event_external],
[event2/event.h],
[event_core],
[event_config_new],
[-levent_pthreads],
[$opal_event_dir],
[$opal_event_libdir],
[opal_event_external_support=yes],
[opal_event_external_support=no])
# Check to see if the above check failed because it conflicted with LSF's libevent.so
# This can happen if LSF's library is in the LDFLAGS envar or default search
# path. The 'event_getcode4name' function is only defined in LSF's libevent.so and not
# in Libevent's libevent.so
AS_IF([test "$opal_event_external_support" = "no"],
[AC_CHECK_LIB([event], [event_getcode4name],
[AC_MSG_WARN([===================================================================])
AC_MSG_WARN([Possible conflicting libevent.so libraries detected on the system.])
AC_MSG_WARN([])
AC_MSG_WARN([LSF provides a libevent.so that is not from Libevent in its])
AC_MSG_WARN([library path. It is possible that you have installed Libevent])
AC_MSG_WARN([on the system, but the linker is picking up the wrong version.])
AC_MSG_WARN([])
AC_MSG_WARN([Configure may continue and attempt to use the 'internal' libevent])
AC_MSG_WARN([instead of the 'external' libevent if you did not explicitly request])
AC_MSG_WARN([the 'external' component.])
AC_MSG_WARN([])
AC_MSG_WARN([If your intention was to use the 'external' libevent then you need])
AC_MSG_WARN([to address this linker path ordering issue. One way to do so is])
AC_MSG_WARN([to make sure the libevent system library path occurs before the])
AC_MSG_WARN([LSF library path.])
AC_MSG_WARN([===================================================================])
opal_event_external_support=no
])
])
AS_IF([test "$opal_event_external_support" = "yes"],
[LDFLAGS="$opal_event_external_LDFLAGS $LDFLAGS"
CPPFLAGS="$opal_event_external_CPPFLAGS $CPPFLAGS"])
AS_IF([test "$opal_event_external_support" = "yes"],
[# Ensure that this libevent has the symbol
# "evthread_set_lock_callbacks", which will only exist if
# libevent was configured with thread support.
AC_CHECK_LIB([event_core], [evthread_set_lock_callbacks],
[],
[AC_MSG_WARN([External libevent does not have thread support])
AC_MSG_WARN([Open MPI requires libevent to be compiled with])
AC_MSG_WARN([thread support enabled])
opal_event_external_support=no])])
AS_IF([test "$opal_event_external_support" = "yes"],
[AC_CHECK_LIB([event_pthreads], [evthread_use_pthreads],
[],
[AC_MSG_WARN([External libevent does not have thread support])
AC_MSG_WARN([Open MPI requires libevent to be compiled with])
AC_MSG_WARN([thread support enabled])
opal_event_external_support=no])])
AS_IF([test "$opal_event_external_support" = "yes"],
[AS_IF([test -z "$with_libevent" || test "$with_libevent" = "yes"],
[AC_MSG_CHECKING([if external libevent version is 2.0.22 or greater])
AC_COMPILE_IFELSE(
[AC_LANG_PROGRAM([[#include <event2/event.h>]],
[[
#if defined(_EVENT_NUMERIC_VERSION) && _EVENT_NUMERIC_VERSION < 0x02001600
#error "libevent API version is less than 0x02001600"
#elif defined(EVENT__NUMERIC_VERSION) && EVENT__NUMERIC_VERSION < 0x02001600
#error "libevent API version is less than 0x02001600"
#endif
]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
opal_event_summary_msg="internal (external libevent version is less that internal version 2.0.22)"
AC_MSG_WARN([external libevent version is less than internal version (2.0.22)])
AC_MSG_WARN([using internal libevent])
opal_event_external_support=no])])])
CPPFLAGS=$opal_event_external_CPPFLAGS_save
CFLAGS=$opal_event_external_CFLAGS_save
LDFLAGS=$opal_event_external_LDFLAGS_save
LIBS=$opal_event_external_LIBS_save
AC_SUBST(opal_event_external_CPPFLAGS)
AC_SUBST(opal_event_external_LDFLAGS)
AC_SUBST(opal_event_external_LIBS)
# These flags need to get passed to the wrapper compilers
# (this is unnecessary for the internal/embedded event)
event_external_WRAPPER_EXTRA_CPPFLAGS=$opal_event_external_CPPFLAGS
# Finally, add some flags to the wrapper compiler if we're
# building with developer headers so that our headers can
# be found.
event_external_WRAPPER_EXTRA_LDFLAGS=$opal_event_external_LDFLAGS
event_external_WRAPPER_EXTRA_LIBS=$opal_event_external_LIBS])
##################################################################
# Done!
AS_IF([test "$opal_event_external_support" = "yes"],
[# If we configured successfully, set
# OPAL_HAVE_WORKING_EVENTOPS to 1 (it's a calculated value
# in the embedded Open MPI libevent, so we can only assume
# what it is in the installed libevent :-\ ).
file=$opal_event_dir/include/libevent/config.h
OPAL_HAVE_WORKING_EVENTOPS=1
opal_event_summary_msg="external"
$1],
[OPAL_HAVE_WORKING_EVENTOPS=0
AS_IF([test "$with_libevent" != internal && test -n "$with_libevent"],
[AC_MSG_WARN([external libevent requested but cannot be built])
AC_MSG_ERROR([Cannot continue.])])
$2])
OPAL_SUMMARY_ADD([[Miscellaneous]],[[Libevent support]], [], [$opal_event_summary_msg])
OPAL_VAR_SCOPE_POP
])dnl

Просмотреть файл

@ -1,119 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2011 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013-2015 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
*
* Copyright (c) 2017 Intel, Inc. All rights reserved.
* Copyright (c) 2018 Amazon.com, Inc. or its affiliates. All Rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "opal_config.h"
#include "opal/constants.h"
#include "opal/mca/event/event.h"
#include "event.h"
#include "opal/util/argv.h"
#include "opal/util/printf.h"
/*
* Public string showing the sysinfo ompi_linux component version number
*/
const char *opal_event_external_component_version_string =
"OPAL event_external event MCA component version " OPAL_VERSION;
/*
* Local function
*/
static int event_external_open(void);
static int event_external_register (void);
char *ompi_event_module_include = NULL;
/*
* Instantiate the public struct with all of our public information
* and pointers to our public functions in it
*/
const opal_event_component_t mca_event_external_component = {
/* First, the mca_component_t struct containing meta information
about the component itself */
.base_version = {
OPAL_EVENT_BASE_VERSION_2_0_0,
/* Component name and version */
.mca_component_name = "external",
MCA_BASE_MAKE_VERSION(component, OPAL_MAJOR_VERSION, OPAL_MINOR_VERSION,
OPAL_RELEASE_VERSION),
/* Component open and close functions */
.mca_open_component = event_external_open,
.mca_register_component_params = event_external_register
},
.base_data = {
/* The component is checkpoint ready */
MCA_BASE_METADATA_PARAM_CHECKPOINT
}
};
static int event_external_open(void)
{
/* Must have some code in this file, or the OS X linker may
eliminate the whole file */
return OPAL_SUCCESS;
}
static int event_external_register (void) {
const char **all_available_eventops;
char *avail = NULL;
char *help_msg = NULL;
int ret;
// Get supported methods
all_available_eventops = event_get_supported_methods();
#ifdef __APPLE__
ompi_event_module_include ="select";
#else
ompi_event_module_include = "poll";
#endif
avail = opal_argv_join((char**)all_available_eventops, ',');
opal_asprintf( &help_msg,
"Comma-delimited list of libevent subsystems "
"to use (%s -- available on your platform)",
avail );
ret = mca_base_component_var_register (&mca_event_external_component.base_version,
"event_include", help_msg,
MCA_BASE_VAR_TYPE_STRING, NULL, 0,
MCA_BASE_VAR_FLAG_SETTABLE,
OPAL_INFO_LVL_3,
MCA_BASE_VAR_SCOPE_LOCAL,
&ompi_event_module_include);
free(help_msg); /* release the help message */
free(avail);
avail = NULL;
if (0 > ret) {
return ret;
}
ret = mca_base_var_register_synonym (ret, "opal", "opal", "event", "include", 0);
if (0 > ret) {
return ret;
}
return OPAL_SUCCESS;
}

Просмотреть файл

@ -1,89 +0,0 @@
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012-2013 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2017 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2017 IBM Corporation. All rights reserved.
*/
#include "opal_config.h"
#include "opal/constants.h"
#include "opal/util/output.h"
#include "opal/mca/event/base/base.h"
#include "external.h"
#include "opal/util/argv.h"
extern char *ompi_event_module_include;
static struct event_config *config = NULL;
opal_event_base_t* opal_event_base_create(void)
{
opal_event_base_t *base;
base = event_base_new_with_config(config);
if (NULL == base) {
/* there is no backend method that does what we want */
opal_output(0, "No event method available");
}
return base;
}
int opal_event_init(void)
{
const char **all_available_eventops = NULL;
char **includes=NULL;
bool dumpit=false;
int i, j;
if (opal_output_get_verbosity(opal_event_base_framework.framework_output) > 4) {
event_enable_debug_mode();
}
all_available_eventops = event_get_supported_methods();
if (NULL == ompi_event_module_include) {
/* Shouldn't happen, but... */
ompi_event_module_include = strdup("select");
}
includes = opal_argv_split(ompi_event_module_include,',');
/* get a configuration object */
config = event_config_new();
/* cycle thru the available subsystems */
for (i = 0 ; NULL != all_available_eventops[i] ; ++i) {
/* if this module isn't included in the given ones,
* then exclude it
*/
dumpit = true;
for (j=0; NULL != includes[j]; j++) {
if (0 == strcmp("all", includes[j]) ||
0 == strcmp(all_available_eventops[i], includes[j])) {
dumpit = false;
break;
}
}
if (dumpit) {
event_config_avoid_method(config, all_available_eventops[i]);
}
}
opal_argv_free(includes);
return OPAL_SUCCESS;
}
int opal_event_finalize(void)
{
return OPAL_SUCCESS;
}
opal_event_t* opal_event_alloc(void)
{
opal_event_t *ev;
ev = (opal_event_t*)malloc(sizeof(opal_event_t));
return ev;
}

7
opal/mca/event/external/owner.txt поставляемый
Просмотреть файл

@ -1,7 +0,0 @@
#
# owner/status file
# owner: institution that is responsible for this package
# status: e.g. active, maintenance, unmaintained
#
owner:CISCO
status: maintenance

Просмотреть файл

@ -1,87 +0,0 @@
#
# Copyright (c) 2010-2014 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012-2013 Los Alamos National Security, LLC.
# All rights reserved.
# Copyright (c) 2015 Intel, Inc. All rights reserved
# Copyright (c) 2016 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
EXTRA_DIST = autogen.subdirs
AM_CPPFLAGS = -I$(srcdir)/libevent -I$(srcdir)/libevent/include -I$(builddir)/libevent/include -I$(srcdir)/libevent/compat
SUBDIRS = libevent
headers = libevent2022.h
sources = \
libevent2022_component.c \
libevent2022_module.c
# Conditionally install the header files
if WANT_INSTALL_HEADERS
headers += libevent/opal_rename.h libevent/event.h libevent/evutil.h libevent/util-internal.h \
libevent/mm-internal.h libevent/ipv6-internal.h \
libevent/strlcpy-internal.h libevent/evbuffer-internal.h \
libevent/bufferevent-internal.h libevent/event-internal.h \
libevent/evthread-internal.h libevent/defer-internal.h \
libevent/minheap-internal.h libevent/log-internal.h \
libevent/evsignal-internal.h libevent/evmap-internal.h \
libevent/changelist-internal.h libevent/iocp-internal.h \
libevent/ratelim-internal.h \
libevent/WIN32-Code/event2/event-config.h \
libevent/WIN32-Code/tree.h \
libevent/compat/sys/queue.h \
libevent/evhttp.h libevent/http-internal.h libevent/ht-internal.h \
libevent/evrpc.h libevent/evrpc-internal.h \
libevent/evdns.h libevent/include/event2/buffer_compat.h \
libevent/include/event2/buffer.h \
libevent/include/event2/bufferevent_compat.h \
libevent/include/event2/bufferevent_ssl.h \
libevent/include/event2/bufferevent_struct.h \
libevent/include/event2/bufferevent.h \
libevent/include/event2/dns_compat.h \
libevent/include/event2/dns_struct.h \
libevent/include/event2/event_compat.h \
libevent/include/event2/event_struct.h \
libevent/include/event2/event.h \
libevent/include/event2/http_compat.h \
libevent/include/event2/http_struct.h \
libevent/include/event2/http.h \
libevent/include/event2/keyvalq_struct.h \
libevent/include/event2/listener.h \
libevent/include/event2/rpc_compat.h \
libevent/include/event2/rpc_struct.h \
libevent/include/event2/rpc.h \
libevent/include/event2/tag_compat.h \
libevent/include/event2/tag.h \
libevent/include/event2/thread.h \
libevent/include/event2/util.h
opaldir = $(opalincludedir)/$(subdir)
nobase_opal_HEADERS = $(headers)
nobase_nodist_opal_HEADERS = libevent/include/event2/event-config.h
endif
# Make the output library in this directory, and name it
# libmca_<type>_<name>.la because build is forced to be static-only
component_noinst = libmca_event_libevent2022.la
component_install =
# We only ever build this component statically
noinst_LTLIBRARIES = $(component_noinst)
libmca_event_libevent2022_la_SOURCES =$(sources)
libmca_event_libevent2022_la_LDFLAGS = -module -avoid-version
libmca_event_libevent2022_la_LIBADD = $(builddir)/libevent/libevent.la
libmca_event_libevent2022_la_DEPENDENCIES = $(builddir)/libevent/libevent.la

Просмотреть файл

@ -1 +0,0 @@
libevent

Просмотреть файл

@ -1,255 +0,0 @@
# -*- shell-script -*-
#
# Copyright (c) 2009-2018 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2012-2013 Los Alamos National Security, LLC. All rights reserved.
# Copyright (c) 2015 Intel, Inc. All rights reserved.
# Copyright (c) 2015-2016 Research Organization for Information Science
# and Technology (RIST). All rights reserved.
#
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
AC_DEFUN([MCA_opal_event_libevent2022_PRIORITY], [80])
dnl
dnl Force this component to compile in static-only mode
dnl
AC_DEFUN([MCA_opal_event_libevent2022_COMPILE_MODE], [
AC_MSG_CHECKING([for MCA component $2:$3 compile mode])
$4="static"
AC_MSG_RESULT([$$4])
])
AC_DEFUN([MCA_opal_event_libevent2022_POST_CONFIG], [
AM_CONDITIONAL(OPAL_EVENT_HAVE_THREAD_SUPPORT,
[test "$enable_event_thread_support" = "yes"])
AS_IF([test "$1" = "1"],
[ # Build libevent/include/event2/event-config.h. If we
# don't do it here, then libevent's Makefile.am will build
# it during "make all", which is too late for us (because
# other things are built before the event framework that
# end up including event-config.h). The steps below were
# copied from libevent's Makefile.am.
AC_CONFIG_COMMANDS([opal/mca/event/libevent2022/libevent/include/event2/event-config.h],
[opal_event_libevent2022_basedir="opal/mca/event/libevent2022"
libevent_file="$opal_event_libevent2022_basedir/libevent/include/event2/event-config.h"
rm -f "$libevent_file.new"
cat > "$libevent_file.new" <<EOF
/* event2/event-config.h
*
* This file was generated by autoconf when libevent was built, and
* post- processed by Open MPI's component configure.m4 (so that
* Libevent wouldn't build it during "make all") so that its macros
* would have a uniform prefix.
*
* DO NOT EDIT THIS FILE.
*
* Do not rely on macros in this file existing in later versions
*/
#ifndef _EVENT2_EVENT_CONFIG_H_
#define _EVENT2_EVENT_CONFIG_H_
EOF
sed -e 's/#define /#define _EVENT_/' \
-e 's/#undef /#undef _EVENT_/' \
-e 's/#ifndef /#ifndef _EVENT_/' < "$opal_event_libevent2022_basedir/libevent/config.h" >> "$libevent_file.new"
echo "#endif" >> "$libevent_file.new"
# Only make a new .h libevent_file if the
# contents haven't changed
diff -q $libevent_file "$libevent_file.new" > /dev/null 2> /dev/null
if test "$?" = "0"; then
echo $libevent_file is unchanged
else
cp "$libevent_file.new" $libevent_file
fi
rm -f "$libevent_file.new"])
# Must set this variable so that the framework m4 knows
# what file to include in opal/mca/event/event.h
opal_event_base_include="libevent2022/libevent2022.h"
# Add some stuff to CPPFLAGS so that the rest of the source
# tree can be built
libevent_file=$opal_event_libevent2022_basedir/libevent
CPPFLAGS="-I$OPAL_TOP_SRCDIR/$libevent_file -I$OPAL_TOP_SRCDIR/$libevent_file/include $CPPFLAGS"
AS_IF([test "$OPAL_TOP_BUILDDIR" != "$OPAL_TOP_SRCDIR"],
[CPPFLAGS="-I$OPAL_TOP_BUILDDIR/$libevent_file/include $CPPFLAGS"])
unset libevent_file
])
])
dnl MCA_event_libevent2022_CONFIG([action-if-can-compile],
dnl [action-if-cant-compile])
dnl ------------------------------------------------
AC_DEFUN([MCA_opal_event_libevent2022_CONFIG],[
AC_CONFIG_FILES([opal/mca/event/libevent2022/Makefile])
opal_event_libevent2022_basedir="opal/mca/event/libevent2022"
# We know that the external event component will be configured
# before this one because of its priority. This component is only
# needed if the external component was not successful in selecting
# itself.
AC_MSG_CHECKING([if event external component succeeded])
AS_IF([test "$opal_event_external_support" = "yes"],
[AC_MSG_RESULT([yes])
AC_MSG_NOTICE([event:external succeeded, so this component will be configured, but then will be skipped])
MCA_opal_event_libevent2022_FAKE_CONFIG($2)],
[AC_MSG_RESULT([no])
AC_MSG_NOTICE([event:external failed, so this component will be used])
MCA_opal_event_libevent2022_REAL_CONFIG($1, $2)])
])
dnl
dnl This macro is invoked when event:external is going to be used (and
dnl this component is *not* going to be used).
dnl
dnl $1: action if this component can compile
dnl (we still invoke $1 so that "make distclean" and friends will work)
dnl
AC_DEFUN([MCA_opal_event_libevent2022_FAKE_CONFIG],[
MCA_opal_event_libevent2022_SUB_CONFIGURE([], [], [])
AC_MSG_NOTICE([remember: event:external will be used; this component was configured, but will be skipped])
$1
])
dnl
dnl This macro has a bunch of side effects. It is only meant to be
dnl invoked when this component is going to be used (i.e., when
dnl event:external is *not* going to be used). If this macro is invoked
dnl when event:external is used, Terrible Things will happen.
dnl
dnl $1: action if this component can compile
dnl $2: action if this component cannot compile
dnl
AC_DEFUN([MCA_opal_event_libevent2022_REAL_CONFIG],[
OPAL_VAR_SCOPE_PUSH([CFLAGS_save CPPFLAGS_save libevent_file event_args libevent_happy])
CFLAGS_save="$CFLAGS"
CFLAGS="$OPAL_CFLAGS_BEFORE_PICKY $OPAL_VISIBILITY_CFLAGS"
CPPFLAGS_save="$CPPFLAGS"
CPPFLAGS="-I$OPAL_TOP_SRCDIR -I$OPAL_TOP_BUILDDIR -I$OPAL_TOP_SRCDIR/opal/include $CPPFLAGS"
AC_MSG_CHECKING([libevent configuration args])
event_args="--disable-dns --disable-http --disable-rpc --disable-openssl --enable-thread-support"
AC_ARG_ENABLE(event-rtsig,
AC_HELP_STRING([--enable-event-rtsig],
[enable support for real time signals (experimental)]))
if test "$enable_event_rtsig" = "yes"; then
event_args="$event_args --enable-rtsig"
fi
AC_ARG_ENABLE(event-select,
AC_HELP_STRING([--disable-event-select], [disable select support]))
if test "$enable_event_select" = "no"; then
event_args="$event_args --disable-select"
fi
AC_ARG_ENABLE(event-poll,
AC_HELP_STRING([--disable-event-poll], [disable poll support]))
if test "$enable_event_poll" = "no"; then
event_args="$event_args --disable-poll"
fi
AC_ARG_ENABLE(event-devpoll,
AC_HELP_STRING([--disable-event-devpoll], [disable devpoll support]))
if test "$enable_event_devpoll" = "no"; then
event_args="$event_args --disable-devpoll"
fi
AC_ARG_ENABLE(event-kqueue,
AC_HELP_STRING([--disable-event-kqueue], [disable kqueue support]))
if test "$enable_event_kqueue" = "no"; then
event_args="$event_args --disable-kqueue"
fi
AC_ARG_ENABLE(event-epoll,
AC_HELP_STRING([--disable-event-epoll], [disable epoll support]))
if test "$enable_event_epoll" = "no"; then
event_args="$event_args --disable-epoll"
fi
AC_ARG_ENABLE(event-evport,
AC_HELP_STRING([--enable-event-evport], [enable evport support]))
if test "$enable_event_evport" = "yes"; then
event_args="$event_args --enable-evport"
else
event_args="$event_args --disable-evport"
fi
AC_ARG_ENABLE(event-signal,
AC_HELP_STRING([--disable-event-signal], [disable signal support]))
if test "$enable_event_signal" = "no"; then
event_args="$event_args --disable-signal"
fi
AC_ARG_ENABLE(event-debug,
AC_HELP_STRING([--enable-event-debug], [enable event library debug output]))
if test "$enable_event_debug" = "yes"; then
event_args="$event_args --enable-debug-mode"
CFLAGS="-DUSE_DEBUG $CFLAGS"
fi
AC_MSG_RESULT([$event_args])
# Invoke the embedded configure script.
# We define "random" to be "opal_random" so that Libevent will not
# use random(3) internally (and potentially unexpectedly perturb
# values returned by rand(3) to the application).
CPPFLAGS="$CPPFLAGS -Drandom=opal_random"
MCA_opal_event_libevent2022_SUB_CONFIGURE([$event_args],
[libevent_happy="yes"],
[libevent_happy="no"])
if test "$libevent_happy" = "no"; then
AC_MSG_WARN([Event library failed to configure])
AC_MSG_ERROR([Cannot continue])
fi
# Finally, add some flags to the wrapper compiler if we're
# building with developer headers so that our headers can
# be found.
event_libevent2022_WRAPPER_EXTRA_CPPFLAGS='-I${pkgincludedir}/opal/mca/event/libevent2022/libevent -I${pkgincludedir}/opal/mca/event/libevent2022/libevent/include'
CFLAGS="$CFLAGS_save"
CPPFLAGS="$CPPFLAGS_save"
# If we configured successfully, set OPAL_HAVE_WORKING_EVENTOPS to
# the value in the generated libevent/config.h (NOT
# libevent/include/event2/event-config.h!). Otherwise, set it to
# 0.
libevent_file=$opal_event_libevent2022_basedir/libevent/config.h
AS_IF([test "$libevent_happy" = "yes" && test -r $libevent_file],
[OPAL_HAVE_WORKING_EVENTOPS=`grep HAVE_WORKING_EVENTOPS $libevent_file | awk '{print [$]3 }'`
$1],
[$2
OPAL_HAVE_WORKING_EVENTOPS=0])
OPAL_VAR_SCOPE_POP
])
dnl Call configure in the embedded libevent.
dnl
dnl We still do this so that all the proper GNU Autotools
dnl infrastructure is setup properly (e.g., w.r.t. SUBDIRS=libevent in
dnl this directorys Makefile.am, we still need the Autotools "make
dnl distclean" infrastructure to work properly).
dnl
dnl $1: extra configure arguments
dnl $2: action on success
dnl $3: action on failure
dnl
AC_DEFUN([MCA_opal_event_libevent2022_SUB_CONFIGURE],[
# We define "random" to be "opal_random" so that Libevent will not
# use random(3) internally (and potentially unexpectedly perturb
# values returned by rand(3) to the application).
OPAL_CONFIG_SUBDIR([$opal_event_libevent2022_basedir/libevent],
[$1 $opal_subdir_args 'CPPFLAGS=$CPPFLAGS'],
[$2], [$3])
])

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,257 +0,0 @@
# Doxyfile 1.5.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project
#
# All text after a hash (#) is considered a comment and will be ignored
# The format is:
# TAG = value [value, ...]
# For lists items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (" ")
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
PROJECT_NAME = libevent
# Place all output under 'doxygen/'
OUTPUT_DIRECTORY = doxygen/
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
# will interpret the first line (until the first dot) of a JavaDoc-style
# comment as the brief description. If set to NO, the JavaDoc
# comments will behave just like the Qt-style comments (thus requiring an
# explicit @brief command for a brief description.
JAVADOC_AUTOBRIEF = YES
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = YES
# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
# can be used to strip a user-defined part of the path. Stripping is
# only done if one of the specified strings matches the left-hand part of
# the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the
# path to strip.
STRIP_FROM_PATH = include/
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag can be used to specify the files and/or directories that contain
# documented source files. You may enter file names like "myfile.cpp" or
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT = \
include/event2/buffer.h \
include/event2/buffer_compat.h \
include/event2/bufferevent.h \
include/event2/bufferevent_compat.h \
include/event2/bufferevent_ssl.h \
include/event2/dns.h \
include/event2/dns_compat.h \
include/event2/event.h \
include/event2/event_compat.h \
include/event2/http.h \
include/event2/http_compat.h \
include/event2/listener.h \
include/event2/rpc.h \
include/event2/rpc_compat.h \
include/event2/tag.h \
include/event2/tag_compat.h \
include/event2/thread.h \
include/event2/util.h
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = YES
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, a4wide, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4wide
# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
LATEX_HEADER =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = NO
# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = YES
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO
# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = NO
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages
GENERATE_MAN = NO
# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3
# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = YES
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
# in the INCLUDE_PATH (see below) will be search if a #include is found.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED = TAILQ_ENTRY RB_ENTRY _EVENT_DEFINED_TQENTRY _EVENT_IN_DOXYGEN
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all function-like macros that are alone
# on a line, have an all uppercase name, and do not end with a semicolon. Such
# function macros are typically used for boiler-plate code, and will confuse
# the parser if not removed.
SKIP_FUNCTION_MACROS = YES

Просмотреть файл

@ -1,74 +0,0 @@
Libevent is available for use under the following license, commonly known
as the 3-clause (or "modified") BSD license:
==============================
Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================
Portions of Libevent are based on works by others, also made available by
them under the three-clause BSD license above. The copyright notices are
available in the corresponding source files; the license is as above. Here's
a list:
log.c:
Copyright (c) 2000 Dug Song <dugsong@monkey.org>
Copyright (c) 1993 The Regents of the University of California.
strlcpy.c:
Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
win32select.c:
Copyright (c) 2003 Michael A. Davis <mike@datanerds.net>
evport.c:
Copyright (c) 2007 Sun Microsystems
ht-internal.h:
Copyright (c) 2002 Christopher Clark
minheap-internal.h:
Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
==============================
The arc4module is available under the following, sometimes called the
"OpenBSD" license:
Copyright (c) 1996, David Mazieres <dm@uun.org>
Copyright (c) 2008, Damien Miller <djm@openbsd.org>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

Просмотреть файл

@ -1,286 +0,0 @@
# Makefile.am for libevent
# Copyright 2000-2007 Niels Provos
# Copyright 2007-2012 Niels Provos and Nick Mathewson
#
# See LICENSE for copying information.
# 'foreign' means that we're not enforcing GNU package rules strictly.
# '1.7' means that we need automake 1.7 or later (and we do).
AUTOMAKE_OPTIONS = foreign 1.7
ACLOCAL_AMFLAGS = -I m4
# This is the "Release" of the Libevent ABI. It takes precedence over
# the VERSION_INFO, so that two versions of Libevent with the same
# "Release" are never binary-compatible.
#
# This number incremented once for the 2.0 release candidate, and
# will increment for each series until we revise our interfaces enough
# that we can seriously expect ABI compatibility between series.
#
RELEASE = -release 2.0
# This is the version info for the libevent binary API. It has three
# numbers:
# Current -- the number of the binary API that we're implementing
# Revision -- which iteration of the implementation of the binary
# API are we supplying?
# Age -- How many previous binary API versions do we also
# support?
#
# To increment a VERSION_INFO (current:revision:age):
# If the ABI didn't change:
# Return (current:revision+1:age)
# If the ABI changed, but it's backward-compatible:
# Return (current+1:0:age+1)
# If the ABI changed and it isn't backward-compatible:
# Return (current+1:0:0)
#
# Once an RC is out, DO NOT MAKE ANY ABI-BREAKING CHANGES IN THAT SERIES
# UNLESS YOU REALLY REALLY HAVE TO.
VERSION_INFO = 6:9:1
# History: RELEASE VERSION_INFO
# 2.0.1-alpha -- 2.0 1:0:0
# 2.0.2-alpha -- 2:0:0
# 2.0.3-alpha -- 2:0:0 (should have incremented; didn't.)
# 2.0.4-alpha -- 3:0:0
# 2.0.5-beta -- 4:0:0
# 2.0.6-rc -- 2.0 2:0:0
# 2.0.7-rc -- 2.0 3:0:1
# 2.0.8-rc -- 2.0 4:0:2
# 2.0.9-rc -- 2.0 5:0:0 (ABI changed slightly)
# 2.0.10-stable-- 2.0 5:1:0 (No ABI change)
# 2.0.11-stable-- 2.0 6:0:1 (ABI changed, backward-compatible)
# 2.0.12-stable-- 2.0 6:1:1 (No ABI change)
# 2.0.13-stable-- 2.0 6:2:1 (No ABI change)
# 2.0.14-stable-- 2.0 6:3:1 (No ABI change)
# 2.0.15-stable-- 2.0 6:3:1 (Forgot to update :( )
# 2.0.16-stable-- 2.0 6:4:1 (No ABI change)
# 2.0.17-stable-- 2.0 6:5:1 (No ABI change)
# 2.0.18-stable-- 2.0 6:6:1 (No ABI change)
# 2.0.19-stable-- 2.0 6:7:1 (No ABI change)
# 2.0.20-stable-- 2.0 6:8:1 (No ABI change)
# 2.0.21-stable-- 2.0 6:9:1 (No ABI change)
#
# For Libevent 2.1:
# 2.1.1-alpha -- 2.1 1:0:0
# ABI version history for this package effectively restarts every time
# we change RELEASE. Version 1.4.x had RELEASE of 1.4.
#
# Ideally, we would not be using RELEASE at all; instead we could just
# use the VERSION_INFO field to label our backward-incompatible ABI
# changes, and those would be few and far between. Unfortunately,
# Libevent still exposes far too many volatile structures in its
# headers, so we pretty much have to assume that most development
# series will break ABI compatibility. For now, it's simplest just to
# keep incrementing the RELEASE between series and resetting VERSION_INFO.
#
# Eventually, when we get to the point where the structures in the
# headers are all non-changing (or not there at all!), we can shift to
# a more normal worldview where backward-incompatible ABI changes are
# nice and rare. For the next couple of years, though, 'struct event'
# is user-visible, and so we can pretty much guarantee that release
# series won't be binary-compatible.
# Open MPI: Don't install this, ever
#if INSTALL_LIBEVENT
#dist_bin_SCRIPTS = event_rpcgen.py
#endif
# Open MPI: disable pkg-config stuff (left commented for reference)
#pkgconfigdir=$(libdir)/pkgconfig
#LIBEVENT_PKGCONFIG=libevent.pc
# These sources are conditionally added by configure.in or conditionally
# included from other files.
PLATFORM_DEPENDENT_SRC = \
epoll_sub.c \
arc4random.c
# Open MPI: removed test/Makefile.nmake and make-event-config.sed from
# EXTRA_DIST
EXTRA_DIST = \
LICENSE \
autogen.sh \
event_rpcgen.py \
libevent.pc.in \
Doxyfile \
whatsnew-2.0.txt \
Makefile.nmake \
$(PLATFORM_DEPENDENT_SRC)
# Open MPI: disable these lines
#LIBEVENT_LIBS_LA = libevent.la libevent_core.la libevent_extra.la
#if PTHREADS
#LIBEVENT_LIBS_LA += libevent_pthreads.la
#LIBEVENT_PKGCONFIG += libevent_pthreads.pc
#endif
#if OPENSSL
#LIBEVENT_LIBS_LA += libevent_openssl.la
#LIBEVENT_PKGCONFIG += libevent_openssl.pc
#endif
# Open MPI: Changed to noinst and libevent.la
noinst_LTLIBRARIES = libevent.la
# Open MPI: removed sample and test dirs
SUBDIRS = . include
if BUILD_WIN32
SYS_LIBS = -lws2_32 -lshell32 -ladvapi32
SYS_SRC = win32select.c evthread_win32.c buffer_iocp.c event_iocp.c \
bufferevent_async.c
SYS_INCLUDES = -IWIN32-Code
else
SYS_LIBS =
SYS_SRC =
SYS_INCLUDES =
endif
if SELECT_BACKEND
SYS_SRC += select.c
endif
if POLL_BACKEND
SYS_SRC += poll.c
endif
if DEVPOLL_BACKEND
SYS_SRC += devpoll.c
endif
if KQUEUE_BACKEND
SYS_SRC += kqueue.c
endif
if EPOLL_BACKEND
SYS_SRC += epoll.c
endif
if EVPORT_BACKEND
SYS_SRC += evport.c
endif
if SIGNAL_SUPPORT
SYS_SRC += signal.c
endif
# Open MPI: commented this out. We generate event-config.h in the
# component configure.m4.
#BUILT_SOURCES = include/event2/event-config.h
# Open MPI: commented this out. We generate event-config.h in the
# component configure.m4
#include/event2/event-config.h: config.h make-event-config.sed
# test -d include/event2 || $(MKDIR_P) include/event2
# $(SED) -f $(srcdir)/make-event-config.sed < config.h > $@T
# mv -f $@T $@
# Open MPI: -- eliminate extra libevent libraries. We compile
# everything that is enabled into the core. Do not include any
# code not currently in use (buffer, tagging, and listener code).
CORE_SRC = event.c evthread.c evmap.c log.c evutil.c evutil_rand.c \
strlcpy.c $(SYS_SRC)
CORE_LIBS =
if HTTP
CORE_SRC += http.c
endif
if DNS
CORE_SRC += evdns.c
endif
if RPC
CORE_SRC += evrpc.c
endif
if PTHREADS
CORE_SRC += evthread_pthread.c
endif
if OPENSSL
CORE_SRC += bufferevent_openssl.c
CORE_LIBS += -lcrypto -lssl
endif
# Open MPI - comment out all unused extra libs
#if BUILD_WITH_NO_UNDEFINED
#NO_UNDEFINED = -no-undefined
#MAYBE_CORE = libevent_core.la
#else
#NO_UNDEFINED =
#MAYBE_CORE =
#endif
#
#GENERIC_LDFLAGS = -version-info $(VERSION_INFO) $(RELEASE) $(NO_UNDEFINED)
#
#libevent_la_SOURCES = $(CORE_SRC) $(EXTRA_SRC)l
#libevent_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
#libevent_la_LDFLAGS = $(GENERIC_LDFLAGS)
#
#libevent_core_la_SOURCES = $(CORE_SRC)
#libevent_core_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
#libevent_core_la_LDFLAGS = $(GENERIC_LDFLAGS)
#
#if PTHREADS
#libevent_pthreads_la_SOURCES = evthread_pthread.c
#libevent_pthreads_la_LIBADD = $(MAYBE_CORE)
#libevent_pthreads_la_LDFLAGS = $(GENERIC_LDFLAGS)
#endif
#
#libevent_extra_la_SOURCES = $(EXTRA_SRC)
#libevent_extra_la_LIBADD = $(MAYBE_CORE) $(SYS_LIBS)
#libevent_extra_la_LDFLAGS = $(GENERIC_LDFLAGS)
#
#if OPENSSL
#libevent_openssl_la_SOURCES = bufferevent_openssl.c
#libevent_openssl_la_LIBADD = $(MAYBE_CORE) $(OPENSSL_LIBS)
#libevent_openssl_la_LDFLAGS = $(GENERIC_LDFLAGS)
#endif
#
#noinst_HEADERS = util-internal.h mm-internal.h ipv6-internal.h \
# evrpc-internal.h strlcpy-internal.h evbuffer-internal.h \
# bufferevent-internal.h http-internal.h event-internal.h \
# evthread-internal.h ht-internal.h defer-internal.h \
# minheap-internal.h log-internal.h evsignal-internal.h evmap-internal.h \
# changelist-internal.h iocp-internal.h \
# ratelim-internal.h \
# WIN32-Code/event2/event-config.h \
# WIN32-Code/tree.h \
# compat/sys/queue.h
#
#EVENT1_HDRS = event.h evhttp.h evdns.h evrpc.h evutil.h
#
#if INSTALL_LIBEVENT
#include_HEADERS = $(EVENT1_HDRS)
#else
#noinst_HEADERS += $(EVENT1_HDRS)
#endif
# Open MPI - define libs and headers
libevent_la_SOURCES = $(CORE_SRC) $(headers)
libevent_la_LIBADD = $(CORE_LIBS) $(SYS_LIBS)
EXTRA_DIST += event.h evutil.h util-internal.h mm-internal.h ipv6-internal.h \
strlcpy-internal.h evbuffer-internal.h \
bufferevent-internal.h event-internal.h \
evthread-internal.h defer-internal.h \
minheap-internal.h log-internal.h evsignal-internal.h evmap-internal.h \
changelist-internal.h iocp-internal.h \
ratelim-internal.h \
WIN32-Code/event2/event-config.h \
WIN32-Code/tree.h \
compat/sys/queue.h $(SYS_INCLUDES) \
evhttp.h http-internal.h ht-internal.h \
evrpc.h evrpc-internal.h \
evdns.h
AM_CPPFLAGS = -I$(srcdir)/compat -I$(srcdir)/include -I./include $(SYS_INCLUDES)
# Open MPI: remove verify, doxygen, and FORCE
#verify: check
#
#doxygen: FORCE
# doxygen $(srcdir)/Doxyfile
#FORCE:
DISTCLEANFILES = *~ libevent.pc ./include/event2/event-config.h

Просмотреть файл

@ -1,47 +0,0 @@
# WATCH OUT! This makefile is a work in progress. It is probably missing
# tons of important things. DO NOT RELY ON IT TO BUILD A GOOD LIBEVENT.
# Needed for correctness
CFLAGS=/IWIN32-Code /Iinclude /Icompat /DWIN32 /DHAVE_CONFIG_H /I.
# For optimization and warnings
CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
# XXXX have a debug mode
LIBFLAGS=/nologo
CORE_OBJS=event.obj buffer.obj bufferevent.obj bufferevent_sock.obj \
bufferevent_pair.obj listener.obj evmap.obj log.obj evutil.obj \
strlcpy.obj signal.obj bufferevent_filter.obj evthread.obj \
bufferevent_ratelim.obj evutil_rand.obj
WIN_OBJS=win32select.obj evthread_win32.obj buffer_iocp.obj \
event_iocp.obj bufferevent_async.obj
EXTRA_OBJS=event_tagging.obj http.obj evdns.obj evrpc.obj
ALL_OBJS=$(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
STATIC_LIBS=libevent_core.lib libevent_extras.lib libevent.lib
all: static_libs tests
static_libs: $(STATIC_LIBS)
libevent_core.lib: $(CORE_OBJS) $(WIN_OBJS)
lib $(LIBFLAGS) $(CORE_OBJS) $(WIN_OBJS) /out:libevent_core.lib
libevent_extras.lib: $(EXTRA_OBJS)
lib $(LIBFLAGS) $(EXTRA_OBJS) /out:libevent_extras.lib
libevent.lib: $(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
lib $(LIBFLAGS) $(CORE_OBJS) $(EXTRA_OBJS) $(WIN_OBJS) /out:libevent.lib
clean:
del $(ALL_OBJS)
del $(STATIC_LIBS)
cd test
$(MAKE) /F Makefile.nmake clean
tests:
cd test
$(MAKE) /F Makefile.nmake

Просмотреть файл

@ -1,198 +0,0 @@
0. BUILDING AND INSTALLATION (Briefly)
$ ./configure
$ make
$ make verify # (optional)
$ sudo make install
1. BUILDING AND INSTALLATION (In Depth)
To build libevent, type
$ ./configure && make
(If you got libevent from the git repository, you will
first need to run the included "autogen.sh" script in order to
generate the configure script.)
You can run the regression tests by running
$ make verify
Install as root via
# make install
Before reporting any problems, please run the regression tests.
To enable the low-level tracing build the library as:
CFLAGS=-DUSE_DEBUG ./configure [...]
Standard configure flags should work. In particular, see:
--disable-shared Only build static libraries
--prefix Install all files relative to this directory.
The configure script also supports the following flags:
--enable-gcc-warnings Enable extra compiler checking with GCC.
--disable-malloc-replacement
Don't let applications replace our memory
management functions
--disable-openssl Disable support for OpenSSL encryption.
--disable-thread-support Don't support multithreaded environments.
2. USEFUL LINKS:
For the latest released version of Libevent, see the official website at
http://libevent.org/ .
There's a pretty good work-in-progress manual up at
http://www.wangafu.net/~nickm/libevent-book/ .
For the latest development versions of Libevent, access our Git repository
via
"git clone git://levent.git.sourceforge.net/gitroot/levent/libevent"
You can browse the git repository online at
http://levent.git.sourceforge.net/git/gitweb-index.cgi .
To report bugs, request features, or submit patches to Libevent,
use the Sourceforge trackers at
https://sourceforge.net/tracker/?group_id=50884 .
There's also a libevent-users mailing list for talking about Libevent
use and development: http://archives.seul.org/libevent/users/
3. ACKNOWLEDGMENTS
The following people have helped with suggestions, ideas, code or
fixing bugs:
Arno Bakker
Alejo
Weston Andros Adamson
William Ahern
Ivan Andropov
Sergey Avseyev
Avi Bab
Gilad Benjamini
Stas Bekman
Joachim Bauch
Denis Bilenko
Julien Blache
Kevin Bowling
Tomash Brechko
Kelly Brock
Ralph Castain
Adrian Chadd
Lawnstein Chan
Shuo Chen
Ka-Hing Cheung
Andrew Cox
George Danchev
Andrew Danforth
Antony Dovgal
Ed Day
Christopher Davis
Mike Davis
Mihai Draghicioiu
Mark Ellzey
Shie Erlich
Leonid Evdokimov
Juan Pablo Fernandez
Christophe Fillot
Mike Frysinger
Remi Gacogne
Alexander von Gernler
Artur Grabowski
Sebastian Hahn
Dave Hart
Greg Hazel
Michael Herf
Savg He
Mark Heily
Greg Hewgill
Andrew Hochhaus
Aaron Hopkins
Tani Hosokawa
Jamie Iles
Claudio Jeker
Evan Jones
George Kadianakis
Phua Keat
Kevin Ko
Brian Koehmstedt
Marko Kreen
Valery Kyholodov
Ross Lagerwall
Scott Lamb
Christopher Layne
Adam Langley
Philip Lewis
Zhou Li
David Libenzi
Yan Lin
Moshe Litvin
Simon Liu
Mitchell Livingston
Hagne Mahre
Lubomir Marinov
Abilio Marques
Nick Mathewson
James Mansion
Nicholas Marriott
Andrey Matveev
Caitlin Mercer
Dagobert Michelsen
Mansour Moufid
Felix Nawothnig
Trond Norbye
Linus Nordberg
Richard Nyberg
Jon Oberheide
Phil Oleson
Dave Pacheco
Tassilo von Parseval
Catalin Patulea
Patrick Pelletier
Simon Perreault
Pierre Phaneuf
Ryan Phillips
Dimitre Piskyulev
Pavel Plesov
Jon Poland
Nate R
Robert Ransom
Bert JW Regeer
Peter Rosin
Maseeb Abdul Qadir
Wang Qin
Alex S
Hanna Schroeter
Ralf Schmitt
Mike Smellie
Kevin Springborn
Harlan Stenn
Steve Snyder
Dug Song
Dongsheng Song
Hannes Sowa
Ferenc Szalai
Brodie Thiesfield
Jason Toffaletti
Gisle Vanem
Bas Verhoeven
Constantine Verutin
Colin Watt
Zack Weinberg
Jardel Weyrich
Alex
Taral
propanbutan
mmadia
If we have forgotten your name, please contact us.

Просмотреть файл

@ -1,363 +0,0 @@
/* event2/event-config.h
*
* This file was generated by autoconf when libevent was built, and post-
* processed by Libevent so that its macros would have a uniform prefix.
*
* DO NOT EDIT THIS FILE.
*
* Do not rely on macros in this file existing in later versions.
*/
#ifndef _EVENT_CONFIG_H_
#define _EVENT_CONFIG_H_
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
/* Define if libevent should not allow replacing the mm functions */
/* #undef _EVENT_DISABLE_MM_REPLACEMENT */
/* Define if libevent should not be compiled with thread support */
/* #undef _EVENT_DISABLE_THREAD_SUPPORT */
/* Define if clock_gettime is available in libc */
/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
/* Define is no secure id variant is available */
/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
#define _EVENT_DNS_USE_FTIME_FOR_ID 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
/* #undef _EVENT_HAVE_ARPA_INET_H */
/* Define to 1 if you have the `clock_gettime' function. */
/* #undef _EVENT_HAVE_CLOCK_GETTIME */
/* Define if /dev/poll is available */
/* #undef _EVENT_HAVE_DEVPOLL */
/* Define to 1 if you have the <dlfcn.h> header file. */
/* #undef _EVENT_HAVE_DLFCN_H */
/* Define if your system supports the epoll system calls */
/* #undef _EVENT_HAVE_EPOLL */
/* Define to 1 if you have the `epoll_ctl' function. */
/* #undef _EVENT_HAVE_EPOLL_CTL */
/* Define to 1 if you have the `eventfd' function. */
/* #undef _EVENT_HAVE_EVENTFD */
/* Define if your system supports event ports */
/* #undef _EVENT_HAVE_EVENT_PORTS */
/* Define to 1 if you have the `fcntl' function. */
/* #undef _EVENT_HAVE_FCNTL */
/* Define to 1 if you have the <fcntl.h> header file. */
#define _EVENT_HAVE_FCNTL_H 1
/* Define to 1 if you have the `getaddrinfo' function. */
#define _EVENT_HAVE_GETADDRINFO 1
/* Define to 1 if you have the `getnameinfo' function. */
#define _EVENT_HAVE_GETNAMEINFO 1
/* Define to 1 if you have the `getprotobynumber' function. */
#define _EVENT_HAVE_GETPROTOBYNUMBER 1
/* Define to 1 if you have the `getservbyname' function. */
#define _EVENT_HAVE_GETSERVBYNAME 1
/* Define to 1 if you have the `gettimeofday' function. */
/* #define _EVENT_HAVE_GETTIMEOFDAY 1 */
/* Define to 1 if you have the `inet_aton' function. */
/* #undef _EVENT_HAVE_INET_ATON */
/* Define to 1 if you have the `inet_ntop' function. */
/* #undef _EVENT_HAVE_INET_NTOP */
/* Define to 1 if you have the `inet_pton' function. */
/* #undef _EVENT_HAVE_INET_PTON */
/* Define to 1 if you have the <inttypes.h> header file. */
/* #define _EVENT_HAVE_INTTYPES_H 1 */
/* Define to 1 if you have the `kqueue' function. */
/* #undef _EVENT_HAVE_KQUEUE */
/* Define if the system has zlib */
/* #undef _EVENT_HAVE_LIBZ */
/* Define to 1 if you have the <memory.h> header file. */
#define _EVENT_HAVE_MEMORY_H 1
/* Define to 1 if you have the `mmap' function. */
/* #undef _EVENT_HAVE_MMAP */
/* Define to 1 if you have the <netinet/in6.h> header file. */
/* #undef _EVENT_HAVE_NETINET_IN6_H */
/* Define to 1 if you have the <netinet/in.h> header file. */
/* #undef _EVENT_HAVE_NETINET_IN_H */
/* Define to 1 if you have the `pipe' function. */
/* #undef _EVENT_HAVE_PIPE */
/* Define to 1 if you have the `poll' function. */
/* #undef _EVENT_HAVE_POLL */
/* Define to 1 if you have the <poll.h> header file. */
/* #undef _EVENT_HAVE_POLL_H */
/* Define to 1 if you have the `port_create' function. */
/* #undef _EVENT_HAVE_PORT_CREATE */
/* Define to 1 if you have the <port.h> header file. */
/* #undef _EVENT_HAVE_PORT_H */
/* Define if you have POSIX threads libraries and header files. */
/* #undef _EVENT_HAVE_PTHREAD */
/* Define if we have pthreads on this system */
/* #undef _EVENT_HAVE_PTHREADS */
/* Define to 1 if the system has the type `sa_family_t'. */
/* #undef _EVENT_HAVE_SA_FAMILY_T */
/* Define to 1 if you have the `select' function. */
/* #undef _EVENT_HAVE_SELECT */
/* Define to 1 if you have the `sendfile' function. */
/* #undef _EVENT_HAVE_SENDFILE */
/* Define if F_SETFD is defined in <fcntl.h> */
/* #undef _EVENT_HAVE_SETFD */
/* Define to 1 if you have the `sigaction' function. */
/* #undef _EVENT_HAVE_SIGACTION */
/* Define to 1 if you have the `signal' function. */
#define _EVENT_HAVE_SIGNAL 1
/* Define to 1 if you have the `splice' function. */
/* #undef _EVENT_HAVE_SPLICE */
/* Define to 1 if you have the <stdarg.h> header file. */
#define _EVENT_HAVE_STDARG_H 1
/* Define to 1 if you have the <stddef.h> header file. */
#define _EVENT_HAVE_STDDEF_H 1
/* Define to 1 if you have the <stdint.h> header file. */
/* #define _EVENT_HAVE_STDINT_H 1 */
/* Define to 1 if you have the <stdlib.h> header file. */
#define _EVENT_HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define _EVENT_HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define _EVENT_HAVE_STRING_H 1
/* Define to 1 if you have the `strlcpy' function. */
/* #undef _EVENT_HAVE_STRLCPY */
/* Define to 1 if you have the `strsep' function. */
/* #undef _EVENT_HAVE_STRSEP */
/* Define to 1 if you have the `strtok_r' function. */
/* #undef _EVENT_HAVE_STRTOK_R */
/* Define to 1 if you have the `strtoll' function. */
/* #define _EVENT_HAVE_STRTOLL 1 */
#define _EVENT_HAVE_STRUCT_ADDRINFO 1
/* Define to 1 if the system has the type `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1
/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1
/* Define to 1 if the system has the type `struct sockaddr_in6'. */
#define _EVENT_HAVE_STRUCT_SOCKADDR_IN6 1
/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
/* #undef _EVENT_HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */
/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
/* #undef _EVENT_HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
/* Define to 1 if the system has the type `struct sockaddr_storage'. */
#define _EVENT_HAVE_STRUCT_SOCKADDR_STORAGE 1
/* Define to 1 if you have the <sys/devpoll.h> header file. */
/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
/* Define to 1 if you have the <sys/epoll.h> header file. */
/* #undef _EVENT_HAVE_SYS_EPOLL_H */
/* Define to 1 if you have the <sys/eventfd.h> header file. */
/* #undef _EVENT_HAVE_SYS_EVENTFD_H */
/* Define to 1 if you have the <sys/event.h> header file. */
/* #undef _EVENT_HAVE_SYS_EVENT_H */
/* Define to 1 if you have the <sys/ioctl.h> header file. */
/* #undef _EVENT_HAVE_SYS_IOCTL_H */
/* Define to 1 if you have the <sys/mman.h> header file. */
/* #undef _EVENT_HAVE_SYS_MMAN_H */
/* Define to 1 if you have the <sys/param.h> header file. */
/* #define _EVENT_HAVE_SYS_PARAM_H 1 */
/* Define to 1 if you have the <sys/queue.h> header file. */
/* #undef _EVENT_HAVE_SYS_QUEUE_H */
/* Define to 1 if you have the <sys/select.h> header file. */
/* #undef _EVENT_HAVE_SYS_SELECT_H */
/* Define to 1 if you have the <sys/sendfile.h> header file. */
/* #undef _EVENT_HAVE_SYS_SENDFILE_H */
/* Define to 1 if you have the <sys/socket.h> header file. */
/* #undef _EVENT_HAVE_SYS_SOCKET_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define _EVENT_HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
/* #define _EVENT_HAVE_SYS_TIME_H 1 */
/* Define to 1 if you have the <sys/types.h> header file. */
#define _EVENT_HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/uio.h> header file. */
/* #undef _EVENT_HAVE_SYS_UIO_H */
/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
/* #undef _EVENT_HAVE_TAILQFOREACH */
/* Define if timeradd is defined in <sys/time.h> */
/* #undef _EVENT_HAVE_TIMERADD */
/* Define if timerclear is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCLEAR 1
/* Define if timercmp is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCMP 1
/* Define if timerisset is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERISSET 1
/* Define to 1 if the system has the type `uint16_t'. */
/* #define _EVENT_HAVE_UINT16_T 1 */
/* Define to 1 if the system has the type `uint32_t'. */
/* #define _EVENT_HAVE_UINT32_T 1 */
/* Define to 1 if the system has the type `uint64_t'. */
/* #define _EVENT_HAVE_UINT64_T 1 */
/* Define to 1 if the system has the type `uint8_t'. */
/* #define _EVENT_HAVE_UINT8_T 1 */
/* Define to 1 if you have the <unistd.h> header file. */
/* #define _EVENT_HAVE_UNISTD_H 1 */
/* Define to 1 if you have the `vasprintf' function. */
/* #undef _EVENT_HAVE_VASPRINTF */
/* Define if kqueue works correctly with pipes */
/* #undef _EVENT_HAVE_WORKING_KQUEUE */
/* Numeric representation of the version */
#define _EVENT_NUMERIC_VERSION 0x02001600
/* Name of package */
#define _EVENT_PACKAGE "libevent"
/* Define to the address where bug reports for this package should be sent. */
#define _EVENT_PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define _EVENT_PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define _EVENT_PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define _EVENT_PACKAGE_TARNAME ""
/* Define to the version of this package. */
#define _EVENT_PACKAGE_VERSION ""
/* Define to necessary symbol if this constant uses a non-standard name on
your system. */
/* #undef _EVENT_PTHREAD_CREATE_JOINABLE */
/* The size of a `int', as computed by sizeof. */
#define _EVENT_SIZEOF_INT 4
/* The size of a `long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG 4
/* The size of a `long long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG_LONG 8
/* The size of a `short', as computed by sizeof. */
#define _EVENT_SIZEOF_SHORT 2
/* The size of `size_t', as computed by sizeof. */
#ifdef _WIN64
#define _EVENT_SIZEOF_SIZE_T 8
#else
#define _EVENT_SIZEOF_SIZE_T 4
#endif
/* The size of `void *', as computed by sizeof. */
#ifdef _WIN64
#define _EVENT_SIZEOF_VOID_P 8
#else
#define _EVENT_SIZEOF_VOID_P 4
#endif
/* Define to 1 if you have the ANSI C header files. */
#define _EVENT_STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define _EVENT_TIME_WITH_SYS_TIME 1
/* Version number of package */
#define _EVENT_VERSION "2.0.22-stable"
/* Define to appropriate substitute if compiler doesn't have __func__ */
#define _EVENT___func__ __FUNCTION__
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef _EVENT_const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef _EVENT___cplusplus
#define _EVENT_inline __inline
#endif
/* Define to `int' if <sys/types.h> does not define. */
/* #undef _EVENT_pid_t */
/* Define to `unsigned' if <sys/types.h> does not define. */
/* #undef _EVENT_size_t */
/* Define to unsigned int if you don't have it */
#define _EVENT_socklen_t unsigned int
/* Define to `int' if <sys/types.h> does not define. */
#define _EVENT_ssize_t SSIZE_T
#endif

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,561 +0,0 @@
/* Portable arc4random.c based on arc4random.c from OpenBSD.
* Portable version by Chris Davis, adapted for Libevent by Nick Mathewson
* Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson
* Copyright (c) 2010-2012 Niels Provos and Nick Mathewson
*
* Note that in Libevent, this file isn't compiled directly. Instead,
* it's included from evutil_rand.c
*/
/*
* Copyright (c) 1996, David Mazieres <dm@uun.org>
* Copyright (c) 2008, Damien Miller <djm@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Arc4 random number generator for OpenBSD.
*
* This code is derived from section 17.1 of Applied Cryptography,
* second edition, which describes a stream cipher allegedly
* compatible with RSA Labs "RC4" cipher (the actual description of
* which is a trade secret). The same algorithm is used as a stream
* cipher called "arcfour" in Tatu Ylonen's ssh package.
*
* Here the stream cipher has been modified always to include the time
* when initializing the state. That makes it impossible to
* regenerate the same random sequence twice, so this can't be used
* for encryption, but will generate good random numbers.
*
* RC4 is a registered trademark of RSA Laboratories.
*/
#ifndef ARC4RANDOM_EXPORT
#define ARC4RANDOM_EXPORT
#endif
#ifndef ARC4RANDOM_UINT32
#define ARC4RANDOM_UINT32 uint32_t
#endif
#ifndef ARC4RANDOM_NO_INCLUDES
#ifdef WIN32
#include <wincrypt.h>
#include <process.h>
#else
#include <fcntl.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/time.h>
#ifdef _EVENT_HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#endif
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#endif
/* Add platform entropy 32 bytes (256 bits) at a time. */
#define ADD_ENTROPY 32
/* Re-seed from the platform RNG after generating this many bytes. */
#define BYTES_BEFORE_RESEED 1600000
struct arc4_stream {
unsigned char i;
unsigned char j;
unsigned char s[256];
};
#ifdef WIN32
#define getpid _getpid
#define pid_t int
#endif
static int rs_initialized;
static struct arc4_stream rs;
static pid_t arc4_stir_pid;
static int arc4_count;
static int arc4_seeded_ok;
static inline unsigned char arc4_getbyte(void);
/* Reset the generator to the RC4 identity permutation with both
 * stream indices at zero.  No entropy is mixed in here; callers are
 * expected to seed/stir afterwards. */
static inline void
arc4_init(void)
{
	int idx;

	rs.i = 0;
	rs.j = 0;
	for (idx = 0; idx < 256; idx++)
		rs.s[idx] = idx;
}
/* Mix 'datlen' bytes of caller-supplied data into the RC4 state.
 * Every one of the 256 state bytes is swapped based on the next input
 * byte; the input repeats via 'n % datlen' when datlen < 256, and
 * bytes past the first 256 are never read here (the disabled
 * arc4random_addrandom below feeds longer inputs in 256-byte chunks
 * for that reason).
 * NOTE(review): callers visible in this file reach here while holding
 * _ARC4_LOCK(); confirm the locking contract against the full file. */
static inline void
arc4_addrandom(const unsigned char *dat, int datlen)
{
	int n;
	unsigned char si;

	/* Pre-decrement so the first loop iteration's increment starts
	 * mixing at the current rs.i. */
	rs.i--;
	for (n = 0; n < 256; n++) {
		rs.i = (rs.i + 1);
		si = rs.s[rs.i];
		rs.j = (rs.j + si + dat[n % datlen]);
		rs.s[rs.i] = rs.s[rs.j];
		rs.s[rs.j] = si;
	}
	rs.j = rs.i;
}
#ifndef WIN32
/* Read up to 'count' bytes from 'fd' into 'buf', retrying on short
 * reads.  Stops early at end-of-file.  Returns the number of bytes
 * actually read, or -1 on the first read error. */
static ssize_t
read_all(int fd, unsigned char *buf, size_t count)
{
	size_t total = 0;

	while (total < count) {
		ssize_t got = read(fd, buf + total, count - total);
		if (got < 0)
			return -1;
		if (got == 0)
			break;	/* EOF */
		total += (size_t)got;
	}
	return (ssize_t)total;
}
#endif
#ifdef WIN32
#define TRY_SEED_WIN32
/* Seed the arc4 state from the Windows CryptoAPI RNG.
 * Acquires a process-lifetime HCRYPTPROV on first use, then mixes
 * ADD_ENTROPY bytes from CryptGenRandom into the state.
 * Returns 0 on success, -1 if the provider or the RNG call fails. */
static int
arc4_seed_win32(void)
{
	/* This is adapted from Tor's crypto_seed_rng() */
	static int provider_set = 0;
	static HCRYPTPROV provider;
	unsigned char buf[ADD_ENTROPY];

	if (!provider_set) {
		if (!CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
		    CRYPT_VERIFYCONTEXT)) {
			/* NTE_BAD_KEYSET is tolerated; any other error
			 * makes this seeding method unusable. */
			if (GetLastError() != (DWORD)NTE_BAD_KEYSET)
				return -1;
		}
		provider_set = 1;
	}
	if (!CryptGenRandom(provider, sizeof(buf), buf))
		return -1;
	arc4_addrandom(buf, sizeof(buf));
	/* Wipe the entropy copy from the stack before returning. */
	evutil_memclear_(buf, sizeof(buf));
	arc4_seeded_ok = 1;
	return 0;
}
#endif
#if defined(_EVENT_HAVE_SYS_SYSCTL_H) && defined(_EVENT_HAVE_SYSCTL)
#if _EVENT_HAVE_DECL_CTL_KERN && _EVENT_HAVE_DECL_KERN_RANDOM && _EVENT_HAVE_DECL_RANDOM_UUID
#define TRY_SEED_SYSCTL_LINUX
/* Seed the arc4 state via the Linux RANDOM_UUID sysctl.
 * Returns 0 on success, -1 on failure. */
static int
arc4_seed_sysctl_linux(void)
{
	/* Based on code by William Ahern, this function tries to use the
	 * RANDOM_UUID sysctl to get entropy from the kernel.  This can work
	 * even if /dev/urandom is inaccessible for some reason (e.g., we're
	 * running in a chroot). */
	int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
	unsigned char buf[ADD_ENTROPY];
	size_t len, n;
	unsigned i;
	int any_set;

	memset(buf, 0, sizeof(buf));

	/* 'n' is in-out for sysctl: it comes back as the number of bytes
	 * actually written, so loop until the whole buffer is filled. */
	for (len = 0; len < sizeof(buf); len += n) {
		n = sizeof(buf) - len;
		if (0 != sysctl(mib, 3, &buf[len], &n, NULL, 0))
			return -1;
	}
	/* make sure that the buffer actually got set. */
	for (i=0,any_set=0; i<sizeof(buf); ++i) {
		any_set |= buf[i];
	}
	if (!any_set)
		return -1;

	arc4_addrandom(buf, sizeof(buf));
	/* Wipe the entropy copy from the stack before returning. */
	evutil_memclear_(buf, sizeof(buf));
	arc4_seeded_ok = 1;
	return 0;
}
#endif
#if _EVENT_HAVE_DECL_CTL_KERN && _EVENT_HAVE_DECL_KERN_ARND
#define TRY_SEED_SYSCTL_BSD
/* Seed the arc4 state via the BSD KERN_ARND sysctl.
 * Based on code from William Ahern and from OpenBSD; this can work
 * even if /dev/urandom is inaccessible for some reason (e.g., we're
 * running in a chroot).  First tries to fetch the whole buffer in one
 * call; some kernels only return sizeof(unsigned) bytes per KERN_ARND
 * call, so on failure fall back to filling one unsigned at a time.
 * Returns 0 on success, -1 on failure. */
static int
arc4_seed_sysctl_bsd(void)
{
	int mib[] = { CTL_KERN, KERN_ARND };
	unsigned char buf[ADD_ENTROPY];
	size_t len, n;
	size_t i;
	int any_set;

	memset(buf, 0, sizeof(buf));

	len = sizeof(buf);
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
		for (len = 0; len < sizeof(buf); len += sizeof(unsigned)) {
			n = sizeof(unsigned);
			/* Clamp the final chunk to the space remaining.
			 * (Bug fix: this used to compute the negative
			 * "len - sizeof(buf)", which as a size_t is a
			 * huge request; it was unreachable only because
			 * ADD_ENTROPY happens to be a multiple of
			 * sizeof(unsigned).) */
			if (n + len > sizeof(buf))
				n = sizeof(buf) - len;
			if (sysctl(mib, 2, &buf[len], &n, NULL, 0) == -1)
				return -1;
		}
	}
	/* Make sure that the buffer actually got set. */
	for (i = 0, any_set = 0; i < sizeof(buf); ++i) {
		any_set |= buf[i];
	}
	if (!any_set)
		return -1;

	arc4_addrandom(buf, sizeof(buf));
	/* Wipe the entropy copy from the stack before returning. */
	evutil_memclear_(buf, sizeof(buf));
	arc4_seeded_ok = 1;
	return 0;
}
#endif
#endif /* defined(_EVENT_HAVE_SYS_SYSCTL_H) */
#ifdef __linux__
#define TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
/* Seed the arc4 state by repeatedly reading and hex-decoding
 * /proc/sys/kernel/random/uuid until at least ADD_ENTROPY decoded
 * bytes have been mixed in.  Returns 0 on success, -1 on failure. */
static int
arc4_seed_proc_sys_kernel_random_uuid(void)
{
	/* Occasionally, somebody will make /proc/sys accessible in a chroot,
	 * but not /dev/urandom.  Let's try /proc/sys/kernel/random/uuid.
	 * Its format is stupid, so we need to decode it from hex.
	 */
	int fd;
	char buf[128];
	unsigned char entropy[64];
	int bytes, n, i, nybbles;

	for (bytes = 0; bytes<ADD_ENTROPY; ) {
		fd = evutil_open_closeonexec("/proc/sys/kernel/random/uuid", O_RDONLY, 0);
		if (fd < 0)
			return -1;
		n = read(fd, buf, sizeof(buf));
		close(fd);
		if (n<=0)
			return -1;
		memset(entropy, 0, sizeof(entropy));
		/* Pack pairs of hex digits into bytes; non-hex characters
		 * (dashes, newline) are skipped. */
		for (i=nybbles=0; i<n; ++i) {
			if (EVUTIL_ISXDIGIT(buf[i])) {
				int nyb = evutil_hex_char_to_int(buf[i]);
				if (nybbles & 1) {
					entropy[nybbles/2] |= nyb;
				} else {
					entropy[nybbles/2] |= nyb<<4;
				}
				++nybbles;
			}
		}
		/* Fewer than one full byte decoded means the file is not
		 * giving us usable data; bail out. */
		if (nybbles < 2)
			return -1;
		arc4_addrandom(entropy, nybbles/2);
		bytes += nybbles/2;
	}

	/* Wipe the entropy copies from the stack before returning. */
	evutil_memclear_(entropy, sizeof(entropy));
	evutil_memclear_(buf, sizeof(buf));
	arc4_seeded_ok = 1;
	return 0;
}
#endif
#ifndef WIN32
#define TRY_SEED_URANDOM
static char *arc4random_urandom_filename = NULL;
/* Read ADD_ENTROPY bytes from the device at 'fname' and mix them into
 * the arc4 state.  Returns 0 on success, -1 on open failure or short
 * read.  (A -1 from read_all converts to SIZE_MAX in the unsigned 'n',
 * which also fails the length check below, so errors are caught.) */
static int arc4_seed_urandom_helper_(const char *fname)
{
	unsigned char buf[ADD_ENTROPY];
	int fd;
	size_t n;

	fd = evutil_open_closeonexec(fname, O_RDONLY, 0);
	if (fd<0)
		return -1;
	n = read_all(fd, buf, sizeof(buf));
	close(fd);
	if (n != sizeof(buf))
		return -1;
	arc4_addrandom(buf, sizeof(buf));
	/* Wipe the entropy copy from the stack before returning. */
	evutil_memclear_(buf, sizeof(buf));
	arc4_seeded_ok = 1;
	return 0;
}
/* Seed the arc4 state from a random device.  If a filename override
 * was installed (arc4random_urandom_filename), that file is used
 * exclusively; otherwise try /dev/srandom, /dev/urandom, /dev/random
 * in order, stopping at the first success.  Returns 0 on success,
 * -1 if every candidate failed. */
static int
arc4_seed_urandom(void)
{
	static const char *device_paths[] = {
		"/dev/srandom", "/dev/urandom", "/dev/random", NULL
	};
	const char **path;

	/* An explicit override wins; report its result directly. */
	if (arc4random_urandom_filename)
		return arc4_seed_urandom_helper_(arc4random_urandom_filename);

	for (path = device_paths; *path != NULL; ++path) {
		if (arc4_seed_urandom_helper_(*path) == 0)
			return 0;
	}
	return -1;
}
#endif
/* Try every compiled-in entropy source, mixing each success into the
 * arc4 state.  All sources are attempted regardless of earlier
 * successes, except the Linux sysctl source, which is skipped once
 * another source worked (see the in-line note).  Returns 0 if at
 * least one source succeeded, -1 if none did. */
static int
arc4_seed(void)
{
	int ok = 0;
	/* We try every method that might work, and don't give up even if one
	 * does seem to work.  There's no real harm in over-seeding, and if
	 * one of these sources turns out to be broken, that would be bad. */
#ifdef TRY_SEED_WIN32
	if (0 == arc4_seed_win32())
		ok = 1;
#endif
#ifdef TRY_SEED_URANDOM
	if (0 == arc4_seed_urandom())
		ok = 1;
#endif
#ifdef TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
	/* Only consulted when no explicit urandom override is set. */
	if (arc4random_urandom_filename == NULL &&
	    0 == arc4_seed_proc_sys_kernel_random_uuid())
		ok = 1;
#endif
#ifdef TRY_SEED_SYSCTL_LINUX
	/* Apparently Linux is deprecating sysctl, and spewing warning
	 * messages when you try to use it. */
	if (!ok && 0 == arc4_seed_sysctl_linux())
		ok = 1;
#endif
#ifdef TRY_SEED_SYSCTL_BSD
	if (0 == arc4_seed_sysctl_bsd())
		ok = 1;
#endif
	return ok ? 0 : -1;
}
/* (Re)key the generator: initialize the state array on first use,
 * pull fresh entropy via arc4_seed(), and discard early keystream.
 * Returns 0 on success, -1 if seeding has never succeeded
 * (arc4_seeded_ok is sticky, so one past success keeps later
 * re-stirs going even if this particular seed attempt failed). */
static int
arc4_stir(void)
{
	int i;

	if (!rs_initialized) {
		arc4_init();
		rs_initialized = 1;
	}

	arc4_seed();
	if (!arc4_seeded_ok)
		return -1;

	/*
	 * Discard early keystream, as per recommendations in
	 * "Weaknesses in the Key Scheduling Algorithm of RC4" by
	 * Scott Fluhrer, Itsik Mantin, and Adi Shamir.
	 * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
	 *
	 * Ilya Mironov's "(Not So) Random Shuffles of RC4" suggests that
	 * we drop at least 2*256 bytes, with 12*256 as a conservative
	 * value.
	 *
	 * RFC4345 says to drop 6*256.
	 *
	 * At least some versions of this code drop 4*256, in a mistaken
	 * belief that "words" in the Fluhrer/Mantin/Shamir paper refers
	 * to processor words.
	 *
	 * We add another sect to the cargo cult, and choose 12*256.
	 */
	for (i = 0; i < 12*256; i++)
		(void)arc4_getbyte();

	/* Output-byte budget before the next automatic re-seed. */
	arc4_count = BYTES_BEFORE_RESEED;

	return 0;
}
/* Re-stir the generator when the output budget is exhausted, the
 * state was never initialized, or the process has forked since the
 * last stir (pid change). */
static void
arc4_stir_if_needed(void)
{
	pid_t current = getpid();

	if (arc4_count > 0 && rs_initialized && arc4_stir_pid == current)
		return;	/* state is still fresh for this process */

	arc4_stir_pid = current;
	arc4_stir();
}
/* Produce one keystream byte with the standard RC4 PRGA step:
 * advance i, fold s[i] into j, swap s[i] and s[j], and emit
 * s[(s[i] + s[j]) & 0xff].  The unsigned char indices wrap mod 256
 * automatically.
 * NOTE(review): callers in this file hold _ARC4_LOCK() and have
 * stirred the state first; confirm that contract for new callers. */
static inline unsigned char
arc4_getbyte(void)
{
	unsigned char si, sj;

	rs.i = (rs.i + 1);
	si = rs.s[rs.i];
	rs.j = (rs.j + si);
	sj = rs.s[rs.j];
	rs.s[rs.i] = sj;
	rs.s[rs.j] = si;
	return (rs.s[(si + sj) & 0xff]);
}
/* Assemble a 32-bit value from four keystream bytes, most significant
 * byte first (same byte order as pulling and shifting by 24/16/8/0). */
static inline unsigned int
arc4_getword(void)
{
	unsigned int word = 0;
	int shift;

	for (shift = 24; shift >= 0; shift -= 8)
		word |= (unsigned int)arc4_getbyte() << shift;
	return word;
}
#ifndef ARC4RANDOM_NOSTIR
/* Public wrapper around arc4_stir(), serialized via _ARC4_LOCK().
 * Returns 0 on success, -1 if seeding has never succeeded. */
ARC4RANDOM_EXPORT int
arc4random_stir(void)
{
	int val;
	_ARC4_LOCK();
	val = arc4_stir();
	_ARC4_UNLOCK();
	return val;
}
#endif
/**** OMPI CHANGE ****/
/* We don't use arc4random_addrandom anywhere,
 * and some OS's don't like it - so just don't
 * build the darn thing */
/* The entire definition below is compiled out by "#if 0"; it is kept
 * only so diffs against upstream libevent stay readable. */
#if 0
#ifndef ARC4RANDOM_NOADDRANDOM
ARC4RANDOM_EXPORT void
arc4random_addrandom(const unsigned char *dat, int datlen)
{
	int j;
	_ARC4_LOCK();
	if (!rs_initialized)
		arc4_stir();
	for (j = 0; j < datlen; j += 256) {
		/* arc4_addrandom() ignores all but the first 256 bytes of
		 * its input.  We want to make sure to look at ALL the
		 * data in 'dat', just in case the user is doing something
		 * crazy like passing us all the files in /var/log. */
		arc4_addrandom(dat + j, datlen - j);
	}
	_ARC4_UNLOCK();
}
#endif
#endif
#ifndef ARC4RANDOM_NORANDOM
/* Return 32 random bits.  Charges 4 bytes against the re-seed budget
 * *before* checking whether a stir is needed, then pulls one word of
 * keystream.  Serialized via _ARC4_LOCK(). */
ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
arc4random(void)
{
	ARC4RANDOM_UINT32 val;
	_ARC4_LOCK();
	arc4_count -= 4;
	arc4_stir_if_needed();
	val = arc4_getword();
	_ARC4_UNLOCK();
	return val;
}
#endif
/* Fill 'n' bytes of '_buf' with keystream.  The buffer is filled from
 * the last byte toward the first, and the re-seed budget is charged
 * one byte at a time so a stir can occur mid-fill.  Serialized via
 * _ARC4_LOCK(). */
ARC4RANDOM_EXPORT void
arc4random_buf(void *_buf, size_t n)
{
	unsigned char *buf = _buf;
	_ARC4_LOCK();
	arc4_stir_if_needed();
	while (n--) {
		if (--arc4_count <= 0)
			arc4_stir();
		buf[n] = arc4_getbyte();
	}
	_ARC4_UNLOCK();
}
#ifndef ARC4RANDOM_NOUNIFORM
/*
 * Calculate a uniformly distributed random number less than upper_bound
 * avoiding "modulo bias".
 *
 * Uniformity is achieved by generating new random numbers until the one
 * returned is outside the range [0, 2**32 % upper_bound).  This
 * guarantees the selected random number will be inside
 * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
 * after reduction modulo upper_bound.
 *
 * Returns 0 when upper_bound is 0 or 1, since no other result is
 * possible (or meaningful) in those cases.
 */
ARC4RANDOM_EXPORT unsigned int
arc4random_uniform(unsigned int upper_bound)
{
	ARC4RANDOM_UINT32 r, min;

	if (upper_bound < 2)
		return 0;

#if (UINT_MAX > 0xffffffffUL)
	min = 0x100000000UL % upper_bound;
#else
	/* Calculate (2**32 % upper_bound) avoiding 64-bit math */
	if (upper_bound > 0x80000000)
		min = 1 + ~upper_bound;		/* 2**32 - upper_bound */
	else {
		/* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */
		min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound;
	}
#endif

	/*
	 * This could theoretically loop forever but each retry has
	 * p > 0.5 (worst case, usually far better) of selecting a
	 * number inside the range we need, so it should rarely need
	 * to re-roll.
	 */
	for (;;) {
		r = arc4random();
		if (r >= min)
			break;
	}

	return r % upper_bound;
}
#endif

Просмотреть файл

@ -1,15 +0,0 @@
#!/bin/sh
if [ -x "`which autoreconf 2>/dev/null`" ] ; then
exec autoreconf -ivf
fi
LIBTOOLIZE=libtoolize
SYSNAME=`uname`
if [ "x$SYSNAME" = "xDarwin" ] ; then
LIBTOOLIZE=glibtoolize
fi
aclocal -I m4 && \
autoheader && \
$LIBTOOLIZE && \
autoconf && \
automake --add-missing --force-missing --copy

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,325 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
@file buffer_iocp.c
This module implements overlapped read and write functions for evbuffer
objects on Windows.
*/
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/util.h"
#include "event2/thread.h"
#include "event2/event-config.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "iocp-internal.h"
#include "mm-internal.h"
#include <winsock2.h>
#include <windows.h>
#include <stdio.h>
#define MAX_WSABUFS 16
/** An evbuffer that can handle overlapped IO. */
struct evbuffer_overlapped {
struct evbuffer buffer;
/** The socket that we're doing overlapped IO on. */
evutil_socket_t fd;
/** pending I/O type */
unsigned read_in_progress : 1;
unsigned write_in_progress : 1;
/** The first pinned chain in the buffer. */
struct evbuffer_chain *first_pinned;
/** How many chains are pinned; how many of the fields in buffers
* are we using. */
int n_buffers;
WSABUF buffers[MAX_WSABUFS];
};
/** Recover the evbuffer_overlapped wrapper for 'buf', or NULL when 'buf'
 * is NULL or is a plain (non-overlapped) evbuffer. */
static inline struct evbuffer_overlapped *
upcast_evbuffer(struct evbuffer *buf)
{
	if (buf == NULL)
		return NULL;
	if (!buf->is_overlapped)
		return NULL;
	return EVUTIL_UPCAST(buf, struct evbuffer_overlapped, buffer);
}
/** Unpin all the chains noted as pinned in 'eo'.
 * Walks exactly n_buffers chains starting at first_pinned, dropping
 * 'flag' from each.  'next' is captured before the unpin call
 * (presumably because unpinning may release the chain -- confirm in
 * evbuffer-internal.h). */
static void
pin_release(struct evbuffer_overlapped *eo, unsigned flag)
{
	int i;
	struct evbuffer_chain *next, *chain = eo->first_pinned;

	for (i = 0; i < eo->n_buffers; ++i) {
		EVUTIL_ASSERT(chain);
		next = chain->next;
		_evbuffer_chain_unpin(chain, flag);
		chain = next;
	}
}
/* Completion handler for a read started by evbuffer_launch_read():
 * 'nBytes' is how many bytes the overlapped operation delivered.
 * Credits the filled chains, unpins them, undoes the launch-time
 * freeze, fires user callbacks, and drops the launch-time reference. */
void
evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
	struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
	struct evbuffer_chain **chainp;
	size_t remaining, len;
	unsigned i;

	EVBUFFER_LOCK(evbuf);
	EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress);
	EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? */

	evbuffer_unfreeze(evbuf, 0);

	/* Locate the first chain the read was filling: the last chain
	 * with data if it was pinned for reading, else the one after it. */
	chainp = evbuf->last_with_datap;
	if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R))
		chainp = &(*chainp)->next;
	remaining = nBytes;
	/* Distribute nBytes across the pinned chains, each getting at
	 * most the length its WSABUF covered. */
	for (i = 0; remaining > 0 && i < (unsigned)buf->n_buffers; ++i) {
		EVUTIL_ASSERT(*chainp);
		len = buf->buffers[i].len;
		if (remaining < len)
			len = remaining;
		(*chainp)->off += len;
		evbuf->last_with_datap = chainp;
		remaining -= len;
		chainp = &(*chainp)->next;
	}

	pin_release(buf, EVBUFFER_MEM_PINNED_R);

	buf->read_in_progress = 0;

	evbuf->total_len += nBytes;
	evbuf->n_add_for_cb += nBytes;

	evbuffer_invoke_callbacks(evbuf);

	/* Drop the reference taken at launch time; also unlocks. */
	_evbuffer_decref_and_unlock(evbuf);
}
/* Completion handler for a write started by evbuffer_launch_write():
 * 'nBytes' is how many bytes the overlapped WSASend() wrote.  Drains
 * the sent data, unpins the chains, undoes the launch-time freeze, and
 * drops the launch-time reference. */
void
evbuffer_commit_write(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
	struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);

	EVBUFFER_LOCK(evbuf);
	EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress);
	evbuffer_unfreeze(evbuf, 1); /* undo the freeze from launch_write */
	evbuffer_drain(evbuf, nBytes);
	pin_release(buf,EVBUFFER_MEM_PINNED_W);
	buf->write_in_progress = 0;
	/* Drop the reference taken at launch time; also unlocks. */
	_evbuffer_decref_and_unlock(evbuf);
}
/** Allocate a new evbuffer capable of overlapped I/O on socket 'fd'.
 * Returns a pointer to the embedded plain evbuffer, or NULL on OOM. */
struct evbuffer *
evbuffer_overlapped_new(evutil_socket_t fd)
{
	struct evbuffer_overlapped *evo;

	evo = mm_calloc(1, sizeof(struct evbuffer_overlapped));
	if (evo == NULL)
		return NULL;

	evo->fd = fd;
	evo->buffer.refcnt = 1;
	evo->buffer.is_overlapped = 1;
	evo->buffer.last_with_datap = &evo->buffer.first;
	TAILQ_INIT(&evo->buffer.callbacks);

	return &evo->buffer;
}
/* Start an overlapped WSASend() of up to 'at_most' bytes from 'buf',
 * with completion reported through 'ol'.
 *
 * On success the chains being sent are pinned and the front of the
 * buffer is frozen until evbuffer_commit_write() runs from the
 * completion handler.  Returns 0 on success (including the
 * nothing-to-write case), -1 on failure. */
int
evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
    struct event_overlapped *ol)
{
	struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
	int r = -1;
	int i;
	struct evbuffer_chain *chain;
	DWORD bytesSent;

	if (!buf_o) {
		/* No buffer, or it isn't overlapped.  (Checking only
		 * 'buf' here, as the original did, let a plain evbuffer
		 * through to crash on the buf_o dereferences below; this
		 * mirrors the check in evbuffer_launch_read().) */
		return -1;
	}

	EVBUFFER_LOCK(buf);
	EVUTIL_ASSERT(!buf_o->read_in_progress);
	if (buf->freeze_start || buf_o->write_in_progress)
		goto done;
	if (!buf->total_len) {
		/* Nothing to write */
		r = 0;
		goto done;
	} else if (at_most < 0 || (size_t)at_most > buf->total_len) {
		at_most = buf->total_len;
	}
	evbuffer_freeze(buf, 1);

	buf_o->first_pinned = NULL;
	buf_o->n_buffers = 0;
	memset(buf_o->buffers, 0, sizeof(buf_o->buffers));

	/* Build one WSABUF per chain until 'at_most' bytes are covered or
	 * we run out of WSABUF slots, pinning each chain as we go. */
	chain = buf_o->first_pinned = buf->first;

	for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
		WSABUF *b = &buf_o->buffers[i];
		b->buf = (char*)( chain->buffer + chain->misalign );
		_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_W);

		if ((size_t)at_most > chain->off) {
			/* XXXX Cast is safe for now, since win32 has no
			   mmaped chains.  But later, we need to have this
			   add more WSAbufs if chain->off is greater than
			   ULONG_MAX */
			b->len = (unsigned long)chain->off;
			at_most -= chain->off;
		} else {
			b->len = (unsigned long)at_most;
			++i;
			break;
		}
	}

	buf_o->n_buffers = i;
	/* Hold a reference while the overlapped I/O is outstanding; it is
	 * released in evbuffer_commit_write() or on the error path below. */
	_evbuffer_incref(buf);
	if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
	    &ol->overlapped, NULL)) {
		int error = WSAGetLastError();
		if (error != WSA_IO_PENDING) {
			/* An actual error. */
			pin_release(buf_o, EVBUFFER_MEM_PINNED_W);
			evbuffer_unfreeze(buf, 1);
			evbuffer_free(buf); /* decref */
			goto done;
		}
	}

	buf_o->write_in_progress = 1;
	r = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return r;
}
/* Start an overlapped WSARecv() of up to 'at_most' bytes into 'buf',
 * with completion reported through 'ol'.  On success the target chains
 * are pinned and the buffer is frozen until evbuffer_commit_read()
 * runs.  Returns 0 on success, -1 on failure. */
int
evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
    struct event_overlapped *ol)
{
	struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
	int r = -1, i;
	int nvecs;
	int npin=0;
	struct evbuffer_chain *chain=NULL, **chainp;
	DWORD bytesRead;
	DWORD flags = 0;
	struct evbuffer_iovec vecs[MAX_WSABUFS];

	if (!buf_o)
		return -1;
	EVBUFFER_LOCK(buf);
	EVUTIL_ASSERT(!buf_o->write_in_progress);
	if (buf->freeze_end || buf_o->read_in_progress)
		goto done;

	buf_o->first_pinned = NULL;
	buf_o->n_buffers = 0;
	memset(buf_o->buffers, 0, sizeof(buf_o->buffers));

	/* Ensure 'at_most' bytes of space spread over at most MAX_WSABUFS
	 * chains, then map that space into iovecs. */
	if (_evbuffer_expand_fast(buf, at_most, MAX_WSABUFS) == -1)
		goto done;
	evbuffer_freeze(buf, 0);

	nvecs = _evbuffer_read_setup_vecs(buf, at_most,
	    vecs, MAX_WSABUFS, &chainp, 1);
	for (i=0;i<nvecs;++i) {
		WSABUF_FROM_EVBUFFER_IOV(
			&buf_o->buffers[i],
			&vecs[i]);
	}

	buf_o->n_buffers = nvecs;
	buf_o->first_pinned = chain = *chainp;

	/* Pin every chain from the first target onward (presumably to
	 * keep them alive while the overlapped read is outstanding). */
	npin=0;
	for ( ; chain; chain = chain->next) {
		_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_R);
		++npin;
	}
	EVUTIL_ASSERT(npin == nvecs);

	/* Hold a reference for the duration of the overlapped operation;
	 * released in evbuffer_commit_read() or on the error path below. */
	_evbuffer_incref(buf);
	if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
		&ol->overlapped, NULL)) {
		int error = WSAGetLastError();
		if (error != WSA_IO_PENDING) {
			/* An actual error. */
			pin_release(buf_o, EVBUFFER_MEM_PINNED_R);
			evbuffer_unfreeze(buf, 0);
			evbuffer_free(buf); /* decref */
			goto done;
		}
	}

	buf_o->read_in_progress = 1;
	r = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return r;
}
evutil_socket_t
_evbuffer_overlapped_get_fd(struct evbuffer *buf)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
return buf_o ? buf_o->fd : -1;
}
/* Point an overlapped evbuffer at a different socket.  A no-op when
 * 'buf' is not overlapped. */
void
_evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd)
{
	struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);

	EVBUFFER_LOCK(buf);
	/* XXX is this right?, should it cancel current I/O operations? */
	if (buf_o != NULL)
		buf_o->fd = fd;
	EVBUFFER_UNLOCK(buf);
}

Просмотреть файл

@ -1,410 +0,0 @@
/*
* Copyright (c) 2008-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _BUFFEREVENT_INTERNAL_H_
#define _BUFFEREVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include "event2/util.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "ratelim-internal.h"
#include "event2/bufferevent_struct.h"
/* These flags are reasons that we might be declining to actually enable
reading or writing on a bufferevent.
*/
/* On a all bufferevents, for reading: used when we have read up to the
watermark value.
On a filtering bufferevent, for writing: used when the underlying
bufferevent's write buffer has been filled up to its watermark
value.
*/
#define BEV_SUSPEND_WM 0x01
/* On a base bufferevent: when we have emptied a bandwidth buckets */
#define BEV_SUSPEND_BW 0x02
/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
#define BEV_SUSPEND_BW_GROUP 0x04
/* On a socket bufferevent: can't do any operations while we're waiting for
* name lookup to finish. */
#define BEV_SUSPEND_LOOKUP 0x08
/* On a base bufferevent, for reading: used when a filter has choked this
* (underlying) bufferevent because it has stopped reading from it. */
#define BEV_SUSPEND_FILT_READ 0x10
typedef ev_uint16_t bufferevent_suspend_flags;
struct bufferevent_rate_limit_group {
/** List of all members in the group */
TAILQ_HEAD(rlim_group_member_list, bufferevent_private) members;
/** Current limits for the group. */
struct ev_token_bucket rate_limit;
struct ev_token_bucket_cfg rate_limit_cfg;
/** True iff we don't want to read from any member of the group.until
* the token bucket refills. */
unsigned read_suspended : 1;
/** True iff we don't want to write from any member of the group.until
* the token bucket refills. */
unsigned write_suspended : 1;
/** True iff we were unable to suspend one of the bufferevents in the
* group for reading the last time we tried, and we should try
* again. */
unsigned pending_unsuspend_read : 1;
/** True iff we were unable to suspend one of the bufferevents in the
* group for writing the last time we tried, and we should try
* again. */
unsigned pending_unsuspend_write : 1;
/*@{*/
/** Total number of bytes read or written in this group since last
* reset. */
ev_uint64_t total_read;
ev_uint64_t total_written;
/*@}*/
/** The number of bufferevents in the group. */
int n_members;
/** The smallest number of bytes that any member of the group should
* be limited to read or write at a time. */
ev_ssize_t min_share;
ev_ssize_t configured_min_share;
/** Timeout event that goes off once a tick, when the bucket is ready
* to refill. */
struct event master_refill_event;
/** Lock to protect the members of this group. This lock should nest
* within every bufferevent lock: if you are holding this lock, do
* not assume you can lock another bufferevent. */
void *lock;
};
/** Fields for rate-limiting a single bufferevent. */
struct bufferevent_rate_limit {
/* Linked-list elements for storing this bufferevent_private in a
* group.
*
* Note that this field is supposed to be protected by the group
* lock */
TAILQ_ENTRY(bufferevent_private) next_in_group;
/** The rate-limiting group for this bufferevent, or NULL if it is
* only rate-limited on its own. */
struct bufferevent_rate_limit_group *group;
/* This bufferevent's current limits. */
struct ev_token_bucket limit;
/* Pointer to the rate-limit configuration for this bufferevent.
* Can be shared. XXX reference-count this? */
struct ev_token_bucket_cfg *cfg;
/* Timeout event used when one this bufferevent's buckets are
* empty. */
struct event refill_bucket_event;
};
/** Parts of the bufferevent structure that are shared among all bufferevent
* types, but not exposed in bufferevent_struct.h. */
struct bufferevent_private {
/** The underlying bufferevent structure. */
struct bufferevent bev;
/** Evbuffer callback to enforce watermarks on input. */
struct evbuffer_cb_entry *read_watermarks_cb;
/** If set, we should free the lock when we free the bufferevent. */
unsigned own_lock : 1;
/** Flag: set if we have deferred callbacks and a read callback is
* pending. */
unsigned readcb_pending : 1;
/** Flag: set if we have deferred callbacks and a write callback is
* pending. */
unsigned writecb_pending : 1;
/** Flag: set if we are currently busy connecting. */
unsigned connecting : 1;
/** Flag: set if a connect failed prematurely; this is a hack for
* getting around the bufferevent abstraction. */
unsigned connection_refused : 1;
/** Set to the events pending if we have deferred callbacks and
* an events callback is pending. */
short eventcb_pending;
/** If set, read is suspended until one or more conditions are over.
* The actual value here is a bitfield of those conditions; see the
* BEV_SUSPEND_* flags above. */
bufferevent_suspend_flags read_suspended;
/** If set, writing is suspended until one or more conditions are over.
* The actual value here is a bitfield of those conditions; see the
* BEV_SUSPEND_* flags above. */
bufferevent_suspend_flags write_suspended;
/** Set to the current socket errno if we have deferred callbacks and
* an events callback is pending. */
int errno_pending;
/** The DNS error code for bufferevent_socket_connect_hostname */
int dns_error;
/** Used to implement deferred callbacks */
struct deferred_cb deferred;
/** The options this bufferevent was constructed with */
enum bufferevent_options options;
/** Current reference count for this bufferevent. */
int refcnt;
/** Lock for this bufferevent. Shared by the inbuf and the outbuf.
* If NULL, locking is disabled. */
void *lock;
/** Rate-limiting information for this bufferevent */
struct bufferevent_rate_limit *rate_limiting;
};
/** Possible operations for a control callback. */
enum bufferevent_ctrl_op {
BEV_CTRL_SET_FD,
BEV_CTRL_GET_FD,
BEV_CTRL_GET_UNDERLYING,
BEV_CTRL_CANCEL_ALL
};
/** Possible data types for a control callback */
union bufferevent_ctrl_data {
void *ptr;
evutil_socket_t fd;
};
/**
Implementation table for a bufferevent: holds function pointers and other
information to make the various bufferevent types work.
*/
struct bufferevent_ops {
/** The name of the bufferevent's type. */
const char *type;
/** At what offset into the implementation type will we find a
bufferevent structure?
Example: if the type is implemented as
struct bufferevent_x {
int extra_data;
struct bufferevent bev;
}
then mem_offset should be offsetof(struct bufferevent_x, bev)
*/
off_t mem_offset;
/** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
int (*enable)(struct bufferevent *, short);
/** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
int (*disable)(struct bufferevent *, short);
/** Free any storage and deallocate any extra data or structures used
in this implementation.
*/
void (*destruct)(struct bufferevent *);
/** Called when the timeouts on the bufferevent have changed.*/
int (*adj_timeouts)(struct bufferevent *);
/** Called to flush data. */
int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
/** Called to access miscellaneous fields. */
int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
};
extern const struct bufferevent_ops bufferevent_ops_socket;
extern const struct bufferevent_ops bufferevent_ops_filter;
extern const struct bufferevent_ops bufferevent_ops_pair;
#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)
#ifdef WIN32
extern const struct bufferevent_ops bufferevent_ops_async;
#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
#else
#define BEV_IS_ASYNC(bevp) 0
#endif
/** Initialize the shared parts of a bufferevent. */
int bufferevent_init_common(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);
/** For internal use: temporarily stop all reads on bufev, until the conditions
* in 'what' are over. */
void bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
* reading if there are no conditions left. */
void bufferevent_unsuspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what);
/** For internal use: temporarily stop all writes on bufev, until the conditions
* in 'what' are over. */
void bufferevent_suspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
* writing if there are no conditions left. */
void bufferevent_unsuspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what);
#define bufferevent_wm_suspend_read(b) \
bufferevent_suspend_read((b), BEV_SUSPEND_WM)
#define bufferevent_wm_unsuspend_read(b) \
bufferevent_unsuspend_read((b), BEV_SUSPEND_WM)
/*
Disable a bufferevent. Equivalent to bufferevent_disable(), but
first resets 'connecting' flag to force EV_WRITE down for sure.
XXXX this method will go away in the future; try not to add new users.
See comment in evhttp_connection_reset() for discussion.
@param bufev the bufferevent to be disabled
@param event any combination of EV_READ | EV_WRITE.
@return 0 if successful, or -1 if an error occurred
@see bufferevent_disable()
*/
int bufferevent_disable_hard(struct bufferevent *bufev, short event);
/** Internal: Set up locking on a bufferevent. If lock is set, use it.
* Otherwise, use a new lock. */
int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
/** Internal: Increment the reference count on bufev. */
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count.
* unlocking it otherwise. */
void _bufferevent_incref_and_lock(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev. Returns 1 if it freed
* the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
* unlocking it otherwise. Returns 1 if it freed the bufferevent. */
int _bufferevent_decref_and_unlock(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a read callback, schedule
* a readcb. Otherwise just run the readcb. */
void _bufferevent_run_readcb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
* a writecb. Otherwise just run the writecb. */
void _bufferevent_run_writecb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
* it to run with events "what". Otherwise just run the eventcb. */
void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);
/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
* which case add ev with no timeout. */
int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
/* =========
* These next functions implement timeouts for bufferevents that aren't doing
* anything else with ev_read and ev_write, to handle timeouts.
* ========= */
/** Internal use: Set up the ev_read and ev_write callbacks so that
* the other "generic_timeout" functions will work on it. Call this from
* the constructor function. */
void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
* Call this from the destructor function. */
int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
* (If an event is enabled and a timeout is set, we add the event. Otherwise
* we delete it.) Call this from anything that changes the timeout values,
* that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
/** Internal use: We have just successfully read data into an inbuf, so
* reset the read timeout (if any). */
#define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \
do { \
if (evutil_timerisset(&(bev)->timeout_read)) \
event_add(&(bev)->ev_read, &(bev)->timeout_read); \
} while (0)
/** Internal use: We have just successfully written data from an inbuf, so
* reset the read timeout (if any). */
#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \
do { \
if (evutil_timerisset(&(bev)->timeout_write)) \
event_add(&(bev)->ev_write, &(bev)->timeout_write); \
} while (0)
#define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \
event_del(&(bev)->ev_read)
#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \
event_del(&(bev)->ev_write)
/** Internal: Given a bufferevent, return its corresponding
* bufferevent_private. */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
#define BEV_LOCK(b) _EVUTIL_NIL_STMT
#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do { \
struct bufferevent_private *locking = BEV_UPCAST(b); \
EVLOCK_LOCK(locking->lock, 0); \
} while (0)
/** Internal: Release the lock (if any) on a bufferevent */
#define BEV_UNLOCK(b) do { \
struct bufferevent_private *locking = BEV_UPCAST(b); \
EVLOCK_UNLOCK(locking->lock, 0); \
} while (0)
#endif
/* ==== For rate-limiting. */
int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
ev_ssize_t bytes);
int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
ev_ssize_t bytes);
ev_ssize_t _bufferevent_get_read_max(struct bufferevent_private *bev);
ev_ssize_t _bufferevent_get_write_max(struct bufferevent_private *bev);
#ifdef __cplusplus
}
#endif
#endif /* _BUFFEREVENT_INTERNAL_H_ */

Просмотреть файл

@ -1,875 +0,0 @@
/*
* Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include <errno.h>
#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "evbuffer-internal.h"
#include "util-internal.h"
static void _bufferevent_cancel_all(struct bufferevent *bev);
/* Record 'what' as a reason reading is suspended; on the first such
 * reason, tell the backend to stop reading. */
void
bufferevent_suspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	if (p->read_suspended == 0)
		bufev->be_ops->disable(bufev, EV_READ);
	p->read_suspended |= what;
	BEV_UNLOCK(bufev);
}
/* Clear the suspension reasons in 'what'; once none remain and the user
 * still has EV_READ enabled, re-enable reading in the backend. */
void
bufferevent_unsuspend_read(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	p->read_suspended &= ~what;
	if (p->read_suspended == 0 && (bufev->enabled & EV_READ))
		bufev->be_ops->enable(bufev, EV_READ);
	BEV_UNLOCK(bufev);
}
/* Record 'what' as a reason writing is suspended; on the first such
 * reason, tell the backend to stop writing. */
void
bufferevent_suspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	if (p->write_suspended == 0)
		bufev->be_ops->disable(bufev, EV_WRITE);
	p->write_suspended |= what;
	BEV_UNLOCK(bufev);
}
/* Clear the suspension reasons in 'what'; once none remain and the user
 * still has EV_WRITE enabled, re-enable writing in the backend. */
void
bufferevent_unsuspend_write(struct bufferevent *bufev, bufferevent_suspend_flags what)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	p->write_suspended &= ~what;
	if (p->write_suspended == 0 && (bufev->enabled & EV_WRITE))
		bufev->be_ops->enable(bufev, EV_WRITE);
	BEV_UNLOCK(bufev);
}
/* Evbuffer callback that enforces the input-buffer high watermark.
 * Installed only when a read high watermark is set. */
static void
bufferevent_inbuf_wm_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bufev = arg;
	size_t len = evbuffer_get_length(buf);

	if (len < bufev->wm_read.high)
		bufferevent_wm_unsuspend_read(bufev);
	else
		bufferevent_wm_suspend_read(bufev);
}
/* Deferred-callback handler used when BEV_OPT_UNLOCK_CALLBACKS is not
 * set: the bufferevent lock is held across every user callback.
 * Delivers, in order: any pending "connected" event, the read callback,
 * the write callback, and any remaining event callback. */
static void
bufferevent_run_deferred_callbacks_locked(struct deferred_cb *_, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		bufev_private->readcb_pending = 0;
		bufev->readcb(bufev, bufev->cbarg);
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufev_private->writecb_pending = 0;
		bufev->writecb(bufev, bufev->cbarg);
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		/* Restore the socket error that was current when the event
		 * was queued, so the callback observes it. */
		EVUTIL_SET_SOCKET_ERROR(err);
		bufev->errorcb(bufev, what, bufev->cbarg);
	}
	/* Drop the reference taken by SCHEDULE_DEFERRED; also unlocks. */
	_bufferevent_decref_and_unlock(bufev);
}
/* Deferred-callback handler used with BEV_OPT_UNLOCK_CALLBACKS: the
 * bufferevent lock is dropped around each user callback (via UNLOCKED),
 * so each callback pointer and cbarg is copied to locals while the lock
 * is still held. */
static void
bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
{
	struct bufferevent_private *bufev_private = arg;
	struct bufferevent *bufev = &bufev_private->bev;

	BEV_LOCK(bufev);
#define UNLOCKED(stmt) \
	do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)

	if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
	    bufev->errorcb) {
		/* The "connected" happened before any reads or writes, so
		   send it first. */
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
		UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
	}
	if (bufev_private->readcb_pending && bufev->readcb) {
		bufferevent_data_cb readcb = bufev->readcb;
		void *cbarg = bufev->cbarg;
		bufev_private->readcb_pending = 0;
		UNLOCKED(readcb(bufev, cbarg));
	}
	if (bufev_private->writecb_pending && bufev->writecb) {
		bufferevent_data_cb writecb = bufev->writecb;
		void *cbarg = bufev->cbarg;
		bufev_private->writecb_pending = 0;
		UNLOCKED(writecb(bufev, cbarg));
	}
	if (bufev_private->eventcb_pending && bufev->errorcb) {
		bufferevent_event_cb errorcb = bufev->errorcb;
		void *cbarg = bufev->cbarg;
		short what = bufev_private->eventcb_pending;
		int err = bufev_private->errno_pending;
		bufev_private->eventcb_pending = 0;
		bufev_private->errno_pending = 0;
		/* Replay the socket error that was current when queued. */
		EVUTIL_SET_SOCKET_ERROR(err);
		UNLOCKED(errorcb(bufev,what,cbarg));
	}
	/* Drop the reference taken by SCHEDULE_DEFERRED; also unlocks. */
	_bufferevent_decref_and_unlock(bufev);
#undef UNLOCKED
}
#define SCHEDULE_DEFERRED(bevp) \
do { \
bufferevent_incref(&(bevp)->bev); \
event_deferred_cb_schedule( \
event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \
&(bevp)->deferred); \
} while (0)
/* Invoke (or queue, in deferred mode) the user read callback.
 * Requires that the caller holds the lock and a reference. */
void
_bufferevent_run_readcb(struct bufferevent *bufev)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (bufev->readcb == NULL)
		return;
	if (!(p->options & BEV_OPT_DEFER_CALLBACKS)) {
		/* Immediate mode: run it right now. */
		bufev->readcb(bufev, bufev->cbarg);
		return;
	}
	/* Deferred mode: mark pending and ensure the deferred cb runs. */
	p->readcb_pending = 1;
	if (!p->deferred.queued)
		SCHEDULE_DEFERRED(p);
}
/* Invoke (or queue, in deferred mode) the user write callback.
 * Requires that the caller holds the lock and a reference. */
void
_bufferevent_run_writecb(struct bufferevent *bufev)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (bufev->writecb == NULL)
		return;
	if (!(p->options & BEV_OPT_DEFER_CALLBACKS)) {
		/* Immediate mode: run it right now. */
		bufev->writecb(bufev, bufev->cbarg);
		return;
	}
	/* Deferred mode: mark pending and ensure the deferred cb runs. */
	p->writecb_pending = 1;
	if (!p->deferred.queued)
		SCHEDULE_DEFERRED(p);
}
/* Invoke (or queue, in deferred mode) the user event callback with the
 * event bits in 'what'.  Requires that the caller holds the lock and a
 * reference. */
void
_bufferevent_run_eventcb(struct bufferevent *bufev, short what)
{
	struct bufferevent_private *p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (bufev->errorcb == NULL)
		return;
	if (!(p->options & BEV_OPT_DEFER_CALLBACKS)) {
		bufev->errorcb(bufev, what, bufev->cbarg);
		return;
	}
	/* Deferred mode: accumulate the event bits and capture the current
	 * socket error for later replay, then ensure the deferred cb runs. */
	p->eventcb_pending |= what;
	p->errno_pending = EVUTIL_SOCKET_ERROR();
	if (!p->deferred.queued)
		SCHEDULE_DEFERRED(p);
}
/**
 * Initialize the fields shared by every bufferevent type.
 *
 * @param bufev_private the private structure to set up
 * @param base the event base the bufferevent will run on
 * @param ops implementation table for the concrete bufferevent type
 * @param options combination of BEV_OPT_* flags
 * @return 0 on success, -1 on error
 */
int
bufferevent_init_common(struct bufferevent_private *bufev_private,
    struct event_base *base,
    const struct bufferevent_ops *ops,
    enum bufferevent_options options)
{
	struct bufferevent *bufev = &bufev_private->bev;

	/* Validate the option combination before allocating anything:
	 * doing this check after the evbuffers (and possibly a lock) had
	 * been created leaked them on the error return. */
	if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
	    == BEV_OPT_UNLOCK_CALLBACKS) {
		event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
		return -1;
	}

	if (!bufev->input) {
		if ((bufev->input = evbuffer_new()) == NULL)
			return -1;
	}

	if (!bufev->output) {
		if ((bufev->output = evbuffer_new()) == NULL) {
			/* NOTE(review): if 'input' was caller-provided this
			 * frees it anyway and leaves a dangling pointer --
			 * preserved from the original; confirm callers never
			 * pass a preexisting input buffer. */
			evbuffer_free(bufev->input);
			return -1;
		}
	}

	bufev_private->refcnt = 1;
	bufev->ev_base = base;

	/* Disable timeouts. */
	evutil_timerclear(&bufev->timeout_read);
	evutil_timerclear(&bufev->timeout_write);

	bufev->be_ops = ops;

	/*
	 * Set to EV_WRITE so that using bufferevent_write is going to
	 * trigger a callback.  Reading needs to be explicitly enabled
	 * because otherwise no data will be available.
	 */
	bufev->enabled = EV_WRITE;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	if (options & BEV_OPT_THREADSAFE) {
		if (bufferevent_enable_locking(bufev, NULL) < 0) {
			/* cleanup */
			evbuffer_free(bufev->input);
			evbuffer_free(bufev->output);
			bufev->input = NULL;
			bufev->output = NULL;
			return -1;
		}
	}
#endif

	if (options & BEV_OPT_DEFER_CALLBACKS) {
		/* Pick the deferred runner that matches the locking mode. */
		if (options & BEV_OPT_UNLOCK_CALLBACKS)
			event_deferred_cb_init(&bufev_private->deferred,
			    bufferevent_run_deferred_callbacks_unlocked,
			    bufev_private);
		else
			event_deferred_cb_init(&bufev_private->deferred,
			    bufferevent_run_deferred_callbacks_locked,
			    bufev_private);
	}

	bufev_private->options = options;

	evbuffer_set_parent(bufev->input, bufev);
	evbuffer_set_parent(bufev->output, bufev);

	return 0;
}
/** Replace all user callbacks of 'bufev' under its lock. */
void
bufferevent_setcb(struct bufferevent *bufev,
    bufferevent_data_cb readcb, bufferevent_data_cb writecb,
    bufferevent_event_cb eventcb, void *cbarg)
{
	BEV_LOCK(bufev);

	bufev->readcb = readcb;
	bufev->writecb = writecb;
	bufev->errorcb = eventcb;
	bufev->cbarg = cbarg;

	BEV_UNLOCK(bufev);
}
/* Return the input evbuffer (data read from the transport).  The buffer
 * is owned by the bufferevent; the caller must not free it. */
struct evbuffer *
bufferevent_get_input(struct bufferevent *bufev)
{
	return bufev->input;
}
/* Return the output evbuffer (data queued for the transport).  The
 * buffer is owned by the bufferevent; the caller must not free it. */
struct evbuffer *
bufferevent_get_output(struct bufferevent *bufev)
{
	return bufev->output;
}
/* Return the event_base this bufferevent was created on. */
struct event_base *
bufferevent_get_base(struct bufferevent *bufev)
{
	return bufev->ev_base;
}
/* Append "size" bytes from "data" to the output buffer; the backend is
 * notified of the new data through its evbuffer callback.
 * Returns 0 on success, -1 if the evbuffer append fails. */
int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
	return evbuffer_add(bufev->output, data, size) == -1 ? -1 : 0;
}
/* Move the full contents of "buf" onto the output buffer (zero-copy
 * where evbuffer allows).  Returns 0 on success, -1 on failure. */
int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
	return evbuffer_add_buffer(bufev->output, buf) == -1 ? -1 : 0;
}
/* Drain up to "size" bytes from the input buffer into "data" and return
 * the number of bytes copied.
 * NOTE(review): evbuffer_remove() returns int and can return -1; the
 * implicit conversion to size_t would yield a huge value in that case —
 * presumably callers never hit it, but worth confirming. */
size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
	return (evbuffer_remove(bufev->input, data, size));
}
/* Move all available input data into the caller's evbuffer "buf".
 * Returns 0 on success, -1 on failure. */
int
bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
	return (evbuffer_add_buffer(buf, bufev->input));
}
/* Enable EV_READ and/or EV_WRITE on the bufferevent.  The requested
 * events are always recorded in bufev->enabled, but events that are
 * currently suspended (read/write_suspended) are masked out before the
 * backend's enable hook runs, so they take effect only on unsuspend.
 * Returns 0 on success, -1 if the backend refuses. */
int
bufferevent_enable(struct bufferevent *bufev, short event)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	short impl_events = event;
	int r = 0;
	/* Hold a reference across the backend call in case a callback
	 * drops the last external reference. */
	_bufferevent_incref_and_lock(bufev);
	if (bufev_private->read_suspended)
		impl_events &= ~EV_READ;
	if (bufev_private->write_suspended)
		impl_events &= ~EV_WRITE;
	bufev->enabled |= event;
	if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
		r = -1;
	_bufferevent_decref_and_unlock(bufev);
	return r;
}
/* Set (or clear, with NULL) the read and write timeouts, then let the
 * backend re-arm its timers via adj_timeouts.  Returns the backend's
 * result, or 0 when it has no adj_timeouts hook. */
int
bufferevent_set_timeouts(struct bufferevent *bufev,
    const struct timeval *tv_read,
    const struct timeval *tv_write)
{
	int rc = 0;
	BEV_LOCK(bufev);
	if (tv_read)
		bufev->timeout_read = *tv_read;
	else
		evutil_timerclear(&bufev->timeout_read);
	if (tv_write)
		bufev->timeout_write = *tv_write;
	else
		evutil_timerclear(&bufev->timeout_write);
	if (bufev->be_ops->adj_timeouts)
		rc = bufev->be_ops->adj_timeouts(bufev);
	BEV_UNLOCK(bufev);
	return rc;
}
/* Obsolete; use bufferevent_set_timeouts */
void
bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write)
{
struct timeval tv_read, tv_write;
struct timeval *ptv_read = NULL, *ptv_write = NULL;
memset(&tv_read, 0, sizeof(tv_read));
memset(&tv_write, 0, sizeof(tv_write));
if (timeout_read) {
tv_read.tv_sec = timeout_read;
ptv_read = &tv_read;
}
if (timeout_write) {
tv_write.tv_sec = timeout_write;
ptv_write = &tv_write;
}
bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
}
/* Like bufferevent_disable(), but additionally aborts any in-progress
 * connect attempt by clearing the "connecting" flag.  Used when tearing
 * a bufferevent down forcibly.  Returns -1 if the backend's disable
 * hook fails. */
int
bufferevent_disable_hard(struct bufferevent *bufev, short event)
{
	int r = 0;
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	BEV_LOCK(bufev);
	bufev->enabled &= ~event;
	bufev_private->connecting = 0;
	if (bufev->be_ops->disable(bufev, event) < 0)
		r = -1;
	BEV_UNLOCK(bufev);
	return r;
}
/* Disable EV_READ and/or EV_WRITE: clear them from the enabled mask and
 * tell the backend to stop watching them.  Returns -1 on backend error. */
int
bufferevent_disable(struct bufferevent *bufev, short event)
{
	int rc = 0;
	BEV_LOCK(bufev);
	bufev->enabled &= ~event;
	if (bufev->be_ops->disable(bufev, event) < 0)
		rc = -1;
	BEV_UNLOCK(bufev);
	return rc;
}
/*
 * Set the low/high water marks for the selected directions.  For reads,
 * a non-zero high-water mark installs (once) an evbuffer callback on the
 * input buffer that suspends reading when the buffer reaches the mark
 * and resumes it when it drains below; a zero high-water mark disables
 * that callback and unsuspends reading.
 */
void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
    size_t lowmark, size_t highmark)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	BEV_LOCK(bufev);
	if (events & EV_WRITE) {
		bufev->wm_write.low = lowmark;
		bufev->wm_write.high = highmark;
	}

	if (events & EV_READ) {
		bufev->wm_read.low = lowmark;
		bufev->wm_read.high = highmark;

		if (highmark) {
			/* There is now a new high-water mark for read.
			   enable the callback if needed, and see if we should
			   suspend/bufferevent_wm_unsuspend. */

			if (bufev_private->read_watermarks_cb == NULL) {
				bufev_private->read_watermarks_cb =
				    evbuffer_add_cb(bufev->input,
						    bufferevent_inbuf_wm_cb,
						    bufev);
			}
			evbuffer_cb_set_flags(bufev->input,
			    bufev_private->read_watermarks_cb,
			    EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);

			/* Immediately apply the new mark to the current
			 * buffer length. */
			if (evbuffer_get_length(bufev->input) >= highmark)
				bufferevent_wm_suspend_read(bufev);
			else if (evbuffer_get_length(bufev->input) < highmark)
				bufferevent_wm_unsuspend_read(bufev);
		} else {
			/* There is now no high-water mark for read. */
			if (bufev_private->read_watermarks_cb)
				evbuffer_cb_clear_flags(bufev->input,
				    bufev_private->read_watermarks_cb,
				    EVBUFFER_CB_ENABLED);
			bufferevent_wm_unsuspend_read(bufev);
		}
	}
	BEV_UNLOCK(bufev);
}
int
bufferevent_flush(struct bufferevent *bufev,
short iotype,
enum bufferevent_flush_mode mode)
{
int r = -1;
BEV_LOCK(bufev);
if (bufev->be_ops->flush)
r = bufev->be_ops->flush(bufev, iotype, mode);
BEV_UNLOCK(bufev);
return r;
}
/* Acquire the bufferevent's lock and take a reference.  The matching
 * call is _bufferevent_decref_and_unlock(); the reference keeps the
 * object alive while callbacks run under the lock. */
void
_bufferevent_incref_and_lock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    BEV_UPCAST(bufev);
	BEV_LOCK(bufev);
	++bufev_private->refcnt;
}
#if 0
/* Disabled/unused helper: when donor and recipient share the same lock
 * object, move "own_lock" responsibility from donor to recipient so the
 * lock survives the donor being freed first. */
static void
_bufferevent_transfer_lock_ownership(struct bufferevent *donor,
    struct bufferevent *recipient)
{
	struct bufferevent_private *d = BEV_UPCAST(donor);
	struct bufferevent_private *r = BEV_UPCAST(recipient);
	if (d->lock != r->lock)
		return;
	if (r->own_lock)
		return;
	if (d->own_lock) {
		d->own_lock = 0;
		r->own_lock = 1;
	}
}
#endif
/* Drop one reference and release the lock.  If this was the last
 * reference, destroy the bufferevent: run the backend destructor, free
 * both evbuffers, tear down rate limiting, free the lock (if owned) and
 * finally the allocation itself.  Returns 1 if the object was freed,
 * 0 otherwise.  The ordering below is deliberate: the lock must be
 * released before it is freed, and the underlying bufferevent is
 * released last because it may share our lock. */
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct bufferevent *underlying;

	EVUTIL_ASSERT(bufev_private->refcnt > 0);

	if (--bufev_private->refcnt) {
		BEV_UNLOCK(bufev);
		return 0;
	}

	underlying = bufferevent_get_underlying(bufev);

	/* Clean up the shared info */
	if (bufev->be_ops->destruct)
		bufev->be_ops->destruct(bufev);

	/* XXX what happens if refcnt for these buffers is > 1?
	 * The buffers can share a lock with this bufferevent object,
	 * but the lock might be destroyed below. */
	/* evbuffer will free the callbacks */
	evbuffer_free(bufev->input);
	evbuffer_free(bufev->output);

	if (bufev_private->rate_limiting) {
		if (bufev_private->rate_limiting->group)
			bufferevent_remove_from_rate_limit_group_internal(bufev,0);
		if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
			event_del(&bufev_private->rate_limiting->refill_bucket_event);
		event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
		mm_free(bufev_private->rate_limiting);
		bufev_private->rate_limiting = NULL;
	}

	event_debug_unassign(&bufev->ev_read);
	event_debug_unassign(&bufev->ev_write);

	BEV_UNLOCK(bufev);
	if (bufev_private->own_lock)
		EVTHREAD_FREE_LOCK(bufev_private->lock,
		    EVTHREAD_LOCKTYPE_RECURSIVE);

	/* Free the actual allocated memory. */
	mm_free(((char*)bufev) - bufev->be_ops->mem_offset);

	/* Release the reference to underlying now that we no longer need the
	 * reference to it.  We wait this long mainly in case our lock is
	 * shared with underlying.
	 *
	 * The 'destruct' function will also drop a reference to underlying
	 * if BEV_OPT_CLOSE_ON_FREE is set.
	 *
	 * XXX Should we/can we just refcount evbuffer/bufferevent locks?
	 * It would probably save us some headaches.
	 */
	if (underlying)
		bufferevent_decref(underlying);

	return 1;
}
/* Drop one reference, freeing the bufferevent if it was the last.
 * Returns 1 if the object was freed, 0 otherwise. */
int
bufferevent_decref(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	return _bufferevent_decref_and_unlock(bufev);
}
/* Public destructor: clear user callbacks and cancel any pending
 * backend operations first so no callback can fire during teardown,
 * then drop the caller's reference.  Deferred callbacks or backend
 * references may keep the object alive a little longer. */
void
bufferevent_free(struct bufferevent *bufev)
{
	BEV_LOCK(bufev);
	bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
	_bufferevent_cancel_all(bufev);
	_bufferevent_decref_and_unlock(bufev);
}
/* Take an additional reference on the bufferevent. */
void
bufferevent_incref(struct bufferevent *bufev)
{
	struct bufferevent_private *bufev_private =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	BEV_LOCK(bufev);
	++bufev_private->refcnt;
	BEV_UNLOCK(bufev);
}
/* Make the bufferevent threadsafe.  If "lock" is NULL, either share the
 * underlying bufferevent's lock (so a filter and its transport lock as
 * one) or allocate a fresh recursive lock that we then own.  The same
 * lock is propagated to both evbuffers and, recursively, to the
 * underlying bufferevent.  Returns -1 if already locked, on allocation
 * failure, or when thread support is compiled out. */
int
bufferevent_enable_locking(struct bufferevent *bufev, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
	return -1;
#else
	struct bufferevent *underlying;

	if (BEV_UPCAST(bufev)->lock)
		return -1;
	underlying = bufferevent_get_underlying(bufev);

	if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
		lock = BEV_UPCAST(underlying)->lock;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	} else if (!lock) {
		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
		if (!lock)
			return -1;
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 1;
	} else {
		BEV_UPCAST(bufev)->lock = lock;
		BEV_UPCAST(bufev)->own_lock = 0;
	}
	evbuffer_enable_locking(bufev->input, lock);
	evbuffer_enable_locking(bufev->output, lock);

	if (underlying && !BEV_UPCAST(underlying)->lock)
		bufferevent_enable_locking(underlying, lock);

	return 0;
#endif
}
/* Point the bufferevent at a new file descriptor via the backend's
 * ctrl hook.  Returns -1 when the backend has no ctrl hook or refuses. */
int
bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
{
	union bufferevent_ctrl_data d;
	int rc = -1;
	d.fd = fd;
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl != NULL)
		rc = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
	BEV_UNLOCK(bev);
	return rc;
}
evutil_socket_t
bufferevent_getfd(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
int res = -1;
d.fd = -1;
BEV_LOCK(bev);
if (bev->be_ops->ctrl)
res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
BEV_UNLOCK(bev);
return (res<0) ? -1 : d.fd;
}
static void
_bufferevent_cancel_all(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
memset(&d, 0, sizeof(d));
BEV_LOCK(bev);
if (bev->be_ops->ctrl)
bev->be_ops->ctrl(bev, BEV_CTRL_CANCEL_ALL, &d);
BEV_UNLOCK(bev);
}
/* Snapshot the currently-enabled event mask under the lock. */
short
bufferevent_get_enabled(struct bufferevent *bufev)
{
	short mask;
	BEV_LOCK(bufev);
	mask = bufev->enabled;
	BEV_UNLOCK(bufev);
	return mask;
}
/* Return the underlying transport bufferevent (for filters and the
 * like), or NULL when there is none / the backend has no ctrl hook. */
struct bufferevent *
bufferevent_get_underlying(struct bufferevent *bev)
{
	union bufferevent_ctrl_data d;
	int rc = -1;
	d.ptr = NULL;
	BEV_LOCK(bev);
	if (bev->be_ops->ctrl != NULL)
		rc = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
	BEV_UNLOCK(bev);
	return rc < 0 ? NULL : d.ptr;
}
/* Timer callback for the generic read timeout: disable reading and
 * report BEV_EVENT_TIMEOUT|BEV_EVENT_READING to the user. */
static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
	struct bufferevent *bev = ctx;
	_bufferevent_incref_and_lock(bev);
	bufferevent_disable(bev, EV_READ);
	_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
	_bufferevent_decref_and_unlock(bev);
}
/* Timer callback for the generic write timeout: disable writing and
 * report BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING to the user. */
static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
	struct bufferevent *bev = ctx;
	_bufferevent_incref_and_lock(bev);
	bufferevent_disable(bev, EV_WRITE);
	_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
	_bufferevent_decref_and_unlock(bev);
}
/* Bind the generic read/write timeout timers (bev->ev_read/ev_write)
 * to this bufferevent's base.  Backends without their own timeout
 * handling call this once their fd/state is ready. */
void
_bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
{
	evtimer_assign(&bev->ev_read, bev->ev_base,
	    bufferevent_generic_read_timeout_cb, bev);
	evtimer_assign(&bev->ev_write, bev->ev_base,
	    bufferevent_generic_write_timeout_cb, bev);
}
/* Remove both generic timeout timers; returns -1 if either delete
 * fails, 0 otherwise. */
int
_bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
{
	int rd = event_del(&bev->ev_read);
	int wr = event_del(&bev->ev_write);
	return (rd < 0 || wr < 0) ? -1 : 0;
}
/* Re-arm or cancel the generic timeout timers to match current state:
 * the read timer runs only while reading is enabled, unsuspended and a
 * read timeout is set; the write timer additionally requires pending
 * output data.  Returns -1 if any event_add/event_del fails. */
int
_bufferevent_generic_adj_timeouts(struct bufferevent *bev)
{
	const short enabled = bev->enabled;
	struct bufferevent_private *bev_p =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
	int r1=0, r2=0;
	if ((enabled & EV_READ) && !bev_p->read_suspended &&
	    evutil_timerisset(&bev->timeout_read))
		r1 = event_add(&bev->ev_read, &bev->timeout_read);
	else
		r1 = event_del(&bev->ev_read);

	if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
	    evutil_timerisset(&bev->timeout_write) &&
	    evbuffer_get_length(bev->output))
		r2 = event_add(&bev->ev_write, &bev->timeout_write);
	else
		r2 = event_del(&bev->ev_write);
	if (r1 < 0 || r2 < 0)
		return -1;
	return 0;
}
/* Add "ev", treating an all-zero timeval as "no timeout". */
int
_bufferevent_add_event(struct event *ev, const struct timeval *tv)
{
	const int no_timeout = (tv->tv_sec == 0 && tv->tv_usec == 0);
	return event_add(ev, no_timeout ? NULL : tv);
}
/* For use by user programs only; internally, we should be calling
   either _bufferevent_incref_and_lock(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
	_bufferevent_incref_and_lock(bev);
}
/* Release the lock taken by bufferevent_lock() and drop its reference;
 * may free the bufferevent if this was the last reference. */
void
bufferevent_unlock(struct bufferevent *bev)
{
	_bufferevent_decref_and_unlock(bev);
}

Просмотреть файл

@ -1,690 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#include <sys/queue.h>
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "event2/util.h"
#include "event-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
#include "iocp-internal.h"
#ifndef SO_UPDATE_CONNECT_CONTEXT
/* Mingw is sometimes missing this */
#define SO_UPDATE_CONNECT_CONTEXT 0x7010
#endif
/* prototypes: backend-method implementations for the Windows IOCP
 * ("async") bufferevent, wired into bufferevent_ops_async below. */
static int be_async_enable(struct bufferevent *, short);
static int be_async_disable(struct bufferevent *, short);
static void be_async_destruct(struct bufferevent *);
static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
/* State for an IOCP-backed bufferevent: one overlapped structure per
 * outstanding operation kind, plus bookkeeping for in-flight sizes. */
struct bufferevent_async {
	struct bufferevent_private bev;
	struct event_overlapped connect_overlapped;
	struct event_overlapped read_overlapped;
	struct event_overlapped write_overlapped;
	size_t read_in_progress;	/* bytes of the outstanding read, 0 if none */
	size_t write_in_progress;	/* bytes of the outstanding write, 0 if none */
	unsigned ok : 1;		/* cleared after an unrecoverable error */
	unsigned read_added : 1;	/* virtual read event registered on base */
	unsigned write_added : 1;	/* virtual write event registered on base */
};
/* Method table installed on IOCP-backed bufferevents. */
const struct bufferevent_ops bufferevent_ops_async = {
	"socket_async",
	evutil_offsetof(struct bufferevent_async, bev.bev),
	be_async_enable,
	be_async_disable,
	be_async_destruct,
	_bufferevent_generic_adj_timeouts,
	be_async_flush,
	be_async_ctrl,
};
/* Convert a generic bufferevent to its bufferevent_async container, or
 * return NULL if it is not actually an async bufferevent. */
static inline struct bufferevent_async *
upcast(struct bufferevent *bev)
{
	struct bufferevent_async *bev_a;
	if (bev->be_ops != &bufferevent_ops_async)
		return NULL;
	bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
	return bev_a;
}
/* Recover the bufferevent_async from its embedded connect_overlapped. */
static inline struct bufferevent_async *
upcast_connect(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}
/* Recover the bufferevent_async from its embedded read_overlapped. */
static inline struct bufferevent_async *
upcast_read(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}
/* Recover the bufferevent_async from its embedded write_overlapped. */
static inline struct bufferevent_async *
upcast_write(struct event_overlapped *eo)
{
	struct bufferevent_async *bev_a;
	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
	return bev_a;
}
/* Drop the virtual event that keeps the base looping for our pending
 * write, if one was registered. */
static void
bev_async_del_write(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;
	if (!beva->write_added)
		return;
	beva->write_added = 0;
	event_base_del_virtual(bev->ev_base);
}
/* Drop the virtual event that keeps the base looping for our pending
 * read, if one was registered. */
static void
bev_async_del_read(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;
	if (!beva->read_added)
		return;
	beva->read_added = 0;
	event_base_del_virtual(bev->ev_base);
}
/* Register a virtual event so the base keeps looping while a write is
 * outstanding; idempotent. */
static void
bev_async_add_write(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;
	if (beva->write_added)
		return;
	beva->write_added = 1;
	event_base_add_virtual(bev->ev_base);
}
/* Register a virtual event so the base keeps looping while a read is
 * outstanding; idempotent. */
static void
bev_async_add_read(struct bufferevent_async *beva)
{
	struct bufferevent *bev = &beva->bev.bev;
	if (beva->read_added)
		return;
	beva->read_added = 1;
	event_base_add_virtual(bev->ev_base);
}
/* Launch an overlapped write for as much queued output as the rate
 * limiter allows, unless a write is already in flight, writing is
 * disabled/suspended, or the output buffer is empty.  On launch failure
 * the bufferevent is marked broken and the error callback fires. */
static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't write if there's a write in progress, or we do not
	 * want to write, or when there's nothing left to write. */
	if (beva->write_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_WRITE) ||
	    !evbuffer_get_length(bev->output)) {
		bev_async_del_write(beva);
		return;
	}

	at_most = evbuffer_get_length(bev->output);

	/* This is safe so long as bufferevent_get_write_max never returns
	 * more than INT_MAX.  That's true for now. XXXX */
	limit = (int)_bufferevent_get_write_max(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.write_suspended) {
		bev_async_del_write(beva);
		return;
	}

	/*  XXXX doesn't respect low-water mark very well. */
	/* Hold a reference across the overlapped operation; the completion
	 * callback releases it. */
	bufferevent_incref(bev);
	if (evbuffer_launch_write(bev->output, at_most,
	    &beva->write_overlapped)) {
		bufferevent_decref(bev);
		beva->ok = 0;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
	} else {
		beva->write_in_progress = at_most;
		_bufferevent_decrement_write_buckets(&beva->bev, at_most);
		bev_async_add_write(beva);
	}
}
/* Launch an overlapped read sized by the read high-water mark (16384
 * bytes when none is set) and the rate limiter, unless a read is
 * already in flight, reading is disabled/suspended, or the input buffer
 * is already full.  On launch failure the bufferevent is marked broken
 * and the error callback fires. */
static void
bev_async_consider_reading(struct bufferevent_async *beva)
{
	size_t cur_size;
	size_t read_high;
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't read if there is a read in progress, or we do not
	 * want to read. */
	if (beva->read_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_READ)) {
		bev_async_del_read(beva);
		return;
	}

	/* Don't read if we're full */
	cur_size = evbuffer_get_length(bev->input);
	read_high = bev->wm_read.high;
	if (read_high) {
		if (cur_size >= read_high) {
			bev_async_del_read(beva);
			return;
		}
		at_most = read_high - cur_size;
	} else {
		at_most = 16384; /* FIXME totally magic. */
	}

	/* XXXX This over-commits. */
	/* XXXX see also not above on cast on _bufferevent_get_write_max() */
	limit = (int)_bufferevent_get_read_max(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.read_suspended) {
		bev_async_del_read(beva);
		return;
	}

	/* Hold a reference across the overlapped operation; the completion
	 * callback releases it. */
	bufferevent_incref(bev);
	if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
		beva->ok = 0;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
		bufferevent_decref(bev);
	} else {
		beva->read_in_progress = at_most;
		_bufferevent_decrement_read_buckets(&beva->bev, at_most);
		bev_async_add_read(beva);
	}

	return;
}
/* evbuffer callback on the output buffer: when data is added, try to
 * start an overlapped write. */
static void
be_async_outbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_async = upcast(bev);

	/* If we added data to the outbuf and were not writing before,
	 * we may want to write now. */

	_bufferevent_incref_and_lock(bev);

	if (cbinfo->n_added)
		bev_async_consider_writing(bev_async);

	_bufferevent_decref_and_unlock(bev);
}
/* evbuffer callback on the input buffer: when data is drained (making
 * room below the high-water mark), try to start an overlapped read. */
static void
be_async_inbuf_callback(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_async *bev_async = upcast(bev);

	/* If we drained data from the inbuf and were not reading before,
	 * we may want to read now */

	_bufferevent_incref_and_lock(bev);

	if (cbinfo->n_deleted)
		bev_async_consider_reading(bev_async);

	_bufferevent_decref_and_unlock(bev);
}
/* Backend enable hook: reset generic timeouts for the enabled
 * directions and, unless a connect is still in flight, kick off any
 * read/write that is now possible.  Returns -1 if the bufferevent is
 * already in the failed state. */
static int
be_async_enable(struct bufferevent *buf, short what)
{
	struct bufferevent_async *bev_async = upcast(buf);

	if (!bev_async->ok)
		return -1;

	if (bev_async->bev.connecting) {
		/* Don't launch anything during connection attempts. */
		return 0;
	}

	if (what & EV_READ)
		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
	if (what & EV_WRITE)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);

	/* If we newly enable reading or writing, and we aren't reading or
	   writing already, consider launching a new read or write. */

	if (what & EV_READ)
		bev_async_consider_reading(bev_async);
	if (what & EV_WRITE)
		bev_async_consider_writing(bev_async);
	return 0;
}
/* Backend disable hook: cancel the generic timeouts and the virtual
 * events for the disabled directions.  Any already-launched overlapped
 * operation is left to complete. */
static int
be_async_disable(struct bufferevent *bev, short what)
{
	struct bufferevent_async *bev_async = upcast(bev);

	/* XXXX If we disable reading or writing, we may want to consider
	 * canceling any in-progress read or write operation, though it might
	 * not work. */

	if (what & EV_READ) {
		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
		bev_async_del_read(bev_async);
	}
	if (what & EV_WRITE) {
		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
		bev_async_del_write(bev_async);
	}

	return 0;
}
/* Backend destructor: requires that no overlapped I/O is still in
 * flight (asserted), drops the virtual events, optionally closes the
 * socket (BEV_OPT_CLOSE_ON_FREE), and tears down the timers left over
 * from a non-blocking connect. */
static void
be_async_destruct(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
	evutil_socket_t fd;

	EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
			!upcast(bev)->read_in_progress);

	bev_async_del_read(bev_async);
	bev_async_del_write(bev_async);

	fd = _evbuffer_overlapped_get_fd(bev->input);
	if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
		/* XXXX possible double-close */
		evutil_closesocket(fd);
	}
	/* delete this in case non-blocking connect was used */
	if (event_initialized(&bev->ev_write)) {
		event_del(&bev->ev_write);
		_bufferevent_del_generic_timeout_cbs(bev);
	}
}
/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
 * we use WSAGetOverlappedResult to translate.  Called only on failed
 * completions; the result/byte counts are discarded — the call is made
 * for its side effect of setting the thread's last WSA error. */
static void
bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
{
	DWORD bytes, flags;
	evutil_socket_t fd;

	fd = _evbuffer_overlapped_get_fd(bev->input);
	WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
/* Backend flush hook: IOCP bufferevents have nothing extra to flush,
 * so this is a no-op that reports success. */
static int
be_async_flush(struct bufferevent *bev, short what,
    enum bufferevent_flush_mode mode)
{
	return 0;
}
/* IOCP completion callback for ConnectEx: clear the connecting flag,
 * finish socket setup with SO_UPDATE_CONNECT_CONTEXT, report
 * CONNECTED or ERROR to the user, and release the reference plus the
 * virtual event taken in bufferevent_async_connect(). */
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_connect(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	evutil_socket_t sock;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;
	sock = _evbuffer_overlapped_get_fd(bev_a->bev.bev.input);
	/* XXXX Handle error? */
	setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);

	if (ok)
		bufferevent_async_set_connected(bev);
	else
		bev_async_set_wsa_error(bev, eo);

	_bufferevent_run_eventcb(bev,
			ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);

	event_base_del_virtual(bev->ev_base);

	_bufferevent_decref_and_unlock(bev);
}
/* IOCP completion callback for a read: commit the received bytes into
 * the input buffer, refund the unused portion of the rate-limit bucket,
 * then either run the user's read callback and launch the next read, or
 * report ERROR (failed completion) / EOF (zero-byte completion).
 * Releases the reference taken when the read was launched. */
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;
	ev_ssize_t amount_unread;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->read_in_progress);

	amount_unread = bev_a->read_in_progress - nbytes;
	evbuffer_commit_read(bev->input, nbytes);
	bev_a->read_in_progress = 0;
	if (amount_unread)
		_bufferevent_decrement_read_buckets(&bev_a->bev, -amount_unread);

	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			BEV_RESET_GENERIC_READ_TIMEOUT(bev);
			if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
				_bufferevent_run_readcb(bev);
			bev_async_consider_reading(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		} else if (!nbytes) {
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		}
	}

	_bufferevent_decref_and_unlock(bev);
}
/* IOCP completion callback for a write: commit the sent bytes out of
 * the output buffer, refund the unsent portion of the rate-limit
 * bucket, then either run the user's write callback (when at/below the
 * low-water mark) and launch the next write, or report ERROR / EOF.
 * Releases the reference taken when the write was launched. */
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;
	ev_ssize_t amount_unwritten;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->write_in_progress);

	amount_unwritten = bev_a->write_in_progress - nbytes;
	evbuffer_commit_write(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	if (amount_unwritten)
		_bufferevent_decrement_write_buckets(&bev_a->bev,
		                                     -amount_unwritten);


	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
			if (evbuffer_get_length(bev->output) <=
			    bev->wm_write.low)
				_bufferevent_run_writecb(bev);
			bev_async_consider_writing(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		} else if (!nbytes) {
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			_bufferevent_run_eventcb(bev, what);
		}
	}

	_bufferevent_decref_and_unlock(bev);
}
/* Create an IOCP-backed bufferevent for "fd" (may be -1 to attach one
 * later via BEV_CTRL_SET_FD).  Forces BEV_OPT_THREADSAFE, associates
 * the fd with the base's IOCP port, installs overlapped evbuffers, and
 * wires up the completion callbacks.  Returns NULL on any failure. */
struct bufferevent *
bufferevent_async_new(struct event_base *base,
    evutil_socket_t fd, int options)
{
	struct bufferevent_async *bev_a;
	struct bufferevent *bev;
	struct event_iocp_port *iocp;

	options |= BEV_OPT_THREADSAFE;

	if (!(iocp = event_base_get_iocp(base)))
		return NULL;

	if (fd >= 0 && event_iocp_port_associate(iocp, fd, 1)<0) {
		int err = GetLastError();
		/* We may have already associated this fd with a port.
		 * Let's hope it's this port, and that the error code
		 * for doing this never changes. */
		if (err != ERROR_INVALID_PARAMETER)
			return NULL;
	}

	if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
		return NULL;

	bev = &bev_a->bev.bev;
	if (!(bev->input = evbuffer_overlapped_new(fd))) {
		mm_free(bev_a);
		return NULL;
	}
	if (!(bev->output = evbuffer_overlapped_new(fd))) {
		evbuffer_free(bev->input);
		mm_free(bev_a);
		return NULL;
	}

	if (bufferevent_init_common(&bev_a->bev, base, &bufferevent_ops_async,
		options)<0)
		goto err;

	evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
	evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);

	event_overlapped_init(&bev_a->connect_overlapped, connect_complete);
	event_overlapped_init(&bev_a->read_overlapped, read_complete);
	event_overlapped_init(&bev_a->write_overlapped, write_complete);

	/* With no fd yet, the bufferevent starts in the "not ok" state
	 * until a connect succeeds or an fd is set. */
	bev_a->ok = fd >= 0;
	if (bev_a->ok)
		_bufferevent_init_generic_timeout_cbs(bev);

	return bev;
err:
	bufferevent_free(&bev_a->bev.bev);
	return NULL;
}
/* Mark the bufferevent usable after a successful connect, arm the
 * generic timeout timers, and launch any read/write that is already
 * enabled. */
void
bufferevent_async_set_connected(struct bufferevent *bev)
{
	struct bufferevent_async *bev_async = upcast(bev);
	bev_async->ok = 1;
	_bufferevent_init_generic_timeout_cbs(bev);
	/* Now's a good time to consider reading/writing */
	be_async_enable(bev, bev->enabled);
}
/* Return 1 if this bufferevent can use IOCP-style connect: it must be
 * an async bufferevent on a base with an IOCP port, and the ConnectEx
 * extension function must be available. */
int
bufferevent_async_can_connect(struct bufferevent *bev)
{
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns();

	if (!BEV_IS_ASYNC(bev))
		return 0;
	if (!event_base_get_iocp(bev->ev_base))
		return 0;
	return (ext && ext->ConnectEx) ? 1 : 0;
}
/* Start a non-blocking ConnectEx() to "sa".  ConnectEx requires the
 * socket to be bound first, so we bind to the wildcard address of the
 * matching family (tolerating WSAEINVAL for an already-bound socket).
 * Takes a reference and a virtual event that connect_complete()
 * releases.  Returns 0 when the connect is launched, -1 on error. */
int
bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
	const struct sockaddr *sa, int socklen)
{
	BOOL rc;
	struct bufferevent_async *bev_async = upcast(bev);
	struct sockaddr_storage ss;
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns();

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() requires that the socket be bound to an address
	 * with bind() before using, otherwise it will fail. We attempt
	 * to issue a bind() here, taking into account that the error
	 * code is set to WSAEINVAL when the socket is already bound. */
	memset(&ss, 0, sizeof(ss));
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
	} else if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
	} else {
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	event_base_add_virtual(bev->ev_base);
	bufferevent_incref(bev);
	rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
			    &bev_async->connect_overlapped.overlapped);
	if (rc || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	/* Launch failed: undo the reference and virtual event. */
	event_base_del_virtual(bev->ev_base);
	bufferevent_decref(bev);

	return -1;
}
/* Backend ctrl hook: get/set the fd (re-associating a new fd with the
 * IOCP port), or cancel all I/O (closing the socket when
 * BEV_OPT_CLOSE_ON_FREE is set).  GET_UNDERLYING is unsupported. */
static int
be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    union bufferevent_ctrl_data *data)
{
	switch (op) {
	case BEV_CTRL_GET_FD:
		data->fd = _evbuffer_overlapped_get_fd(bev->input);
		return 0;
	case BEV_CTRL_SET_FD: {
		struct event_iocp_port *iocp;

		if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
			return 0;
		if (!(iocp = event_base_get_iocp(bev->ev_base)))
			return -1;
		if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
			return -1;
		_evbuffer_overlapped_set_fd(bev->input, data->fd);
		_evbuffer_overlapped_set_fd(bev->output, data->fd);
		return 0;
	}
	case BEV_CTRL_CANCEL_ALL: {
		struct bufferevent_async *bev_a = upcast(bev);
		evutil_socket_t fd = _evbuffer_overlapped_get_fd(bev->input);
		if (fd != (evutil_socket_t)INVALID_SOCKET &&
		    (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
			closesocket(fd);
		}
		bev_a->ok = 0;
		return 0;
	}
	case BEV_CTRL_GET_UNDERLYING:
	default:
		return -1;
	}
}

Просмотреть файл

@ -1,511 +0,0 @@
/*
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
* Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
/* prototypes: backend-method implementations and internal callbacks for
 * the filtering bufferevent, wired into bufferevent_ops_filter below. */
static int be_filter_enable(struct bufferevent *, short);
static int be_filter_disable(struct bufferevent *, short);
static void be_filter_destruct(struct bufferevent *);

static void be_filter_readcb(struct bufferevent *, void *);
static void be_filter_writecb(struct bufferevent *, void *);
static void be_filter_eventcb(struct bufferevent *, short, void *);
static int be_filter_flush(struct bufferevent *bufev,
    short iotype, enum bufferevent_flush_mode mode);
static int be_filter_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);

static void bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *info, void *arg);
/* State for a filtering bufferevent that transforms data flowing
 * to/from an underlying transport bufferevent. */
struct bufferevent_filtered {
	struct bufferevent_private bev;

	/** The bufferevent that we read/write filtered data from/to. */
	struct bufferevent *underlying;
	/** A callback on our outbuf to notice when somebody adds data */
	struct evbuffer_cb_entry *outbuf_cb;
	/** True iff we have received an EOF callback from the underlying
	 * bufferevent. */
	unsigned got_eof;

	/** Function to free context when we're done. */
	void (*free_context)(void *);
	/** Input filter */
	bufferevent_filter_cb process_in;
	/** Output filter */
	bufferevent_filter_cb process_out;
	/** User-supplied argument to the filters. */
	void *context;
};
/* Method table for filtering bufferevents.  Field order follows
 * struct bufferevent_ops: type name, offset of the embedded
 * bufferevent, then enable/disable/destruct/adj_timeouts/flush/ctrl. */
const struct bufferevent_ops bufferevent_ops_filter = {
	"filter",
	evutil_offsetof(struct bufferevent_filtered, bev.bev),
	be_filter_enable,
	be_filter_disable,
	be_filter_destruct,
	_bufferevent_generic_adj_timeouts,
	be_filter_flush,
	be_filter_ctrl,
};
/* Given a bufferevent that is actually the embedded bev of a
 * bufferevent_filtered, recover the containing structure.  Returns NULL
 * if the bufferevent is not a filter bufferevent. */
static inline struct bufferevent_filtered *
upcast(struct bufferevent *bev)
{
	char *base;
	struct bufferevent_filtered *filt;

	if (bev->be_ops != &bufferevent_ops_filter)
		return NULL;
	base = ((char *)bev) -
	    evutil_offsetof(struct bufferevent_filtered, bev.bev);
	filt = (struct bufferevent_filtered *)base;
	EVUTIL_ASSERT(filt->bev.bev.be_ops == &bufferevent_ops_filter);
	return filt;
}
#define downcast(bev_f) (&(bev_f)->bev.bev)
/** Return 1 iff the underlying bufferevent's output buffer is at or over
 * its write high-watermark, so that in BEV_NORMAL mode we should stop
 * feeding it more filtered data. */
static int
be_underlying_writebuf_full(struct bufferevent_filtered *bevf,
    enum bufferevent_flush_mode state)
{
	struct bufferevent *u = bevf->underlying;

	if (state != BEV_NORMAL)
		return 0;
	if (!u->wm_write.high)
		return 0;
	return evbuffer_get_length(u->output) >= u->wm_write.high;
}
/** Return 1 iff our own input buffer is at or over its read
 * high-watermark, so that in BEV_NORMAL mode we should stop producing
 * more filtered input. */
static int
be_readbuf_full(struct bufferevent_filtered *bevf,
    enum bufferevent_flush_mode state)
{
	struct bufferevent *outer = downcast(bevf);

	if (state != BEV_NORMAL)
		return 0;
	if (!outer->wm_read.high)
		return 0;
	return evbuffer_get_length(outer->input) >= outer->wm_read.high;
}
/* Identity filter, installed when the user supplies a NULL filter:
 * just moves up to LIM bytes from SRC to DST unchanged. */
static enum bufferevent_filter_result
be_null_filter(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t lim,
    enum bufferevent_flush_mode state, void *ctx)
{
	(void)state;
	return evbuffer_remove_buffer(src, dst, lim) == 0 ?
	    BEV_OK : BEV_ERROR;
}
/** Create a filtering bufferevent on top of UNDERLYING.
 * input_filter/output_filter may be NULL, in which case data passes
 * through unchanged (be_null_filter).  free_context, if non-NULL, is
 * invoked on ctx at destruction time.  Returns NULL on failure. */
struct bufferevent *
bufferevent_filter_new(struct bufferevent *underlying,
    bufferevent_filter_cb input_filter,
    bufferevent_filter_cb output_filter,
    int options,
    void (*free_context)(void *),
    void *ctx)
{
	struct bufferevent_filtered *bufev_f;
	/* Never let init_common allocate a lock: if THREADSAFE was asked
	 * for, we share the underlying bufferevent's lock instead. */
	int tmp_options = options & ~BEV_OPT_THREADSAFE;

	if (!underlying)
		return NULL;

	if (!input_filter)
		input_filter = be_null_filter;
	if (!output_filter)
		output_filter = be_null_filter;

	bufev_f = mm_calloc(1, sizeof(struct bufferevent_filtered));
	if (!bufev_f)
		return NULL;

	if (bufferevent_init_common(&bufev_f->bev, underlying->ev_base,
	    &bufferevent_ops_filter, tmp_options) < 0) {
		mm_free(bufev_f);
		return NULL;
	}
	if (options & BEV_OPT_THREADSAFE) {
		/* NULL lock argument: allocate a lock shared with callers. */
		bufferevent_enable_locking(downcast(bufev_f), NULL);
	}

	bufev_f->underlying = underlying;

	bufev_f->process_in = input_filter;
	bufev_f->process_out = output_filter;
	bufev_f->free_context = free_context;
	bufev_f->context = ctx;

	/* Route the underlying bufferevent's callbacks to us so we can run
	 * the filters whenever it makes progress. */
	bufferevent_setcb(bufev_f->underlying,
	    be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);

	/* Notice when the user appends data to our output buffer. */
	bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
	    bufferevent_filtered_outbuf_cb, bufev_f);

	_bufferevent_init_generic_timeout_cbs(downcast(bufev_f));
	/* Hold a reference on the underlying bufferevent for our lifetime;
	 * keep its reads suspended until the filter is enabled for read. */
	bufferevent_incref(underlying);
	bufferevent_enable(underlying, EV_READ|EV_WRITE);
	bufferevent_suspend_read(underlying, BEV_SUSPEND_FILT_READ);

	return downcast(bufev_f);
}
/* Tear down a filtering bufferevent: run the user's context finalizer,
 * then either free the underlying bufferevent (CLOSE_ON_FREE) or detach
 * from it, and finally remove the generic timeout events. */
static void
be_filter_destruct(struct bufferevent *bev)
{
	struct bufferevent_filtered *bevf = upcast(bev);
	EVUTIL_ASSERT(bevf);
	if (bevf->free_context)
		bevf->free_context(bevf->context);

	if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) {
		/* Yes, there is also a decref in bufferevent_decref.
		 * That decref corresponds to the incref when we set
		 * underlying for the first time.  This decref is an
		 * extra one to remove the last reference.
		 */
		if (BEV_UPCAST(bevf->underlying)->refcnt < 2) {
			event_warnx("BEV_OPT_CLOSE_ON_FREE set on an "
			    "bufferevent with too few references");
		} else {
			bufferevent_free(bevf->underlying);
		}
	} else {
		if (bevf->underlying) {
			/* Stop routing the underlying bufferevent's
			 * callbacks to us, and undo the read suspension
			 * established in bufferevent_filter_new(). */
			if (bevf->underlying->errorcb == be_filter_eventcb)
				bufferevent_setcb(bevf->underlying,
				    NULL, NULL, NULL, NULL);
			bufferevent_unsuspend_read(bevf->underlying,
			    BEV_SUSPEND_FILT_READ);
		}
	}

	_bufferevent_del_generic_timeout_cbs(bev);
}
/* Enable reading and/or writing on the filter: re-arm the matching
 * generic timeouts, and let the underlying bufferevent read again when
 * EV_READ is (re)enabled. */
static int
be_filter_enable(struct bufferevent *bev, short event)
{
	struct bufferevent_filtered *filt = upcast(bev);

	if (event & EV_WRITE) {
		BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
	}

	if (event & EV_READ) {
		BEV_RESET_GENERIC_READ_TIMEOUT(bev);
		bufferevent_unsuspend_read(filt->underlying,
		    BEV_SUSPEND_FILT_READ);
	}
	return 0;
}
/* Disable reading and/or writing on the filter: drop the matching
 * generic timeouts, and suspend the underlying bufferevent's reads when
 * EV_READ is disabled. */
static int
be_filter_disable(struct bufferevent *bev, short event)
{
	struct bufferevent_filtered *filt = upcast(bev);

	if (event & EV_WRITE) {
		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
	}

	if (event & EV_READ) {
		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
		bufferevent_suspend_read(filt->underlying,
		    BEV_SUSPEND_FILT_READ);
	}
	return 0;
}
/* Run the input filter: repeatedly move data from the underlying
 * bufferevent's input buffer through process_in into our own input
 * buffer.  Sets *processed_out to 1 if any pass succeeded.  Returns the
 * last filter result. */
static enum bufferevent_filter_result
be_filter_process_input(struct bufferevent_filtered *bevf,
    enum bufferevent_flush_mode state,
    int *processed_out)
{
	enum bufferevent_filter_result res;
	struct bufferevent *bev = downcast(bevf);

	if (state == BEV_NORMAL) {
		/* If we're in 'normal' mode, don't urge data on the filter
		 * unless we're reading data and under our high-water mark.*/
		if (!(bev->enabled & EV_READ) ||
		    be_readbuf_full(bevf, state))
			return BEV_OK;
	}

	do {
		ev_ssize_t limit = -1;
		/* In normal mode, cap each pass so the read
		 * high-watermark is not exceeded. */
		if (state == BEV_NORMAL && bev->wm_read.high)
			limit = bev->wm_read.high -
			    evbuffer_get_length(bev->input);

		res = bevf->process_in(bevf->underlying->input,
		    bev->input, limit, state, bevf->context);

		if (res == BEV_OK)
			*processed_out = 1;
	} while (res == BEV_OK &&
	    (bev->enabled & EV_READ) &&
	    evbuffer_get_length(bevf->underlying->input) &&
	    !be_readbuf_full(bevf, state));

	if (*processed_out)
		BEV_RESET_GENERIC_READ_TIMEOUT(bev);

	return res;
}
/* Run the output filter: repeatedly move data from our own output
 * buffer through process_out into the underlying bufferevent's output
 * buffer.  May invoke the user write callback, so the caller must hold
 * a reference and the lock.  Sets *processed_out to 1 if any pass
 * succeeded.  Returns the last filter result. */
static enum bufferevent_filter_result
be_filter_process_output(struct bufferevent_filtered *bevf,
    enum bufferevent_flush_mode state,
    int *processed_out)
{
	/* Requires references and lock: might call writecb */
	enum bufferevent_filter_result res = BEV_OK;
	struct bufferevent *bufev = downcast(bevf);
	int again = 0;

	if (state == BEV_NORMAL) {
		/* If we're in 'normal' mode, don't urge data on the
		 * filter unless we're writing data, and the underlying
		 * bufferevent is accepting data, and we have data to
		 * give the filter.  If we're in 'flush' or 'finish',
		 * call the filter no matter what. */
		if (!(bufev->enabled & EV_WRITE) ||
		    be_underlying_writebuf_full(bevf, state) ||
		    !evbuffer_get_length(bufev->output))
			return BEV_OK;
	}

	/* disable the callback that calls this function
	   when the user adds to the output buffer. */
	evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);

	do {
		int processed = 0;
		again = 0;

		do {
			ev_ssize_t limit = -1;
			/* In normal mode, cap each pass so the
			 * underlying write high-watermark is not
			 * exceeded. */
			if (state == BEV_NORMAL &&
			    bevf->underlying->wm_write.high)
				limit = bevf->underlying->wm_write.high -
				    evbuffer_get_length(bevf->underlying->output);

			res = bevf->process_out(downcast(bevf)->output,
			    bevf->underlying->output,
			    limit,
			    state,
			    bevf->context);

			if (res == BEV_OK)
				processed = *processed_out = 1;
		} while (/* Stop if the filter wasn't successful...*/
		    res == BEV_OK &&
		    /* Or if we aren't writing any more. */
		    (bufev->enabled & EV_WRITE) &&
		    /* Or if we have nothing more to write and we are
		     * not flushing. */
		    evbuffer_get_length(bufev->output) &&
		    /* Or if we have filled the underlying output buffer. */
		    !be_underlying_writebuf_full(bevf,state));

		if (processed &&
		    evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
			/* call the write callback.*/
			_bufferevent_run_writecb(bufev);

			/* The callback may have added more data; if so,
			 * loop and filter it too. */
			if (res == BEV_OK &&
			    (bufev->enabled & EV_WRITE) &&
			    evbuffer_get_length(bufev->output) &&
			    !be_underlying_writebuf_full(bevf, state)) {
				again = 1;
			}
		}
	} while (again);

	/* reenable the outbuf_cb */
	evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
	    EVBUFFER_CB_ENABLED);

	if (*processed_out)
		BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);

	return res;
}
/* Evbuffer callback: fires when the filter's own output buffer changes
 * size.  New data appended by the user is pushed through the output
 * filter toward the underlying bufferevent. */
static void
bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo, void *arg)
{
	struct bufferevent_filtered *filt = arg;
	struct bufferevent *outer = downcast(filt);
	int processed_any = 0;

	if (!cbinfo->n_added)
		return;

	_bufferevent_incref_and_lock(outer);
	be_filter_process_output(filt, BEV_NORMAL, &processed_any);
	_bufferevent_decref_and_unlock(outer);
}
/* Called when the underlying bufferevent has read data: run the input
 * filter and, if it produced enough data, the user's read callback. */
static void
be_filter_readcb(struct bufferevent *underlying, void *_me)
{
	struct bufferevent_filtered *bevf = _me;
	enum bufferevent_filter_result res;
	enum bufferevent_flush_mode state;
	struct bufferevent *bufev = downcast(bevf);
	int processed_any = 0;

	_bufferevent_incref_and_lock(bufev);

	/* After the underlying bufferevent reported EOF, run the filter
	 * in 'finished' mode so it can emit any trailing data. */
	if (bevf->got_eof)
		state = BEV_FINISHED;
	else
		state = BEV_NORMAL;

	/* XXXX use return value */
	res = be_filter_process_input(bevf, state, &processed_any);
	(void)res;

	/* XXX This should be in process_input, not here.  There are
	 * other places that can call process-input, and they should
	 * force readcb calls as needed. */
	if (processed_any &&
	    evbuffer_get_length(bufev->input) >= bufev->wm_read.low)
		_bufferevent_run_readcb(bufev);

	_bufferevent_decref_and_unlock(bufev);
}
/* Called when the underlying bufferevent has drained enough that more
 * filtered output can be pushed into it. */
static void
be_filter_writecb(struct bufferevent *underlying, void *_me)
{
	struct bufferevent_filtered *filt = _me;
	struct bufferevent *outer = downcast(filt);
	int processed_any = 0;

	_bufferevent_incref_and_lock(outer);
	be_filter_process_output(filt, BEV_NORMAL, &processed_any);
	_bufferevent_decref_and_unlock(outer);
}
/* Event callback installed on the underlying bufferevent: relay any
 * error/EOF/timeout event straight to our own user event callback. */
static void
be_filter_eventcb(struct bufferevent *underlying, short what, void *_me)
{
	struct bufferevent_filtered *filt = _me;
	struct bufferevent *outer = downcast(filt);

	_bufferevent_incref_and_lock(outer);
	_bufferevent_run_eventcb(outer, what);
	_bufferevent_decref_and_unlock(outer);
}
/* Flush implementation: push pending data through both filters in the
 * requested mode, then ask the underlying bufferevent to flush too.
 * Returns nonzero iff either filter made progress. */
static int
be_filter_flush(struct bufferevent *bufev,
    short iotype, enum bufferevent_flush_mode mode)
{
	struct bufferevent_filtered *bevf = upcast(bufev);
	int processed_any = 0;
	EVUTIL_ASSERT(bevf);

	_bufferevent_incref_and_lock(bufev);

	if (iotype & EV_READ) {
		be_filter_process_input(bevf, mode, &processed_any);
	}
	if (iotype & EV_WRITE) {
		be_filter_process_output(bevf, mode, &processed_any);
	}
	/* XXX check the return value? */
	/* XXX does this want to recursively call lower-level flushes? */
	bufferevent_flush(bevf->underlying, iotype, mode);

	_bufferevent_decref_and_unlock(bufev);

	return processed_any;
}
/* Control-op dispatcher for filters.  Only BEV_CTRL_GET_UNDERLYING is
 * supported; fd and cancel operations make no sense for a filter. */
static int
be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    union bufferevent_ctrl_data *data)
{
	if (op == BEV_CTRL_GET_UNDERLYING) {
		struct bufferevent_filtered *filt = upcast(bev);
		data->ptr = filt->underlying;
		return 0;
	}
	/* BEV_CTRL_GET_FD, BEV_CTRL_SET_FD, BEV_CTRL_CANCEL_ALL, ... */
	return -1;
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,334 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#ifdef WIN32
#include <winsock2.h>
#endif
#include "event2/event-config.h"
#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "defer-internal.h"
#include "bufferevent-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
/** One half of a pair bufferevent: two connected in-memory bufferevents
 * where each side's output feeds the other side's input.  The embedded
 * bufferevent_private must be the first member for upcast()/downcast(). */
struct bufferevent_pair {
	struct bufferevent_private bev;
	/* The other half of the pair, or NULL once the partner is freed. */
	struct bufferevent_pair *partner;
};
/* Given a bufferevent that is actually the embedded bev of a
 * bufferevent_pair, recover the containing bufferevent_pair.  Returns
 * NULL if the bufferevent is not a pair bufferevent. */
static inline struct bufferevent_pair *
upcast(struct bufferevent *bev)
{
	struct bufferevent_pair *pair;

	if (bev->be_ops != &bufferevent_ops_pair)
		return NULL;
	pair = EVUTIL_UPCAST(bev, struct bufferevent_pair, bev.bev);
	EVUTIL_ASSERT(pair->bev.bev.be_ops == &bufferevent_ops_pair);
	return pair;
}
#define downcast(bev_pair) (&(bev_pair)->bev.bev)
static inline void
incref_and_lock(struct bufferevent *b)
{
struct bufferevent_pair *bevp;
_bufferevent_incref_and_lock(b);
bevp = upcast(b);
if (bevp->partner)
_bufferevent_incref_and_lock(downcast(bevp->partner));
}
static inline void
decref_and_unlock(struct bufferevent *b)
{
struct bufferevent_pair *bevp = upcast(b);
if (bevp->partner)
_bufferevent_decref_and_unlock(downcast(bevp->partner));
_bufferevent_decref_and_unlock(b);
}
/* XXX Handle close */
static void be_pair_outbuf_cb(struct evbuffer *,
const struct evbuffer_cb_info *, void *);
/* Allocate and initialize one half of a bufferevent pair on BASE.
 * Returns NULL on allocation or initialization failure. */
static struct bufferevent_pair *
bufferevent_pair_elt_new(struct event_base *base,
    int options)
{
	struct bufferevent_pair *bufev;
	if (! (bufev = mm_calloc(1, sizeof(struct bufferevent_pair))))
		return NULL;
	if (bufferevent_init_common(&bufev->bev, base, &bufferevent_ops_pair,
	    options)) {
		mm_free(bufev);
		return NULL;
	}
	/* Watch our own output buffer so new data can be handed to the
	 * partner; evbuffer_add_cb returns NULL on failure. */
	if (!evbuffer_add_cb(bufev->bev.bev.output, be_pair_outbuf_cb, bufev)) {
		bufferevent_free(downcast(bufev));
		return NULL;
	}

	_bufferevent_init_generic_timeout_cbs(&bufev->bev.bev);

	return bufev;
}
/** Create a linked pair of in-memory bufferevents in pair[0]/pair[1].
 * Returns 0 on success, -1 on failure. */
int
bufferevent_pair_new(struct event_base *base, int options,
    struct bufferevent *pair[2])
{
	struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
	int tmp_options;

	/* Transfers happen from inside buffer callbacks, so user callbacks
	 * are always deferred to avoid reentrancy. */
	options |= BEV_OPT_DEFER_CALLBACKS;
	/* Only the first element allocates a lock; the second shares it
	 * via bufferevent_enable_locking() below. */
	tmp_options = options & ~BEV_OPT_THREADSAFE;

	bufev1 = bufferevent_pair_elt_new(base, options);
	if (!bufev1)
		return -1;
	bufev2 = bufferevent_pair_elt_new(base, tmp_options);
	if (!bufev2) {
		bufferevent_free(downcast(bufev1));
		return -1;
	}

	if (options & BEV_OPT_THREADSAFE) {
		/*XXXX check return */
		bufferevent_enable_locking(downcast(bufev2), bufev1->bev.lock);
	}

	bufev1->partner = bufev2;
	bufev2->partner = bufev1;

	/* Freeze the ends that only the pair machinery should touch;
	 * be_pair_transfer() unfreezes them around each move. */
	evbuffer_freeze(downcast(bufev1)->input, 0);
	evbuffer_freeze(downcast(bufev1)->output, 1);
	evbuffer_freeze(downcast(bufev2)->input, 0);
	evbuffer_freeze(downcast(bufev2)->output, 1);

	pair[0] = downcast(bufev1);
	pair[1] = downcast(bufev2);

	return 0;
}
/* Move pending data from SRC's output buffer into DST's input buffer,
 * honoring DST's read high-watermark unless IGNORE_WM is set, then
 * update the generic timeouts and run any read/write callbacks whose
 * watermark conditions are met. */
static void
be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
    int ignore_wm)
{
	size_t src_size, dst_size;
	size_t n;

	/* Temporarily thaw the buffer ends we are about to touch; they are
	 * kept frozen against direct user access (see bufferevent_pair_new). */
	evbuffer_unfreeze(src->output, 1);
	evbuffer_unfreeze(dst->input, 0);

	if (dst->wm_read.high) {
		dst_size = evbuffer_get_length(dst->input);
		if (dst_size < dst->wm_read.high) {
			/* Move only enough to reach the watermark. */
			n = dst->wm_read.high - dst_size;
			evbuffer_remove_buffer(src->output, dst->input, n);
		} else {
			if (!ignore_wm)
				goto done;
			n = evbuffer_get_length(src->output);
			evbuffer_add_buffer(dst->input, src->output);
		}
	} else {
		n = evbuffer_get_length(src->output);
		evbuffer_add_buffer(dst->input, src->output);
	}

	if (n) {
		BEV_RESET_GENERIC_READ_TIMEOUT(dst);

		if (evbuffer_get_length(dst->output))
			BEV_RESET_GENERIC_WRITE_TIMEOUT(dst);
		else
			BEV_DEL_GENERIC_WRITE_TIMEOUT(dst);
	}

	src_size = evbuffer_get_length(src->output);
	dst_size = evbuffer_get_length(dst->input);

	/* Fire user callbacks per the usual watermark rules. */
	if (dst_size >= dst->wm_read.low) {
		_bufferevent_run_readcb(dst);
	}
	if (src_size <= src->wm_write.low) {
		_bufferevent_run_writecb(src);
	}
done:
	evbuffer_freeze(src->output, 1);
	evbuffer_freeze(dst->input, 0);
}
static inline int
be_pair_wants_to_talk(struct bufferevent_pair *src,
struct bufferevent_pair *dst)
{
return (downcast(src)->enabled & EV_WRITE) &&
(downcast(dst)->enabled & EV_READ) &&
!dst->bev.read_suspended &&
evbuffer_get_length(downcast(src)->output);
}
static void
be_pair_outbuf_cb(struct evbuffer *outbuf,
const struct evbuffer_cb_info *info, void *arg)
{
struct bufferevent_pair *bev_pair = arg;
struct bufferevent_pair *partner = bev_pair->partner;
incref_and_lock(downcast(bev_pair));
if (info->n_added > info->n_deleted && partner) {
/* We got more data. If the other side's reading, then
hand it over. */
if (be_pair_wants_to_talk(bev_pair, partner)) {
be_pair_transfer(downcast(bev_pair), downcast(partner), 0);
}
}
decref_and_unlock(downcast(bev_pair));
}
/* Enable reading and/or writing on one side of a pair: re-arm the
 * generic timeouts and immediately transfer any data that was already
 * waiting in either direction. */
static int
be_pair_enable(struct bufferevent *bufev, short events)
{
	struct bufferevent_pair *bev_p = upcast(bufev);
	struct bufferevent_pair *partner = bev_p->partner;

	incref_and_lock(bufev);

	if (events & EV_READ) {
		BEV_RESET_GENERIC_READ_TIMEOUT(bufev);
	}
	if ((events & EV_WRITE) && evbuffer_get_length(bufev->output))
		BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);

	/* We're starting to read! Does the other side have anything to write?*/
	if ((events & EV_READ) && partner &&
	    be_pair_wants_to_talk(partner, bev_p)) {
		be_pair_transfer(downcast(partner), bufev, 0);
	}
	/* We're starting to write! Does the other side want to read? */
	if ((events & EV_WRITE) && partner &&
	    be_pair_wants_to_talk(bev_p, partner)) {
		be_pair_transfer(bufev, downcast(partner), 0);
	}
	decref_and_unlock(bufev);
	return 0;
}
/* Disable reading and/or writing: just drop the matching generic
 * timeouts; a pair has no underlying fd events to remove. */
static int
be_pair_disable(struct bufferevent *bev, short events)
{
	if (events & EV_READ)
		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
	if (events & EV_WRITE)
		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
	return 0;
}
/* Tear down one side of a pair: unlink from the partner (so neither
 * side dangles on the other) and remove the generic timeout events. */
static void
be_pair_destruct(struct bufferevent *bev)
{
	struct bufferevent_pair *self = upcast(bev);

	if (self->partner != NULL) {
		self->partner->partner = NULL;
		self->partner = NULL;
	}

	_bufferevent_del_generic_timeout_cbs(bev);
}
/* Flush implementation for pair bufferevents: in BEV_FLUSH/BEV_FINISHED
 * mode, force pending data across to the partner regardless of
 * watermarks; BEV_FINISHED additionally signals EOF to the partner.
 * Returns 0 on success, -1 if the partner is gone.
 *
 * Fix: the original returned directly from the "!partner" and
 * "mode == BEV_NORMAL" branches after incref_and_lock(), leaking a
 * reference and leaving the lock held.  All paths now exit through
 * decref_and_unlock() (matches the upstream libevent 2.1 fix). */
static int
be_pair_flush(struct bufferevent *bev, short iotype,
    enum bufferevent_flush_mode mode)
{
	struct bufferevent_pair *bev_p = upcast(bev);
	struct bufferevent *partner;
	int rv = -1;

	incref_and_lock(bev);

	if (!bev_p->partner)
		goto done;

	if (mode == BEV_NORMAL) {
		rv = 0;
		goto done;
	}

	partner = downcast(bev_p->partner);

	if ((iotype & EV_READ) != 0)
		be_pair_transfer(partner, bev, 1);

	if ((iotype & EV_WRITE) != 0)
		be_pair_transfer(bev, partner, 1);

	if (mode == BEV_FINISHED) {
		_bufferevent_run_eventcb(partner, iotype|BEV_EVENT_EOF);
	}
	rv = 0;
done:
	decref_and_unlock(bev);
	return rv;
}
/** Return the partner bufferevent of BEV, or NULL if BEV is not a pair
 * bufferevent or its partner has already been freed. */
struct bufferevent *
bufferevent_pair_get_partner(struct bufferevent *bev)
{
	struct bufferevent_pair *self = upcast(bev);
	struct bufferevent *other = NULL;

	if (self == NULL)
		return NULL;

	incref_and_lock(bev);
	if (self->partner != NULL)
		other = downcast(self->partner);
	decref_and_unlock(bev);
	return other;
}
/* Method table for pair bufferevents.  ctrl operations are not
 * supported (no fd, no underlying transport). */
const struct bufferevent_ops bufferevent_ops_pair = {
	"pair_elt",
	evutil_offsetof(struct bufferevent_pair, bev.bev),
	be_pair_enable,
	be_pair_disable,
	be_pair_destruct,
	_bufferevent_generic_adj_timeouts,
	be_pair_flush,
	NULL, /* ctrl */
};

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,699 +0,0 @@
/*
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
* Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef _EVENT_HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef _EVENT_HAVE_NETINET_IN6_H
#include <netinet/in6.h>
#endif
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
#ifdef WIN32
#include "iocp-internal.h"
#endif
/* prototypes */
static int be_socket_enable(struct bufferevent *, short);
static int be_socket_disable(struct bufferevent *, short);
static void be_socket_destruct(struct bufferevent *);
static int be_socket_adj_timeouts(struct bufferevent *);
static int be_socket_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
static int be_socket_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
static void be_socket_setfd(struct bufferevent *, evutil_socket_t);
/* Method table for socket-based bufferevents. */
const struct bufferevent_ops bufferevent_ops_socket = {
	"socket",
	evutil_offsetof(struct bufferevent_private, bev),
	be_socket_enable,
	be_socket_disable,
	be_socket_destruct,
	be_socket_adj_timeouts,
	be_socket_flush,
	be_socket_ctrl,
};
#define be_socket_add(ev, t) \
_bufferevent_add_event((ev), (t))
/* Output-buffer callback: when data is appended while writing is
 * enabled, not suspended, and the write event is not already pending,
 * (re)schedule the write event so the data gets flushed to the fd. */
static void
bufferevent_socket_outbuf_cb(struct evbuffer *buf,
    const struct evbuffer_cb_info *cbinfo,
    void *arg)
{
	struct bufferevent *bufev = arg;
	struct bufferevent_private *bufev_p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

	if (cbinfo->n_added &&
	    (bufev->enabled & EV_WRITE) &&
	    !event_pending(&bufev->ev_write, EV_WRITE, NULL) &&
	    !bufev_p->write_suspended) {
		/* Somebody added data to the buffer, and we would like to
		 * write, and we were not writing.  So, start writing. */
		if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) == -1) {
			/* Should we log this? */
		}
	}
}
/* Read-event handler for socket bufferevents: read as much as the
 * watermarks and rate limits allow into the input buffer, then run the
 * user's read callback (or the event callback on error/EOF/timeout). */
static void
bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
{
	struct bufferevent *bufev = arg;
	struct bufferevent_private *bufev_p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	struct evbuffer *input;
	int res = 0;
	short what = BEV_EVENT_READING;
	ev_ssize_t howmuch = -1, readmax=-1;

	_bufferevent_incref_and_lock(bufev);

	if (event == EV_TIMEOUT) {
		/* Note that we only check for event==EV_TIMEOUT. If
		 * event==EV_TIMEOUT|EV_READ, we can safely ignore the
		 * timeout, since a read has occurred */
		what |= BEV_EVENT_TIMEOUT;
		goto error;
	}

	input = bufev->input;

	/*
	 * If we have a high watermark configured then we don't want to
	 * read more data than would make us reach the watermark.
	 */
	if (bufev->wm_read.high != 0) {
		howmuch = bufev->wm_read.high - evbuffer_get_length(input);
		/* we somehow lowered the watermark, stop reading */
		if (howmuch <= 0) {
			bufferevent_wm_suspend_read(bufev);
			goto done;
		}
	}
	/* Clamp further by the rate limiter's current allowance. */
	readmax = _bufferevent_get_read_max(bufev_p);
	if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
					       * uglifies this code. XXXX */
		howmuch = readmax;

	if (bufev_p->read_suspended)
		goto done;

	/* Thaw the input buffer's add-end just for the duration of the read. */
	evbuffer_unfreeze(input, 0);
	res = evbuffer_read(input, fd, (int)howmuch); /* XXXX evbuffer_read would do better to take and return ev_ssize_t */
	evbuffer_freeze(input, 0);

	if (res == -1) {
		int err = evutil_socket_geterror(fd);
		if (EVUTIL_ERR_RW_RETRIABLE(err))
			goto reschedule;
		/* error case */
		what |= BEV_EVENT_ERROR;
	} else if (res == 0) {
		/* eof case */
		what |= BEV_EVENT_EOF;
	}

	if (res <= 0)
		goto error;

	_bufferevent_decrement_read_buckets(bufev_p, res);

	/* Invoke the user callback - must always be called last */
	if (evbuffer_get_length(input) >= bufev->wm_read.low)
		_bufferevent_run_readcb(bufev);

	goto done;

 reschedule:
	goto done;

 error:
	bufferevent_disable(bufev, EV_READ);
	_bufferevent_run_eventcb(bufev, what);

 done:
	_bufferevent_decref_and_unlock(bufev);
}
/* Write-event (and connect-completion) handler for socket bufferevents.
 * First finishes an outstanding non-blocking connect, if any; then
 * flushes as much of the output buffer as the rate limits allow and
 * runs the user's write/event callbacks as appropriate. */
static void
bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
{
	struct bufferevent *bufev = arg;
	struct bufferevent_private *bufev_p =
	    EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
	int res = 0;
	short what = BEV_EVENT_WRITING;
	int connected = 0;
	ev_ssize_t atmost = -1;

	_bufferevent_incref_and_lock(bufev);

	if (event == EV_TIMEOUT) {
		/* Note that we only check for event==EV_TIMEOUT. If
		 * event==EV_TIMEOUT|EV_WRITE, we can safely ignore the
		 * timeout, since a write has occurred */
		what |= BEV_EVENT_TIMEOUT;
		goto error;
	}
	if (bufev_p->connecting) {
		int c = evutil_socket_finished_connecting(fd);
		/* we need to fake the error if the connection was refused
		 * immediately - usually connection to localhost on BSD */
		if (bufev_p->connection_refused) {
			bufev_p->connection_refused = 0;
			c = -1;
		}

		if (c == 0)
			goto done;

		bufev_p->connecting = 0;
		if (c < 0) {
			event_del(&bufev->ev_write);
			event_del(&bufev->ev_read);
			_bufferevent_run_eventcb(bufev, BEV_EVENT_ERROR);
			goto done;
		} else {
			connected = 1;
#ifdef WIN32
			if (BEV_IS_ASYNC(bufev)) {
				event_del(&bufev->ev_write);
				bufferevent_async_set_connected(bufev);
				_bufferevent_run_eventcb(bufev,
				    BEV_EVENT_CONNECTED);
				goto done;
			}
#endif
			_bufferevent_run_eventcb(bufev,
			    BEV_EVENT_CONNECTED);
			if (!(bufev->enabled & EV_WRITE) ||
			    bufev_p->write_suspended) {
				event_del(&bufev->ev_write);
				goto done;
			}
		}
	}

	/* Rate limiting: how much may we write right now? */
	atmost = _bufferevent_get_write_max(bufev_p);

	if (bufev_p->write_suspended)
		goto done;

	if (evbuffer_get_length(bufev->output)) {
		/* Thaw the output buffer's drain-end only for the write. */
		evbuffer_unfreeze(bufev->output, 1);
		res = evbuffer_write_atmost(bufev->output, fd, atmost);
		evbuffer_freeze(bufev->output, 1);
		if (res == -1) {
			int err = evutil_socket_geterror(fd);
			if (EVUTIL_ERR_RW_RETRIABLE(err))
				goto reschedule;
			what |= BEV_EVENT_ERROR;
		} else if (res == 0) {
			/* eof case
			   XXXX Actually, a 0 on write doesn't indicate
			   an EOF. An ECONNRESET might be more typical.
			 */
			what |= BEV_EVENT_EOF;
		}
		if (res <= 0)
			goto error;

		_bufferevent_decrement_write_buckets(bufev_p, res);
	}

	if (evbuffer_get_length(bufev->output) == 0) {
		/* Nothing left to send: stop polling for writability. */
		event_del(&bufev->ev_write);
	}

	/*
	 * Invoke the user callback if our buffer is drained or below the
	 * low watermark.
	 */
	if ((res || !connected) &&
	    evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
		_bufferevent_run_writecb(bufev);
	}

	goto done;

 reschedule:
	if (evbuffer_get_length(bufev->output) == 0) {
		event_del(&bufev->ev_write);
	}
	goto done;

 error:
	bufferevent_disable(bufev, EV_WRITE);
	_bufferevent_run_eventcb(bufev, what);

 done:
	_bufferevent_decref_and_unlock(bufev);
}
/** Create a socket-based bufferevent on BASE for FD (which may be -1
 * and set later, e.g. by bufferevent_socket_connect).  Returns NULL on
 * allocation/initialization failure. */
struct bufferevent *
bufferevent_socket_new(struct event_base *base, evutil_socket_t fd,
    int options)
{
	struct bufferevent_private *bufev_p;
	struct bufferevent *bufev;

#ifdef WIN32
	/* On an IOCP-enabled base, use the overlapped-I/O implementation
	 * instead of this readiness-based one. */
	if (base && event_base_get_iocp(base))
		return bufferevent_async_new(base, fd, options);
#endif

	if ((bufev_p = mm_calloc(1, sizeof(struct bufferevent_private)))== NULL)
		return NULL;

	if (bufferevent_init_common(bufev_p, base, &bufferevent_ops_socket,
	    options) < 0) {
		mm_free(bufev_p);
		return NULL;
	}
	bufev = &bufev_p->bev;
	evbuffer_set_flags(bufev->output, EVBUFFER_FLAG_DRAINS_TO_FD);

	event_assign(&bufev->ev_read, bufev->ev_base, fd,
	    EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
	event_assign(&bufev->ev_write, bufev->ev_base, fd,
	    EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);

	evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev);

	/* Freeze the ends that only the I/O machinery should touch; the
	 * read/write callbacks unfreeze them around each fd operation. */
	evbuffer_freeze(bufev->input, 0);
	evbuffer_freeze(bufev->output, 1);

	return bufev;
}
/** Start a connect() attempt on BEV's socket, creating a nonblocking
 * socket first if BEV has none.  Returns 0 if the attempt was launched
 * (completion is reported via the event callback as BEV_EVENT_CONNECTED
 * or BEV_EVENT_ERROR) and -1 on immediate failure. */
int
bufferevent_socket_connect(struct bufferevent *bev,
    struct sockaddr *sa, int socklen)
{
	struct bufferevent_private *bufev_p =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);

	evutil_socket_t fd;
	int r = 0;
	int result=-1;
	int ownfd = 0;

	_bufferevent_incref_and_lock(bev);

	if (!bufev_p)
		goto done;

	fd = bufferevent_getfd(bev);
	if (fd < 0) {
		/* No socket yet: create our own nonblocking one. */
		if (!sa)
			goto done;
		fd = socket(sa->sa_family, SOCK_STREAM, 0);
		if (fd < 0)
			goto done;
		if (evutil_make_socket_nonblocking(fd)<0)
			goto done;
		ownfd = 1;
	}
	if (sa) {
#ifdef WIN32
		if (bufferevent_async_can_connect(bev)) {
			bufferevent_setfd(bev, fd);
			r = bufferevent_async_connect(bev, fd, sa, socklen);
			if (r < 0)
				goto freesock;
			bufev_p->connecting = 1;
			result = 0;
			goto done;
		} else
#endif
		r = evutil_socket_connect(&fd, sa, socklen);
		if (r < 0)
			goto freesock;
	}
#ifdef WIN32
	/* ConnectEx() isn't always around, even when IOCP is enabled.
	 * Here, we borrow the socket object's write handler to fall back
	 * on a non-blocking connect() when ConnectEx() is unavailable. */
	if (BEV_IS_ASYNC(bev)) {
		event_assign(&bev->ev_write, bev->ev_base, fd,
		    EV_WRITE|EV_PERSIST, bufferevent_writecb, bev);
	}
#endif
	bufferevent_setfd(bev, fd);
	if (r == 0) {
		/* Connect in progress: wait for writability; the write
		 * callback completes the handshake. */
		if (! be_socket_enable(bev, EV_WRITE)) {
			bufev_p->connecting = 1;
			result = 0;
			goto done;
		}
	} else if (r == 1) {
		/* The connect succeeded already. How very BSD of it. */
		result = 0;
		bufev_p->connecting = 1;
		event_active(&bev->ev_write, EV_WRITE, 1);
	} else {
		/* The connect failed already.  How very BSD of it. */
		bufev_p->connection_refused = 1;
		bufev_p->connecting = 1;
		result = 0;
		event_active(&bev->ev_write, EV_WRITE, 1);
	}

	goto done;

freesock:
	_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
	if (ownfd)
		evutil_closesocket(fd);
	/* do something about the error? */
done:
	_bufferevent_decref_and_unlock(bev);
	return result;
}
/* Completion callback for the async getaddrinfo lookup started by
 * bufferevent_socket_connect_hostname().  Releases the reference taken
 * there and, on success, starts the actual connect attempt with the
 * first returned address. */
static void
bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
    void *arg)
{
	struct bufferevent *bev = arg;
	struct bufferevent_private *bev_p =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
	int r;
	BEV_LOCK(bev);

	bufferevent_unsuspend_write(bev, BEV_SUSPEND_LOOKUP);
	bufferevent_unsuspend_read(bev, BEV_SUSPEND_LOOKUP);

	if (result != 0) {
		/* Lookup failed: record the DNS error for
		 * bufferevent_socket_get_dns_error() and report it. */
		bev_p->dns_error = result;
		_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
		_bufferevent_decref_and_unlock(bev);
		if (ai)
			evutil_freeaddrinfo(ai);
		return;
	}

	/* XXX use the other addrinfos? */
	/* XXX use this return value */
	r = bufferevent_socket_connect(bev, ai->ai_addr, (int)ai->ai_addrlen);
	(void)r;
	_bufferevent_decref_and_unlock(bev);
	evutil_freeaddrinfo(ai);
}
/** Resolve HOSTNAME (via EVDNS_BASE, asynchronously) and then connect
 * BEV to it on PORT.  FAMILY must be AF_INET, AF_INET6, or AF_UNSPEC.
 * Returns 0 if the lookup was launched, -1 on bad arguments or lookup
 * failure; DNS errors are retrievable via
 * bufferevent_socket_get_dns_error(). */
int
bufferevent_socket_connect_hostname(struct bufferevent *bev,
    struct evdns_base *evdns_base, int family, const char *hostname, int port)
{
	char portbuf[10];
	struct evutil_addrinfo hint;
	int err;
	struct bufferevent_private *bev_p =
	    EVUTIL_UPCAST(bev, struct bufferevent_private, bev);

	if (family != AF_INET && family != AF_INET6 && family != AF_UNSPEC)
		return -1;
	if (port < 1 || port > 65535)
		return -1;

	BEV_LOCK(bev);
	bev_p->dns_error = 0;
	BEV_UNLOCK(bev);

	evutil_snprintf(portbuf, sizeof(portbuf), "%d", port);

	memset(&hint, 0, sizeof(hint));
	hint.ai_family = family;
	hint.ai_protocol = IPPROTO_TCP;
	hint.ai_socktype = SOCK_STREAM;

	/* Pause I/O until the lookup completes; the getaddrinfo callback
	 * unsuspends and performs the actual connect. */
	bufferevent_suspend_write(bev, BEV_SUSPEND_LOOKUP);
	bufferevent_suspend_read(bev, BEV_SUSPEND_LOOKUP);

	/* This reference is handed to the getaddrinfo callback, which
	 * releases it with _bufferevent_decref_and_unlock(). */
	bufferevent_incref(bev);
	err = evutil_getaddrinfo_async(evdns_base, hostname, portbuf,
	    &hint, bufferevent_connect_getaddrinfo_cb, bev);

	if (err == 0) {
		return 0;
	} else {
		/* NOTE(review): the reference taken above is not released
		 * on this path — presumably the callback already ran and
		 * dropped it; verify against evutil_getaddrinfo_async's
		 * failure contract. */
		bufferevent_unsuspend_write(bev, BEV_SUSPEND_LOOKUP);
		bufferevent_unsuspend_read(bev, BEV_SUSPEND_LOOKUP);
		return -1;
	}
}
/* Return the evdns error code recorded by the most recent hostname
 * lookup on this bufferevent (0 if none). */
int
bufferevent_socket_get_dns_error(struct bufferevent *bev)
{
    struct bufferevent_private *priv =
        EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
    int err;

    BEV_LOCK(bev);
    err = priv->dns_error;
    BEV_UNLOCK(bev);

    return err;
}
/*
 * Create a new buffered event object.
 *
 * The read callback is invoked whenever we read new data.
 * The write callback is invoked whenever the output buffer is drained.
 * The error callback is invoked on a write/read error or on EOF.
 *
 * Both the read and write callbacks may be NULL.  The error callback
 * must not be NULL; it always has to be provided.
 */
/* Legacy constructor: build a socket bufferevent on the default base
 * and install the given callbacks in one step. */
struct bufferevent *
bufferevent_new(evutil_socket_t fd,
    bufferevent_data_cb readcb, bufferevent_data_cb writecb,
    bufferevent_event_cb eventcb, void *cbarg)
{
    struct bufferevent *result;

    result = bufferevent_socket_new(NULL, fd, 0);
    if (result == NULL)
        return NULL;

    bufferevent_setcb(result, readcb, writecb, eventcb, cbarg);
    return result;
}
/* Enable reading and/or writing on a socket bufferevent by (re)adding
 * the corresponding events with their configured timeouts. */
static int
be_socket_enable(struct bufferevent *bufev, short event)
{
    if ((event & EV_READ) &&
        be_socket_add(&bufev->ev_read, &bufev->timeout_read) == -1)
        return -1;
    if ((event & EV_WRITE) &&
        be_socket_add(&bufev->ev_write, &bufev->timeout_write) == -1)
        return -1;
    return 0;
}
/* Disable reading and/or writing on a socket bufferevent by removing
 * the corresponding events. */
static int
be_socket_disable(struct bufferevent *bufev, short event)
{
    struct bufferevent_private *priv =
        EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);

    if ((event & EV_READ) && event_del(&bufev->ev_read) == -1)
        return -1;

    /* Keep the write event installed while a connect is in progress. */
    if ((event & EV_WRITE) && !priv->connecting &&
        event_del(&bufev->ev_write) == -1)
        return -1;

    return 0;
}
/* Destructor for socket bufferevents: tear down both events and close
 * the descriptor when this bufferevent owns it. */
static void
be_socket_destruct(struct bufferevent *bufev)
{
    struct bufferevent_private *priv =
        EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
    evutil_socket_t sock;

    EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);

    sock = event_get_fd(&bufev->ev_read);

    event_del(&bufev->ev_read);
    event_del(&bufev->ev_write);

    /* Close the descriptor only if we were told we own it. */
    if (sock >= 0 && (priv->options & BEV_OPT_CLOSE_ON_FREE))
        EVUTIL_CLOSESOCKET(sock);
}
/* Re-apply the configured read/write timeouts to whichever events are
 * currently pending.  Returns 0 on success, -1 if any re-add failed. */
static int
be_socket_adj_timeouts(struct bufferevent *bufev)
{
    int rc = 0;

    if (event_pending(&bufev->ev_read, EV_READ, NULL)) {
        if (be_socket_add(&bufev->ev_read, &bufev->timeout_read) < 0)
            rc = -1;
    }
    if (event_pending(&bufev->ev_write, EV_WRITE, NULL)) {
        if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) < 0)
            rc = -1;
    }
    return rc;
}
/* Socket bufferevents have no buffering of their own beyond the
 * evbuffers, so flushing is a no-op that always reports success. */
static int
be_socket_flush(struct bufferevent *bev, short iotype,
    enum bufferevent_flush_mode mode)
{
    (void)bev;
    (void)iotype;
    (void)mode;
    return 0;
}
/* Re-target the bufferevent's read/write events at a new fd.  The old
 * events are removed before reassignment; the events are only re-enabled
 * (per the previously enabled EV_READ/EV_WRITE set) when the new fd is
 * valid. */
static void
be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd)
{
    BEV_LOCK(bufev);
    EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
    event_del(&bufev->ev_read);
    event_del(&bufev->ev_write);
    event_assign(&bufev->ev_read, bufev->ev_base, fd,
        EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
    event_assign(&bufev->ev_write, bufev->ev_base, fd,
        EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);
    if (fd >= 0)
        bufferevent_enable(bufev, bufev->enabled);
    BEV_UNLOCK(bufev);
}
/* XXXX Should non-socket bufferevents support this? */
/* Set the dispatch priority of both underlying events.  Only supported
 * for socket bufferevents; returns 0 on success, -1 otherwise. */
int
bufferevent_priority_set(struct bufferevent *bufev, int priority)
{
    int rc = -1;

    BEV_LOCK(bufev);
    if (bufev->be_ops == &bufferevent_ops_socket &&
        event_priority_set(&bufev->ev_read, priority) != -1 &&
        event_priority_set(&bufev->ev_write, priority) != -1)
        rc = 0;
    BEV_UNLOCK(bufev);

    return rc;
}
/* XXXX Should non-socket bufferevents support this? */
/* Move a socket bufferevent onto a different event_base.  Returns the
 * result of the last event_base_set() call, or -1 for non-socket
 * bufferevents. */
int
bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
{
    int rc = -1;

    BEV_LOCK(bufev);
    if (bufev->be_ops != &bufferevent_ops_socket)
        goto out;

    bufev->ev_base = base;

    rc = event_base_set(base, &bufev->ev_read);
    if (rc != -1)
        rc = event_base_set(base, &bufev->ev_write);
out:
    BEV_UNLOCK(bufev);
    return rc;
}
/* Backend control hook for socket bufferevents: supports setting and
 * querying the underlying fd; everything else is unsupported (-1). */
static int
be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    union bufferevent_ctrl_data *data)
{
    int rc = -1;

    if (op == BEV_CTRL_SET_FD) {
        be_socket_setfd(bev, data->fd);
        rc = 0;
    } else if (op == BEV_CTRL_GET_FD) {
        data->fd = event_get_fd(&bev->ev_read);
        rc = 0;
    }
    /* BEV_CTRL_GET_UNDERLYING, BEV_CTRL_CANCEL_ALL, and anything else
     * are not meaningful for the plain socket backend. */
    return rc;
}

Просмотреть файл

@ -1,101 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CHANGELIST_H_
#define _CHANGELIST_H_

/*
  A "changelist" is a list of all the fd status changes that should be made
  between calls to the backend's dispatch function.  There are a few reasons
  that a backend would want to queue changes like this rather than processing
  them immediately.

    1) Sometimes applications will add and delete the same event more than
       once between calls to dispatch.  Processing these changes immediately
       is needless, and potentially expensive (especially if we're on a system
       that makes one syscall per changed event).

    2) Sometimes we can coalesce multiple changes on the same fd into a single
       syscall if we know about them in advance.  For example, epoll can do an
       add and a delete at the same time, but only if we have found out about
       both of them before we tell epoll.

    3) Sometimes adding an event that we immediately delete can cause
       unintended consequences: in kqueue, this makes pending events get
       reported spuriously.
 */

#include "event2/util.h"

/** Represents a pending change to the events watched on a single fd or
 * signal: what was enabled before, and which read/write subscriptions
 * to add or remove before the next dispatch. */
struct event_change {
    /** The fd or signal whose events are to be changed */
    evutil_socket_t fd;
    /* The events that were enabled on the fd before any of these changes
       were made. May include EV_READ or EV_WRITE. */
    short old_events;

    /* The changes that we want to make in reading and writing on this fd.
     * If this is a signal, then read_change has EV_CHANGE_SIGNAL set,
     * and write_change is unused. */
    ev_uint8_t read_change;
    ev_uint8_t write_change;
};

/* Flags for read_change and write_change. */

/* If set, add the event. */
#define EV_CHANGE_ADD     0x01
/* If set, delete the event.  Exclusive with EV_CHANGE_ADD */
#define EV_CHANGE_DEL     0x02
/* If set, this event refers a signal, not an fd. */
#define EV_CHANGE_SIGNAL  EV_SIGNAL
/* Set for persistent events.  Currently not used. */
#define EV_CHANGE_PERSIST EV_PERSIST
/* Set for adding edge-triggered events. */
#define EV_CHANGE_ET      EV_ET

/* The value of fdinfo_size that a backend should use if it is letting
 * changelist handle its add and delete functions. */
#define EVENT_CHANGELIST_FDINFO_SIZE sizeof(int)

/** Set up the data fields in a changelist. */
void event_changelist_init(struct event_changelist *changelist);
/** Remove every change in the changelist, and make corresponding changes
 * in the event maps in the base.  This function is generally used right
 * after making all the changes in the changelist. */
void event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base);
/** Free all memory held in a changelist. */
void event_changelist_freemem(struct event_changelist *changelist);

/** Implementation of eventop_add that queues the event in a changelist. */
int event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p);
/** Implementation of eventop_del that queues the event in a changelist. */
int event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p);

#endif

Просмотреть файл

@ -1,488 +0,0 @@
/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
/*
 * Singly-linked List definitions.
 */
#define SLIST_HEAD(name, type) \
struct name { \
    struct type *slh_first; /* first element */ \
}

#define SLIST_HEAD_INITIALIZER(head) \
    { NULL }

#ifndef WIN32
#define SLIST_ENTRY(type) \
struct { \
    struct type *sle_next; /* next element */ \
}
#endif

/*
 * Singly-linked List access methods.
 */
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_END(head) NULL
#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)

#define SLIST_FOREACH(var, head, field) \
    for((var) = SLIST_FIRST(head); \
        (var) != SLIST_END(head); \
        (var) = SLIST_NEXT(var, field))

/*
 * Singly-linked List functions.
 */
/* Wrapped in do/while(0) so the macro behaves as a single statement;
 * the previous bare-brace form broke inside an unbraced if/else. */
#define SLIST_INIT(head) do { \
    SLIST_FIRST(head) = SLIST_END(head); \
} while (0)

/* Insert elm after slistelm. */
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
    (elm)->field.sle_next = (slistelm)->field.sle_next; \
    (slistelm)->field.sle_next = (elm); \
} while (0)

/* Insert elm at the head of the list. */
#define SLIST_INSERT_HEAD(head, elm, field) do { \
    (elm)->field.sle_next = (head)->slh_first; \
    (head)->slh_first = (elm); \
} while (0)

/* Unlink the first element; the list must be non-empty. */
#define SLIST_REMOVE_HEAD(head, field) do { \
    (head)->slh_first = (head)->slh_first->field.sle_next; \
} while (0)
/*
 * List definitions.
 */
#define LIST_HEAD(name, type) \
struct name { \
    struct type *lh_first; /* first element */ \
}

#define LIST_HEAD_INITIALIZER(head) \
    { NULL }

#define LIST_ENTRY(type) \
struct { \
    struct type *le_next; /* next element */ \
    struct type **le_prev; /* address of previous next element */ \
}

/*
 * List access methods
 */
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_END(head) NULL
#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
#define LIST_NEXT(elm, field) ((elm)->field.le_next)

#define LIST_FOREACH(var, head, field) \
    for((var) = LIST_FIRST(head); \
        (var)!= LIST_END(head); \
        (var) = LIST_NEXT(var, field))

/*
 * List functions.
 */
/* Make the list empty. */
#define LIST_INIT(head) do { \
    LIST_FIRST(head) = LIST_END(head); \
} while (0)

/* Insert elm after listelm. */
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
    if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
        (listelm)->field.le_next->field.le_prev = \
            &(elm)->field.le_next; \
    (listelm)->field.le_next = (elm); \
    (elm)->field.le_prev = &(listelm)->field.le_next; \
} while (0)

/* Insert elm before listelm. */
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
    (elm)->field.le_prev = (listelm)->field.le_prev; \
    (elm)->field.le_next = (listelm); \
    *(listelm)->field.le_prev = (elm); \
    (listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)

/* Insert elm at the head of the list. */
#define LIST_INSERT_HEAD(head, elm, field) do { \
    if (((elm)->field.le_next = (head)->lh_first) != NULL) \
        (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
    (head)->lh_first = (elm); \
    (elm)->field.le_prev = &(head)->lh_first; \
} while (0)

/* Unlink elm; no head needed thanks to the back-pointer. */
#define LIST_REMOVE(elm, field) do { \
    if ((elm)->field.le_next != NULL) \
        (elm)->field.le_next->field.le_prev = \
            (elm)->field.le_prev; \
    *(elm)->field.le_prev = (elm)->field.le_next; \
} while (0)

/* Replace elm with elm2 in place. */
#define LIST_REPLACE(elm, elm2, field) do { \
    if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
        (elm2)->field.le_next->field.le_prev = \
            &(elm2)->field.le_next; \
    (elm2)->field.le_prev = (elm)->field.le_prev; \
    *(elm2)->field.le_prev = (elm2); \
} while (0)
/*
 * Simple queue definitions.
 */
#define SIMPLEQ_HEAD(name, type) \
struct name { \
    struct type *sqh_first; /* first element */ \
    struct type **sqh_last; /* addr of last next element */ \
}

#define SIMPLEQ_HEAD_INITIALIZER(head) \
    { NULL, &(head).sqh_first }

#define SIMPLEQ_ENTRY(type) \
struct { \
    struct type *sqe_next; /* next element */ \
}

/*
 * Simple queue access methods.
 */
#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define SIMPLEQ_END(head) NULL
#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)

#define SIMPLEQ_FOREACH(var, head, field) \
    for((var) = SIMPLEQ_FIRST(head); \
        (var) != SIMPLEQ_END(head); \
        (var) = SIMPLEQ_NEXT(var, field))

/*
 * Simple queue functions.
 */
/* Make the queue empty and point the tail at the head slot. */
#define SIMPLEQ_INIT(head) do { \
    (head)->sqh_first = NULL; \
    (head)->sqh_last = &(head)->sqh_first; \
} while (0)

/* Insert elm at the front of the queue. */
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
    if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
        (head)->sqh_last = &(elm)->field.sqe_next; \
    (head)->sqh_first = (elm); \
} while (0)

/* Append elm at the end of the queue. */
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
    (elm)->field.sqe_next = NULL; \
    *(head)->sqh_last = (elm); \
    (head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)

/* Insert elm after listelm, fixing the tail pointer if needed. */
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
    if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
        (head)->sqh_last = &(elm)->field.sqe_next; \
    (listelm)->field.sqe_next = (elm); \
} while (0)

/* Unlink the first element; elm must be that first element. */
#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
    if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
        (head)->sqh_last = &(head)->sqh_first; \
} while (0)
/*
 * Tail queue definitions.
 */
#define TAILQ_HEAD(name, type) \
struct name { \
    struct type *tqh_first; /* first element */ \
    struct type **tqh_last; /* addr of last next element */ \
}

#define TAILQ_HEAD_INITIALIZER(head) \
    { NULL, &(head).tqh_first }

#define TAILQ_ENTRY(type) \
struct { \
    struct type *tqe_next; /* next element */ \
    struct type **tqe_prev; /* address of previous next element */ \
}

/*
 * tail queue access methods
 */
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
/* Walk backwards by reinterpreting the pointer pair at tqh_last /
 * tqe_prev as a struct headname; relies on head and entry sharing the
 * same two-pointer layout. */
#define TAILQ_LAST(head, headname) \
    (*(((struct headname *)((head)->tqh_last))->tqh_last))
/* XXX */
#define TAILQ_PREV(elm, headname, field) \
    (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_EMPTY(head) \
    (TAILQ_FIRST(head) == TAILQ_END(head))

#define TAILQ_FOREACH(var, head, field) \
    for((var) = TAILQ_FIRST(head); \
        (var) != TAILQ_END(head); \
        (var) = TAILQ_NEXT(var, field))

#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
    for((var) = TAILQ_LAST(head, headname); \
        (var) != TAILQ_END(head); \
        (var) = TAILQ_PREV(var, headname, field))

/*
 * Tail queue functions.
 */
/* Make the queue empty and point the tail at the head slot. */
#define TAILQ_INIT(head) do { \
    (head)->tqh_first = NULL; \
    (head)->tqh_last = &(head)->tqh_first; \
} while (0)

/* Insert elm at the front of the queue. */
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
    if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
        (head)->tqh_first->field.tqe_prev = \
            &(elm)->field.tqe_next; \
    else \
        (head)->tqh_last = &(elm)->field.tqe_next; \
    (head)->tqh_first = (elm); \
    (elm)->field.tqe_prev = &(head)->tqh_first; \
} while (0)

/* Append elm at the end of the queue. */
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
    (elm)->field.tqe_next = NULL; \
    (elm)->field.tqe_prev = (head)->tqh_last; \
    *(head)->tqh_last = (elm); \
    (head)->tqh_last = &(elm)->field.tqe_next; \
} while (0)

/* Insert elm after listelm, fixing the tail pointer if needed. */
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
    if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
        (elm)->field.tqe_next->field.tqe_prev = \
            &(elm)->field.tqe_next; \
    else \
        (head)->tqh_last = &(elm)->field.tqe_next; \
    (listelm)->field.tqe_next = (elm); \
    (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (0)

/* Insert elm before listelm; no head needed thanks to tqe_prev. */
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
    (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
    (elm)->field.tqe_next = (listelm); \
    *(listelm)->field.tqe_prev = (elm); \
    (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)

/* Unlink elm, fixing the tail pointer if elm was last. */
#define TAILQ_REMOVE(head, elm, field) do { \
    if (((elm)->field.tqe_next) != NULL) \
        (elm)->field.tqe_next->field.tqe_prev = \
            (elm)->field.tqe_prev; \
    else \
        (head)->tqh_last = (elm)->field.tqe_prev; \
    *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (0)

/* Replace elm with elm2 in place. */
#define TAILQ_REPLACE(head, elm, elm2, field) do { \
    if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
        (elm2)->field.tqe_next->field.tqe_prev = \
            &(elm2)->field.tqe_next; \
    else \
        (head)->tqh_last = &(elm2)->field.tqe_next; \
    (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
    *(elm2)->field.tqe_prev = (elm2); \
} while (0)
/*
 * Circular queue definitions.
 */
#define CIRCLEQ_HEAD(name, type) \
struct name { \
    struct type *cqh_first; /* first element */ \
    struct type *cqh_last; /* last element */ \
}

#define CIRCLEQ_HEAD_INITIALIZER(head) \
    { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }

#define CIRCLEQ_ENTRY(type) \
struct { \
    struct type *cqe_next; /* next element */ \
    struct type *cqe_prev; /* previous element */ \
}

/*
 * Circular queue access methods
 */
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
/* End-of-queue sentinel: the address of the head itself. */
#define CIRCLEQ_END(head) ((void *)(head))
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define CIRCLEQ_EMPTY(head) \
    (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))

#define CIRCLEQ_FOREACH(var, head, field) \
    for((var) = CIRCLEQ_FIRST(head); \
        (var) != CIRCLEQ_END(head); \
        (var) = CIRCLEQ_NEXT(var, field))

#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
    for((var) = CIRCLEQ_LAST(head); \
        (var) != CIRCLEQ_END(head); \
        (var) = CIRCLEQ_PREV(var, field))

/*
 * Circular queue functions.
 */
/* Make the queue empty (both ends point at the head sentinel). */
#define CIRCLEQ_INIT(head) do { \
    (head)->cqh_first = CIRCLEQ_END(head); \
    (head)->cqh_last = CIRCLEQ_END(head); \
} while (0)

/* Insert elm after listelm. */
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
    (elm)->field.cqe_next = (listelm)->field.cqe_next; \
    (elm)->field.cqe_prev = (listelm); \
    if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
        (head)->cqh_last = (elm); \
    else \
        (listelm)->field.cqe_next->field.cqe_prev = (elm); \
    (listelm)->field.cqe_next = (elm); \
} while (0)

/* Insert elm before listelm. */
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
    (elm)->field.cqe_next = (listelm); \
    (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
    if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
        (head)->cqh_first = (elm); \
    else \
        (listelm)->field.cqe_prev->field.cqe_next = (elm); \
    (listelm)->field.cqe_prev = (elm); \
} while (0)

/* Insert elm at the front of the queue. */
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
    (elm)->field.cqe_next = (head)->cqh_first; \
    (elm)->field.cqe_prev = CIRCLEQ_END(head); \
    if ((head)->cqh_last == CIRCLEQ_END(head)) \
        (head)->cqh_last = (elm); \
    else \
        (head)->cqh_first->field.cqe_prev = (elm); \
    (head)->cqh_first = (elm); \
} while (0)

/* Append elm at the end of the queue. */
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
    (elm)->field.cqe_next = CIRCLEQ_END(head); \
    (elm)->field.cqe_prev = (head)->cqh_last; \
    if ((head)->cqh_first == CIRCLEQ_END(head)) \
        (head)->cqh_first = (elm); \
    else \
        (head)->cqh_last->field.cqe_next = (elm); \
    (head)->cqh_last = (elm); \
} while (0)

/* Unlink elm from the queue. */
#define CIRCLEQ_REMOVE(head, elm, field) do { \
    if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
        (head)->cqh_last = (elm)->field.cqe_prev; \
    else \
        (elm)->field.cqe_next->field.cqe_prev = \
            (elm)->field.cqe_prev; \
    if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
        (head)->cqh_first = (elm)->field.cqe_next; \
    else \
        (elm)->field.cqe_prev->field.cqe_next = \
            (elm)->field.cqe_next; \
} while (0)

/* Replace elm with elm2 in place.
 * Fixed: head is a pointer like in every other macro here, so the head
 * updates must use (head)-> — the old (head). form did not compile. */
#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
    if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
        CIRCLEQ_END(head)) \
        (head)->cqh_last = (elm2); \
    else \
        (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
    if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
        CIRCLEQ_END(head)) \
        (head)->cqh_first = (elm2); \
    else \
        (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
} while (0)
#endif /* !_SYS_QUEUE_H_ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,100 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DEFER_INTERNAL_H_
#define _DEFER_INTERNAL_H_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"

#include <sys/queue.h>

struct deferred_cb;

typedef void (*deferred_cb_fn)(struct deferred_cb *, void *);

/** A deferred_cb is a callback that can be scheduled to run as part of
 * an event_base's event_loop, rather than running immediately. */
struct deferred_cb {
    /** Links to the adjacent active (pending) deferred_cb objects. */
    TAILQ_ENTRY (deferred_cb) cb_next;
    /** True iff this deferred_cb is pending in an event_base. */
    unsigned queued : 1;
    /** The function to execute when the callback runs. */
    deferred_cb_fn cb;
    /** The function's second argument. */
    void *arg;
};

/** A deferred_cb_queue is a list of deferred_cb that we can add to and run. */
struct deferred_cb_queue {
    /** Lock used to protect the queue. */
    void *lock;

    /** How many entries are in the queue? */
    int active_count;

    /** Function called when adding to the queue from another thread. */
    void (*notify_fn)(struct deferred_cb_queue *, void *);
    void *notify_arg;

    /** Deferred callback management: a list of deferred callbacks to
     * run as the active events. */
    TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list;
};

/**
   Initialize an empty, non-pending deferred_cb.

   @param deferred The deferred_cb structure to initialize.
   @param cb The function to run when the deferred_cb executes.
   @param arg The function's second argument.
 */
void event_deferred_cb_init(struct deferred_cb *, deferred_cb_fn, void *);
/**
   Cancel a deferred_cb if it is currently scheduled in an event_base.
 */
void event_deferred_cb_cancel(struct deferred_cb_queue *, struct deferred_cb *);
/**
   Activate a deferred_cb if it is not currently scheduled in an event_base.
 */
void event_deferred_cb_schedule(struct deferred_cb_queue *, struct deferred_cb *);

#define LOCK_DEFERRED_QUEUE(q) \
    EVLOCK_LOCK((q)->lock, 0)
#define UNLOCK_DEFERRED_QUEUE(q) \
    EVLOCK_UNLOCK((q)->lock, 0)

/** Set up the data fields in a deferred_cb_queue. */
void event_deferred_cb_queue_init(struct deferred_cb_queue *);
/** Return the deferred_cb_queue associated with an event_base. */
struct deferred_cb_queue *event_base_get_deferred_cb_queue(struct event_base *);

/* Fixed: these last two declarations previously appeared after the
 * extern "C" closing brace, giving them C++ linkage in C++ builds;
 * the #endif comment also named the wrong include guard. */
#ifdef __cplusplus
}
#endif

#endif /* _DEFER_INTERNAL_H_ */

Просмотреть файл

@ -1,306 +0,0 @@
/*
* Copyright 2000-2009 Niels Provos <provos@citi.umich.edu>
* Copyright 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#include <sys/types.h>
#include <sys/resource.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/devpoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "event-internal.h"
#include "evsignal-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "evthread-internal.h"
/* Per-event_base state for the Solaris /dev/poll backend. */
struct devpollop {
    struct pollfd *events;  /* result buffer that DP_POLL fills in */
    int nevents;            /* capacity of both arrays, in entries */
    int dpfd;               /* fd of the open /dev/poll device */
    struct pollfd *changes; /* queued changes, flushed with pwrite() */
    int nchanges;           /* number of queued entries in `changes` */
};

static void *devpoll_init(struct event_base *);
static int devpoll_add(struct event_base *, int fd, short old, short events, void *);
static int devpoll_del(struct event_base *, int fd, short old, short events, void *);
static int devpoll_dispatch(struct event_base *, struct timeval *);
static void devpoll_dealloc(struct event_base *);

/* Backend method table registered with the event core. */
const struct eventop devpollops = {
    "devpoll",
    devpoll_init,
    devpoll_add,
    devpoll_del,
    devpoll_dispatch,
    devpoll_dealloc,
    1, /* need reinit */
    EV_FEATURE_FDS|EV_FEATURE_O1,
    0
};

/* Default sizing for the events/changes arrays when RLIMIT_NOFILE is
 * unlimited. */
#define NEVENT 32000
/* Flush all queued pollfd change records to the /dev/poll device.
 * Returns 0 on success, -1 if the write failed (changes are kept). */
static int
devpoll_commit(struct devpollop *devpollop)
{
    size_t nbytes = sizeof(struct pollfd) * devpollop->nchanges;

    /*
     * Due to a bug in Solaris, we have to use pwrite with an offset of 0.
     * Write is limited to 2GB of data, until it will fail.
     */
    if (pwrite(devpollop->dpfd, devpollop->changes, nbytes, 0) == -1)
        return (-1);

    devpollop->nchanges = 0;
    return (0);
}
/* Append one change record for (fd, events), flushing the change buffer
 * to the device first if it is already full. */
static int
devpoll_queue(struct devpollop *devpollop, int fd, int events)
{
    struct pollfd *slot;

    /* Change buffer is full: commit it to /dev/poll before adding more. */
    if (devpollop->nchanges >= devpollop->nevents &&
        devpoll_commit(devpollop) != 0)
        return (-1);

    slot = &devpollop->changes[devpollop->nchanges++];
    slot->fd = fd;
    slot->events = events;
    slot->revents = 0;

    return (0);
}
/* eventop::init for /dev/poll: open the device and allocate the event
 * and change buffers, sized from RLIMIT_NOFILE (fallback NEVENT).
 * Returns the new devpollop, or NULL on any failure (everything
 * allocated so far is released on each error path). */
static void *
devpoll_init(struct event_base *base)
{
	int dpfd, nfiles = NEVENT;
	struct rlimit rl;
	struct devpollop *devpollop;

	if (!(devpollop = mm_calloc(1, sizeof(struct devpollop))))
		return (NULL);

	/* Size the buffers to the process fd limit when one is set. */
	if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
	    rl.rlim_cur != RLIM_INFINITY)
		nfiles = rl.rlim_cur;

	/* Initialize the kernel queue */
	if ((dpfd = evutil_open_closeonexec("/dev/poll", O_RDWR, 0)) == -1) {
		event_warn("open: /dev/poll");
		mm_free(devpollop);
		return (NULL);
	}

	devpollop->dpfd = dpfd;

	/* Initialize fields */
	/* FIXME: allocating 'nfiles' worth of space here can be
	 * expensive and unnecessary.  See how epoll.c does it instead. */
	devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd));
	if (devpollop->events == NULL) {
		mm_free(devpollop);
		close(dpfd);
		return (NULL);
	}
	devpollop->nevents = nfiles;

	devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
	if (devpollop->changes == NULL) {
		mm_free(devpollop->events);
		mm_free(devpollop);
		close(dpfd);
		return (NULL);
	}

	evsig_init(base);

	return (devpollop);
}
/* eventop::dispatch for /dev/poll: flush pending changes, block in the
 * DP_POLL ioctl (with the base lock released across the wait), then
 * activate every ready fd via evmap_io_active().  Returns 0 on success
 * or on EINTR; -1 on any other ioctl failure. */
static int
devpoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct devpollop *devpollop = base->evbase;
	struct pollfd *events = devpollop->events;
	struct dvpoll dvp;
	int i, res, timeout = -1;

	/* NOTE(review): a devpoll_commit failure is silently ignored here;
	 * the queued changes remain pending for a later attempt. */
	if (devpollop->nchanges)
		devpoll_commit(devpollop);

	if (tv != NULL)
		timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000; /* round usec up to ms */

	dvp.dp_fds = devpollop->events;
	dvp.dp_nfds = devpollop->nevents;
	dvp.dp_timeout = timeout;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = ioctl(devpollop->dpfd, DP_POLL, &dvp);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("ioctl: DP_POLL");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: devpoll_wait reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;
		int what = events[i].revents;

		/* Error/hangup wakes both directions so the registered
		 * read/write callbacks get a chance to observe it. */
		if (what & POLLHUP)
			what |= POLLIN | POLLOUT;
		else if (what & POLLERR)
			what |= POLLIN | POLLOUT;

		if (what & POLLIN)
			which |= EV_READ;
		if (what & POLLOUT)
			which |= EV_WRITE;

		if (!which)
			continue;

		/* XXX(niels): not sure if this works for devpoll */
		evmap_io_active(base, events[i].fd, which);
	}

	return (0);
}
/* eventop::add for /dev/poll: translate EV_READ/EV_WRITE into poll
 * flags and queue the interest change.  Returns 0 on success, -1 if
 * the change could not be queued. */
static int
devpoll_add(struct event_base *base, int fd, short old, short events, void *p)
{
	struct devpollop *devpollop = base->evbase;
	int pollev = 0;
	(void)p;

	/*
	 * It's not necessary to OR the existing read/write events that we
	 * are currently interested in with the new event we are adding.
	 * The /dev/poll driver ORs any new events with the existing events
	 * that it has cached for the fd.
	 */
	if (events & EV_READ)
		pollev |= POLLIN;
	if (events & EV_WRITE)
		pollev |= POLLOUT;

	return (devpoll_queue(devpollop, fd, pollev) != 0) ? (-1) : (0);
}
/* eventop::del for /dev/poll.  POLLREMOVE drops ALL interest for the
 * fd, so when only one direction is being removed the surviving one is
 * queued again afterward.  Returns -1 only if queueing the POLLREMOVE
 * fails; failures of the re-add queues are ignored (NOTE(review)). */
static int
devpoll_del(struct event_base *base, int fd, short old, short events, void *p)
{
	struct devpollop *devpollop = base->evbase;
	int res;
	(void)p;

	res = 0;
	if (events & EV_READ)
		res |= POLLIN;
	if (events & EV_WRITE)
		res |= POLLOUT;

	/*
	 * The only way to remove an fd from the /dev/poll monitored set is
	 * to use POLLREMOVE by itself.  This removes ALL events for the fd
	 * provided so if we care about two events and are only removing one
	 * we must re-add the other event after POLLREMOVE.
	 */

	if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
		return (-1);

	if ((res & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
		/*
		 * We're not deleting all events, so we must resubmit the
		 * event that we are still interested in if one exists.
		 */

		if ((res & POLLIN) && (old & EV_WRITE)) {
			/* Deleting read, still care about write */
			devpoll_queue(devpollop, fd, POLLOUT);
		} else if ((res & POLLOUT) && (old & EV_READ)) {
			/* Deleting write, still care about read */
			devpoll_queue(devpollop, fd, POLLIN);
		}
	}

	return (0);
}
/* eventop::dealloc for /dev/poll: tear down signal handling, release
 * both buffers, close the device fd, and free the backend state.  The
 * struct is zeroed first to help catch use-after-free. */
static void
devpoll_dealloc(struct event_base *base)
{
	struct devpollop *devpollop = base->evbase;

	evsig_dealloc(base);
	if (devpollop->events)
		mm_free(devpollop->events);
	if (devpollop->changes)
		mm_free(devpollop->changes);
	if (devpollop->dpfd >= 0)
		close(devpollop->dpfd);

	memset(devpollop, 0, sizeof(struct devpollop));
	mm_free(devpollop);
}

Просмотреть файл

@ -1,473 +0,0 @@
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2012 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#include <stdint.h>
#include <sys/types.h>
#include <sys/resource.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/epoll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef _EVENT_HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "event-internal.h"
#include "evsignal-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "changelist-internal.h"
/* Per-event_base state for the Linux epoll backend. */
struct epollop {
	struct epoll_event *events;	/* buffer filled by epoll_wait() */
	int nevents;			/* capacity of 'events'; grown in epoll_dispatch */
	int epfd;			/* the epoll instance fd */
};
/* Backend entry points; see the eventop contract in event-internal.h. */
static void *epoll_init(struct event_base *);
static int epoll_dispatch(struct event_base *, struct timeval *);
static void epoll_dealloc(struct event_base *);

/* Variant dispatch table installed when the changelist is enabled
 * (see epoll_init): add/del record changes which epoll_dispatch later
 * applies in a batch via epoll_apply_changes(). */
static const struct eventop epollops_changelist = {
	"epoll (with changelist)",
	epoll_init,
	event_changelist_add,
	event_changelist_del,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1,
	EVENT_CHANGELIST_FDINFO_SIZE
};

static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);
static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);

/* Default dispatch table: every add/del is applied immediately as a
 * single synthetic change. */
const struct eventop epollops = {
	"epoll",
	epoll_init,
	epoll_nochangelist_add,
	epoll_nochangelist_del,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1,
	0
};
/* epoll_wait() result buffer: start small, doubled on demand by
 * epoll_dispatch up to MAX_NEVENT. */
#define INITIAL_NEVENT 32
#define MAX_NEVENT 4096

/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
 * values bigger than (LONG_MAX - 999ULL)/HZ.  HZ in the wild can be
 * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
 * largest number of msec we can support here is 2147482.  Let's
 * round that down by 47 seconds.
 */
#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
/* eventop::init for epoll: create the epoll fd, allocate the initial
 * result buffer, and swap in the changelist-based dispatch table when
 * EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST is set (or the
 * EVENT_EPOLL_USE_CHANGELIST environment variable is present and env
 * vars are not ignored).  Returns NULL on failure. */
static void *
epoll_init(struct event_base *base)
{
	int epfd;
	struct epollop *epollop;

	/* Initialize the kernel queue.  (The size field is ignored since
	 * 2.6.8.) */
	if ((epfd = epoll_create(32000)) == -1) {
		if (errno != ENOSYS)
			event_warn("epoll_create");
		return (NULL);
	}

	evutil_make_socket_closeonexec(epfd);

	if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) {
		close(epfd);
		return (NULL);
	}

	epollop->epfd = epfd;

	/* Initialize fields */
	epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
	if (epollop->events == NULL) {
		mm_free(epollop);
		close(epfd);
		return (NULL);
	}
	epollop->nevents = INITIAL_NEVENT;

	if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
	    ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
		evutil_getenv("EVENT_EPOLL_USE_CHANGELIST") != NULL))
		base->evsel = &epollops_changelist;

	evsig_init(base);

	return (epollop);
}
/* Render an event_change read_change/write_change field for log
 * messages; bits other than ADD/DEL are ignored. */
static const char *
change_to_string(int change)
{
	switch (change & (EV_CHANGE_ADD|EV_CHANGE_DEL)) {
	case EV_CHANGE_ADD:
		return "add";
	case EV_CHANGE_DEL:
		return "del";
	case 0:
		return "none";
	default:
		/* Both ADD and DEL set: should not happen. */
		return "???";
	}
}
/* Render an epoll_ctl operation code for log messages. */
static const char *
epoll_op_to_string(int op)
{
	switch (op) {
	case EPOLL_CTL_ADD:
		return "ADD";
	case EPOLL_CTL_DEL:
		return "DEL";
	case EPOLL_CTL_MOD:
		return "MOD";
	default:
		return "???";
	}
}
/* Translate one event_change into a single epoll_ctl() call, deriving
 * the operation (ADD/MOD/DEL) and event mask from the previous state
 * (ch->old_events) and the requested read/write changes.  Recovers from
 * stale-fd races by retrying MOD as ADD on ENOENT and ADD as MOD on
 * EEXIST, and treats DEL failures on already-closed fds as harmless.
 * Returns 0 on success or no-op, -1 on an unrecoverable failure. */
static int
epoll_apply_one_change(struct event_base *base,
    struct epollop *epollop,
    const struct event_change *ch)
{
	struct epoll_event epev;
	int op, events = 0;

	if (1) {
		/* The logic here is a little tricky.  If we had no events set
		   on the fd before, we need to set op="ADD" and set
		   events=the events we want to add.  If we had any events set
		   on the fd before, and we want any events to remain on the
		   fd, we need to say op="MOD" and set events=the events we
		   want to remain.  But if we want to delete the last event,
		   we say op="DEL" and set events=the remaining events.  What
		   fun!
		*/

		/* TODO: Turn this into a switch or a table lookup. */

		if ((ch->read_change & EV_CHANGE_ADD) ||
		    (ch->write_change & EV_CHANGE_ADD)) {
			/* If we are adding anything at all, we'll want to do
			 * either an ADD or a MOD. */
			events = 0;
			op = EPOLL_CTL_ADD;
			if (ch->read_change & EV_CHANGE_ADD) {
				events |= EPOLLIN;
			} else if (ch->read_change & EV_CHANGE_DEL) {
				;	/* read being deleted: leave EPOLLIN out */
			} else if (ch->old_events & EV_READ) {
				events |= EPOLLIN;	/* keep existing read interest */
			}
			if (ch->write_change & EV_CHANGE_ADD) {
				events |= EPOLLOUT;
			} else if (ch->write_change & EV_CHANGE_DEL) {
				;	/* write being deleted: leave EPOLLOUT out */
			} else if (ch->old_events & EV_WRITE) {
				events |= EPOLLOUT;	/* keep existing write interest */
			}
			if ((ch->read_change|ch->write_change) & EV_ET)
				events |= EPOLLET;

			if (ch->old_events) {
				/* If MOD fails, we retry as an ADD, and if
				 * ADD fails we will retry as a MOD.  So the
				 * only hard part here is to guess which one
				 * will work.  As a heuristic, we'll try
				 * MOD first if we think there were old
				 * events and ADD if we think there were none.
				 *
				 * We can be wrong about the MOD if the file
				 * has in fact been closed and re-opened.
				 *
				 * We can be wrong about the ADD if the
				 * the fd has been re-created with a dup()
				 * of the same file that it was before.
				 */
				op = EPOLL_CTL_MOD;
			}
		} else if ((ch->read_change & EV_CHANGE_DEL) ||
		    (ch->write_change & EV_CHANGE_DEL)) {
			/* If we're deleting anything, we'll want to do a MOD
			 * or a DEL. */
			op = EPOLL_CTL_DEL;

			if (ch->read_change & EV_CHANGE_DEL) {
				if (ch->write_change & EV_CHANGE_DEL) {
					events = EPOLLIN|EPOLLOUT;
				} else if (ch->old_events & EV_WRITE) {
					/* Write interest survives: downgrade to MOD. */
					events = EPOLLOUT;
					op = EPOLL_CTL_MOD;
				} else {
					events = EPOLLIN;
				}
			} else if (ch->write_change & EV_CHANGE_DEL) {
				if (ch->old_events & EV_READ) {
					/* Read interest survives: downgrade to MOD. */
					events = EPOLLIN;
					op = EPOLL_CTL_MOD;
				} else {
					events = EPOLLOUT;
				}
			}
		}

		/* Nothing to change (e.g. delete of an event not present). */
		if (!events)
			return 0;

		memset(&epev, 0, sizeof(epev));
		epev.data.fd = ch->fd;
		epev.events = events;
		if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == -1) {
			if (op == EPOLL_CTL_MOD && errno == ENOENT) {
				/* If a MOD operation fails with ENOENT, the
				 * fd was probably closed and re-opened.  We
				 * should retry the operation as an ADD.
				 */
				if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) {
					event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too",
					    (int)epev.events, ch->fd);
					return -1;
				} else {
					event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.",
						(int)epev.events,
						ch->fd));
				}
			} else if (op == EPOLL_CTL_ADD && errno == EEXIST) {
				/* If an ADD operation fails with EEXIST,
				 * either the operation was redundant (as with a
				 * precautionary add), or we ran into a fun
				 * kernel bug where using dup*() to duplicate the
				 * same file into the same fd gives you the same epitem
				 * rather than a fresh one.  For the second case,
				 * we must retry with MOD. */
				if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) {
					event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too",
					    (int)epev.events, ch->fd);
					return -1;
				} else {
					event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.",
						(int)epev.events,
						ch->fd));
				}
			} else if (op == EPOLL_CTL_DEL &&
			    (errno == ENOENT || errno == EBADF ||
				errno == EPERM)) {
				/* If a delete fails with one of these errors,
				 * that's fine too: we closed the fd before we
				 * got around to calling epoll_dispatch. */
				event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.",
					(int)epev.events,
					ch->fd,
					strerror(errno)));
			} else {
				event_warn("Epoll %s(%d) on fd %d failed. Old events were %d; read change was %d (%s); write change was %d (%s)",
				    epoll_op_to_string(op),
				    (int)epev.events,
				    ch->fd,
				    ch->old_events,
				    ch->read_change,
				    change_to_string(ch->read_change),
				    ch->write_change,
				    change_to_string(ch->write_change));
				return -1;
			}
		} else {
			event_debug(("Epoll %s(%d) on fd %d okay. [old events were %d; read change was %d; write change was %d]",
				epoll_op_to_string(op),
				(int)epev.events,
				(int)ch->fd,
				ch->old_events,
				ch->read_change,
				ch->write_change));
		}
	}
	return 0;
}
/* Apply every queued change in the base's changelist to the kernel.
 * Returns 0 when all changes applied cleanly, -1 if any one failed
 * (the remaining changes are still attempted). */
static int
epoll_apply_changes(struct event_base *base)
{
	struct event_changelist *changelist = &base->changelist;
	struct epollop *epollop = base->evbase;
	int result = 0;
	int idx;

	for (idx = 0; idx < changelist->n_changes; ++idx) {
		struct event_change *change = &changelist->changes[idx];
		if (epoll_apply_one_change(base, epollop, change) < 0)
			result = -1;
	}

	return (result);
}
/* "add" entry point used when the changelist is disabled: build one
 * synthetic event_change describing the addition (preserving the EV_ET
 * edge-trigger bit) and apply it immediately.  Returns the result of
 * epoll_apply_one_change (0 on success, -1 on failure). */
static int
epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	/* fdinfo pointer is unused in no-changelist mode; silence
	 * -Wunused-parameter (matches devpoll_add/devpoll_del). */
	(void)p;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_ADD |
		    (events & EV_ET);
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_ADD |
		    (events & EV_ET);

	return epoll_apply_one_change(base, base->evbase, &ch);
}
/* "del" entry point used when the changelist is disabled: build one
 * synthetic event_change describing the deletion and apply it
 * immediately.  Returns the result of epoll_apply_one_change. */
static int
epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	/* fdinfo pointer is unused in no-changelist mode; silence
	 * -Wunused-parameter (matches devpoll_add/devpoll_del). */
	(void)p;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_DEL;
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_DEL;

	return epoll_apply_one_change(base, base->evbase, &ch);
}
/* eventop::dispatch for epoll: flush the changelist, block in
 * epoll_wait (with the base lock released and the timeout clamped to
 * MAX_EPOLL_TIMEOUT_MSEC), activate ready fds, then grow the result
 * buffer if it was filled.  Returns 0 on success or EINTR; -1 on any
 * other epoll_wait failure. */
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

	if (tv != NULL) {
		timeout = evutil_tv_to_msec(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all(&base->changelist, base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;

		/* Error/hangup wakes both directions so the registered
		 * callbacks can observe the condition. */
		if (what & (EPOLLHUP|EPOLLERR)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
		}

		if (!ev)
			continue;

		evmap_io_active(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		/* Growth failure is non-fatal: keep the old buffer. */
		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}
/* eventop::dealloc for epoll: tear down signal handling, free the
 * result buffer, close the epoll fd, and free the backend state.  The
 * struct is zeroed first to help catch use-after-free. */
static void
epoll_dealloc(struct event_base *base)
{
	struct epollop *epollop = base->evbase;

	evsig_dealloc(base);
	if (epollop->events)
		mm_free(epollop->events);
	if (epollop->epfd >= 0)
		close(epollop->epfd);

	memset(epollop, 0, sizeof(struct epollop));
	mm_free(epollop);
}

Просмотреть файл

@ -1,65 +0,0 @@
/*
* Copyright 2003-2009 Niels Provos <provos@citi.umich.edu>
* Copyright 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <unistd.h>
#include <errno.h>
/* Raw-syscall replacement for epoll_create(), for systems whose libc
 * lacks the epoll wrappers.  Where only the epoll_create1 syscall
 * exists, emulate the classic interface (the size hint is ignored but
 * non-positive sizes still fail with EINVAL). */
int
epoll_create(int size)
{
#if !defined(__NR_epoll_create) && defined(__NR_epoll_create1)
	if (size <= 0) {
		errno = EINVAL;
		return -1;
	}
	return (syscall(__NR_epoll_create1, 0));
#else
	return (syscall(__NR_epoll_create, size));
#endif
}
/* Raw-syscall replacement for epoll_ctl(). */
int
epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
	return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
}
/* Raw-syscall replacement for epoll_wait(); where only the epoll_pwait
 * syscall exists, call it with a NULL sigmask for equivalent behavior. */
int
epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
#if !defined(__NR_epoll_wait) && defined(__NR_epoll_pwait)
	return (syscall(__NR_epoll_pwait, epfd, events, maxevents, timeout, NULL, 0));
#else
	return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
#endif
}

Просмотреть файл

@ -1,293 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVBUFFER_INTERNAL_H_
#define _EVBUFFER_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include "event2/util.h"
#include "util-internal.h"
#include "defer-internal.h"
/* Experimental cb flag: "never deferred." Implementation note:
* these callbacks may get an inaccurate view of n_del/n_added in their
* arguments. */
#define EVBUFFER_CB_NODEFER 2
#ifdef WIN32
#include <winsock2.h>
#endif
#include <sys/queue.h>
/* Minimum allocation for a chain. We define this so that we're burning no
* more than 5% of each allocation on overhead. It would be nice to lose even
* less space, though. */
#if _EVENT_SIZEOF_VOID_P < 8
#define MIN_BUFFER_SIZE 512
#else
#define MIN_BUFFER_SIZE 1024
#endif
/** A single evbuffer callback for an evbuffer. This function will be invoked
 * when bytes are added to or removed from the evbuffer. */
struct evbuffer_cb_entry {
	/** Structures to implement a doubly-linked queue of callbacks
	 * (links into evbuffer.callbacks). */
	TAILQ_ENTRY(evbuffer_cb_entry) next;
	/** The callback function to invoke when this callback is called.
	    If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
	    valid; otherwise, cb_func is valid. */
	union {
		evbuffer_cb_func cb_func;
		evbuffer_cb cb_obsolete;
	} cb;
	/** Argument to pass to cb. */
	void *cbarg;
	/** Currently set flags on this callback. */
	ev_uint32_t flags;
};
struct bufferevent;
struct evbuffer_chain;

/** Internal representation of an evbuffer: a byte queue stored as a
 * linked list of evbuffer_chain blocks, with optional locking,
 * front/back freezing, and add/remove callbacks. */
struct evbuffer {
	/** The first chain in this buffer's linked list of chains. */
	struct evbuffer_chain *first;
	/** The last chain in this buffer's linked list of chains. */
	struct evbuffer_chain *last;

	/** Pointer to the next pointer pointing at the 'last_with_data' chain.
	 *
	 * To unpack:
	 *
	 * The last_with_data chain is the last chain that has any data in it.
	 * If all chains in the buffer are empty, it is the first chain.
	 * If the buffer has no chains, it is NULL.
	 *
	 * The last_with_datap pointer points at _whatever 'next' pointer_
	 * points at the last_with_datap chain. If the last_with_data chain
	 * is the first chain, or it is NULL, then the last_with_datap pointer
	 * is &buf->first.
	 */
	struct evbuffer_chain **last_with_datap;

	/** Total amount of bytes stored in all chains.*/
	size_t total_len;

	/** Number of bytes we have added to the buffer since we last tried to
	 * invoke callbacks. */
	size_t n_add_for_cb;
	/** Number of bytes we have removed from the buffer since we last
	 * tried to invoke callbacks. */
	size_t n_del_for_cb;

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/** A lock used to mediate access to this buffer. */
	void *lock;
#endif
	/** True iff we should free the lock field when we free this
	 * evbuffer. */
	unsigned own_lock : 1;
	/** True iff we should not allow changes to the front of the buffer
	 * (drains or prepends). */
	unsigned freeze_start : 1;
	/** True iff we should not allow changes to the end of the buffer
	 * (appends) */
	unsigned freeze_end : 1;
	/** True iff this evbuffer's callbacks are not invoked immediately
	 * upon a change in the buffer, but instead are deferred to be invoked
	 * from the event_base's loop.	Useful for preventing enormous stack
	 * overflows when we have mutually recursive callbacks, and for
	 * serializing callbacks in a single thread. */
	unsigned deferred_cbs : 1;
#ifdef WIN32
	/** True iff this buffer is set up for overlapped IO. */
	unsigned is_overlapped : 1;
#endif
	/** Zero or more EVBUFFER_FLAG_* bits */
	ev_uint32_t flags;

	/** Used to implement deferred callbacks. */
	struct deferred_cb_queue *cb_queue;

	/** A reference count on this evbuffer.	When the reference count
	 * reaches 0, the buffer is destroyed.	Manipulated with
	 * evbuffer_incref and evbuffer_decref_and_unlock and
	 * evbuffer_free. */
	int refcnt;

	/** A deferred_cb handle to make all of this buffer's callbacks
	 * invoked from the event loop. */
	struct deferred_cb deferred;

	/** A doubly-linked-list of callback functions */
	TAILQ_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;

	/** The parent bufferevent object this evbuffer belongs to.
	 * NULL if the evbuffer stands alone. */
	struct bufferevent *parent;
};
/* Pick ev_misalign_t: a signed type wide enough for a chain's misalign
 * field (which doubles as a file offset for sendfile chains), and
 * EVBUFFER_CHAIN_MAX: the largest chain size representable in both
 * size_t and that type. */
#if _EVENT_SIZEOF_OFF_T < _EVENT_SIZEOF_SIZE_T
typedef ev_ssize_t ev_misalign_t;
#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
#else
typedef ev_off_t ev_misalign_t;
#if _EVENT_SIZEOF_OFF_T > _EVENT_SIZEOF_SIZE_T
#define EVBUFFER_CHAIN_MAX EV_SIZE_MAX
#else
#define EVBUFFER_CHAIN_MAX ((size_t)EV_SSIZE_MAX)
#endif
#endif

/** A single item in an evbuffer. */
struct evbuffer_chain {
	/** points to next buffer in the chain */
	struct evbuffer_chain *next;

	/** total allocation available in the buffer field. */
	size_t buffer_len;

	/** unused space at the beginning of buffer or an offset into a
	 * file for sendfile buffers. */
	ev_misalign_t misalign;

	/** Offset into buffer + misalign at which to start writing.
	 * In other words, the total number of bytes actually stored
	 * in buffer. */
	size_t off;

	/** Set if special handling is required for this chain */
	unsigned flags;
#define EVBUFFER_MMAP		0x0001	/**< memory in buffer is mmaped */
#define EVBUFFER_SENDFILE	0x0002	/**< a chain used for sendfile */
#define EVBUFFER_REFERENCE	0x0004	/**< a chain with a mem reference */
#define EVBUFFER_IMMUTABLE	0x0008	/**< read-only chain */
	/** a chain that mustn't be reallocated or freed, or have its contents
	 * memmoved, until the chain is un-pinned. */
#define EVBUFFER_MEM_PINNED_R	0x0010
#define EVBUFFER_MEM_PINNED_W	0x0020
#define EVBUFFER_MEM_PINNED_ANY (EVBUFFER_MEM_PINNED_R|EVBUFFER_MEM_PINNED_W)
	/** a chain that should be freed, but can't be freed until it is
	 * un-pinned. */
#define EVBUFFER_DANGLING	0x0040

	/** Usually points to the read-write memory belonging to this
	 * buffer allocated as part of the evbuffer_chain allocation.
	 * For mmap, this can be a read-only buffer and
	 * EVBUFFER_IMMUTABLE will be set in flags.  For sendfile, it
	 * may point to NULL.
	 */
	unsigned char *buffer;
};

/* this is currently used by both mmap and sendfile */
/* TODO(niels): something strange needs to happen for Windows here, I am not
 * sure what that is, but it needs to get looked into.
 */
struct evbuffer_chain_fd {
	int fd;	/**< the fd associated with this chain */
};

/** callback for a reference buffer; lets us know what to do with it when
 * we're done with it. */
struct evbuffer_chain_reference {
	evbuffer_ref_cleanup_cb cleanupfn;	/* invoked when the chain is released */
	void *extra;				/* opaque argument passed to cleanupfn */
};
#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
/** Return a pointer to extra data allocated along with an evbuffer.
 * (The extra data lives immediately after the chain header.) */
#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)

/** Assert that we are holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_LOCKED(buffer)			\
	EVLOCK_ASSERT_LOCKED((buffer)->lock)

/* Convenience wrappers around the EVLOCK_* macros for one or two
 * evbuffers. */
#define EVBUFFER_LOCK(buffer)						\
	do {								\
		EVLOCK_LOCK((buffer)->lock, 0);				\
	} while (0)
#define EVBUFFER_UNLOCK(buffer)						\
	do {								\
		EVLOCK_UNLOCK((buffer)->lock, 0);			\
	} while (0)
#define EVBUFFER_LOCK2(buffer1, buffer2)				\
	do {								\
		EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0);	\
	} while (0)
#define EVBUFFER_UNLOCK2(buffer1, buffer2)				\
	do {								\
		EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0);	\
	} while (0)

/** Increase the reference count of buf by one. */
void _evbuffer_incref(struct evbuffer *buf);
/** Increase the reference count of buf by one and acquire the lock. */
void _evbuffer_incref_and_lock(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag. A pinned chunk may not be
 * moved or freed until it is unpinned. */
void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
 * releases the lock before freeing it and the buffer. */
void _evbuffer_decref_and_unlock(struct evbuffer *buffer);

/** As evbuffer_expand, but does not guarantee that the newly allocated memory
 * is contiguous.  Instead, it may be split across two or more chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t, int);

/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
 * hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
 * Sets up the one or two iovecs in 'vecs' to point to the free memory and its
 * extent, and *chainp to point to the first chain that we'll try to read into.
 * Returns the number of vecs used.
 */
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
    int exact);

/* Helper macro: copies an evbuffer_iovec in ei to a win32 WSABUF in i. */
#define WSABUF_FROM_EVBUFFER_IOV(i,ei) do {		\
		(i)->buf = (ei)->iov_base;		\
		(i)->len = (unsigned long)(ei)->iov_len;	\
	} while (0)
/* XXXX the cast above is safe for now, but not if we allow mmaps on win64.
 * See note in buffer_iocp's launch_write function */

/** Set the parent bufferevent object for buf to bev */
void evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev);

void evbuffer_invoke_callbacks(struct evbuffer *buf);


Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,45 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVDNS_H_
#define _EVDNS_H_

/** @file evdns.h

  A dns subsystem for Libevent.

  The <evdns.h> header is deprecated in Libevent 2.0 and later; please
  use <event2/evdns.h> instead.  Depending on what functionality you
  need, you may also want to include more of the other <event2/...>
  headers.
 */

/* Backwards-compatibility shim: pull in the split 2.0-era headers. */
#include <event.h>
#include <event2/dns.h>
#include <event2/dns_compat.h>
#include <event2/dns_struct.h>

#endif /* _EVDNS_H_ */

Просмотреть файл

@ -1,368 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"
/* map union members back */
/* mutually exclusive */
#define ev_signal_next _ev.ev_signal.ev_signal_next
#define ev_io_next _ev.ev_io.ev_io_next
#define ev_io_timeout _ev.ev_io.ev_timeout
/* used only by signals */
#define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
/** Structure to define the backend of a given event_base: a method
 * table describing one I/O backend implementation. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo field below.  It will be set to 0 the first time the fd is
	 * added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
#ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
integers. Instead, they are pointer-like windows handles, and we want to
use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif
/* #define HT_CACHE_HASH_VALS */
#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif
/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries (allocated capacity,
	 * not the number in use). */
	int nentries;
};
/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue of same-duration timeouts can be faster.
 **/
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};
/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK 0x000fffff
struct event_change;
/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	/* Array of pending changes ('changes_size' slots allocated,
	 * 'n_changes' in use). */
	struct event_change *changes;
	int n_changes;
	int changes_size;
};
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int ompi__event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (ompi__event_debug_mode_on)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
/** Internal structure: the complete state of one event loop. */
struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Number of total events added to this event_base */
	int event_count;
	/** Number of total events active in this event_base */
	int event_count_active;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/* Active event management. */
	/** An array of nactivequeues queues for active events (ones that
	 * have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct event_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** List of deferred_cb that are active.  We run these after the active
	 * events. */
	struct deferred_cb_queue defer_queue;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** All events that have been enabled (added) in this event_base */
	struct event_list eventqueue;

	/** Stored timeval; used to detect when time is running backwards. */
	struct timeval event_tv;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;
#endif

#ifndef _EVENT_DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** The event whose callback is executing right now */
	struct event *current_event;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif

#ifdef WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	/* Notify main thread to wake up break, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);
};
/** One entry in an event_config's list: names a backend method that the
 * allocated event_base should avoid. */
struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};
/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	/** Backends named here will not be selected. */
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	/** Hint about the number of CPUs to plan for (used to size the
	 * IOCP thread pool — TODO confirm other uses). */
	int n_cpus_hint;
	/** Features (see event_method_feature) the backend must support. */
	enum event_method_feature require_features;
	/** Flags to configure the allocated base with. */
	enum event_base_config_flag flags;
};
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define TAILQ_FIRST(head) ((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define TAILQ_END(head) NULL
#endif
#ifndef TAILQ_NEXT
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#endif
#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#endif
#ifndef TAILQ_INSERT_BEFORE
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif
#endif /* TAILQ_FOREACH */
#define N_ACTIVE_CALLBACKS(base) \
((base)->event_count_active + (base)->defer_queue.active_count)
int _evsig_set_handler(struct event_base *base, int evsignal,
void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);
void event_active_nolock(struct event *ev, int res, short count);
/* FIXME document. */
void event_base_add_virtual(struct event_base *base);
void event_base_del_virtual(struct event_base *base);
/** For debugging: unless assertions are disabled, verify the referential
integrity of the internal data structures of 'base'. This operation can
be expensive.
Returns on success; aborts on failure.
*/
void event_base_assert_ok(struct event_base *base);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_INTERNAL_H_ */

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,85 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_H_
#define _EVENT_H_
/** @file event.h
A library for writing event-driven network servers.
The <event.h> header is deprecated in Libevent 2.0 and later; please
use <event2/event.h> instead. Depending on what functionality you
need, you may also want to include more of the other event2/
headers.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef _EVENT_HAVE_STDINT_H
#include <stdint.h>
#endif
#include <stdarg.h>
/* For int types. */
#include <evutil.h>
#ifdef WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
typedef unsigned char u_char;
typedef unsigned short u_short;
#endif
#include <event2/event_struct.h>
#include <event2/event.h>
#include <event2/event_compat.h>
#include <event2/buffer.h>
#include <event2/buffer_compat.h>
#include <event2/bufferevent.h>
#include <event2/bufferevent_struct.h>
#include <event2/bufferevent_compat.h>
#include <event2/tag.h>
#include <event2/tag_compat.h>
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_H_ */

Просмотреть файл

@ -1,293 +0,0 @@
/*
* Copyright (c) 2009-2012 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _WIN32_WINNT
/* Minimum required for InitializeCriticalSectionAndSpinCount */
#define _WIN32_WINNT 0x0403
#endif
#include <winsock2.h>
#include <windows.h>
#include <process.h>
#include <stdio.h>
#include <mswsock.h>
#include "event2/util.h"
#include "util-internal.h"
#include "iocp-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "event-internal.h"
#include "evthread-internal.h"
#define NOTIFICATION_KEY ((ULONG_PTR)-1)
/** Prepare an event_overlapped for use: zero every field and install
 * the completion callback 'cb'. */
void
event_overlapped_init(struct event_overlapped *o, iocp_callback cb)
{
	memset(o, 0, sizeof(*o));
	o->cb = cb;
}
/* Dispatch one dequeued completion entry: recover the enclosing
 * event_overlapped from its OVERLAPPED member and invoke its callback. */
static void
handle_entry(OVERLAPPED *o, ULONG_PTR completion_key, DWORD nBytes, int ok)
{
	struct event_overlapped *eo =
	    EVUTIL_UPCAST(o, struct event_overlapped, overlapped);
	eo->cb(eo, completion_key, nBytes, ok);
}
/* Body of each IOCP worker thread: block on GetQueuedCompletionStatus
 * and dispatch completion entries until the port is shut down. */
static void
loop(void *_port)
{
	struct event_iocp_port *port = _port;
	long ms = port->ms;
	HANDLE p = port->port;

	if (ms <= 0)
		ms = INFINITE;

	while (1) {
		OVERLAPPED *overlapped=NULL;
		ULONG_PTR key=0;
		DWORD bytes=0;
		int ok = GetQueuedCompletionStatus(p, &bytes, &key,
			&overlapped, ms);
		EnterCriticalSection(&port->lock);
		if (port->shutdown) {
			/* Last worker out signals the semaphore so
			 * event_iocp_shutdown() can proceed. */
			if (--port->n_live_threads == 0)
				ReleaseSemaphore(port->shutdownSemaphore, 1,
						NULL);
			LeaveCriticalSection(&port->lock);
			return;
		}
		LeaveCriticalSection(&port->lock);

		/* NOTIFICATION_KEY packets are wakeups, not real I/O;
		 * a NULL overlapped means the dequeue itself failed. */
		if (key != NOTIFICATION_KEY && overlapped)
			handle_entry(overlapped, key, bytes, ok);
		else if (!overlapped)
			break;
	}
	event_warnx("GetQueuedCompletionStatus exited with no event.");
	EnterCriticalSection(&port->lock);
	if (--port->n_live_threads == 0)
		ReleaseSemaphore(port->shutdownSemaphore, 1, NULL);
	LeaveCriticalSection(&port->lock);
}
/** Attach the socket 'fd' to 'port' so its completions are delivered
 * with completion key 'key'.  Returns 0 on success, -1 on failure. */
int
event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd,
    ev_uintptr_t key)
{
	HANDLE result =
	    CreateIoCompletionPort((HANDLE)fd, port->port, key, port->n_threads);
	return result ? 0 : -1;
}
/* Look up a WSA extension function (AcceptEx and friends) on socket 's'
 * via SIO_GET_EXTENSION_FUNCTION_POINTER.  Returns NULL when the
 * function is unavailable. */
static void *
get_extension_function(SOCKET s, const GUID *which_fn)
{
	void *ptr = NULL;
	DWORD bytes=0;
	WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
	    (GUID*)which_fn, sizeof(*which_fn),
	    &ptr, sizeof(ptr),
	    &bytes, NULL, NULL);

	/* No need to detect errors here: if ptr is set, then we have a good
	   function pointer.  Otherwise, we should behave as if we had no
	   function pointer.
	*/
	return ptr;
}
/* Mingw doesn't have these in its mswsock.h. The values are copied from
wine.h. Perhaps if we copy them exactly, the cargo will come again.
*/
#ifndef WSAID_ACCEPTEX
#define WSAID_ACCEPTEX \
{0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
#endif
#ifndef WSAID_CONNECTEX
#define WSAID_CONNECTEX \
{0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
#endif
#ifndef WSAID_GETACCEPTEXSOCKADDRS
#define WSAID_GETACCEPTEXSOCKADDRS \
{0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
#endif
static int extension_fns_initialized = 0;
/* Populate 'ext' with the AcceptEx/ConnectEx/GetAcceptExSockaddrs
 * extension function pointers, using a throwaway socket for the lookup.
 * Entries stay NULL when a function is unsupported.  If the probe
 * socket cannot be created, the initialized flag is left unset so a
 * later call can retry. */
static void
init_extension_functions(struct win32_extension_fns *ext)
{
	const GUID acceptex = WSAID_ACCEPTEX;
	const GUID connectex = WSAID_CONNECTEX;
	const GUID getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
	SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
	if (s == INVALID_SOCKET)
		return;
	ext->AcceptEx = get_extension_function(s, &acceptex);
	ext->ConnectEx = get_extension_function(s, &connectex);
	ext->GetAcceptExSockaddrs = get_extension_function(s,
	    &getacceptexsockaddrs);
	closesocket(s);

	extension_fns_initialized = 1;
}
static struct win32_extension_fns the_extension_fns;
/* Return the process-global table of WSA extension function pointers;
 * meaningful after init_extension_functions() has run. */
const struct win32_extension_fns *
event_get_win32_extension_fns(void)
{
	return &the_extension_fns;
}
#define N_CPUS_DEFAULT 2
/** Create an IOCP port and start its pool of worker threads (two per
 * CPU; n_cpus <= 0 means N_CPUS_DEFAULT CPUs).  Returns the new port,
 * or NULL on failure. */
struct event_iocp_port *
event_iocp_port_launch(int n_cpus)
{
	struct event_iocp_port *port;
	int i;

	if (!extension_fns_initialized)
		init_extension_functions(&the_extension_fns);

	if (!(port = mm_calloc(1, sizeof(struct event_iocp_port))))
		return NULL;

	if (n_cpus <= 0)
		n_cpus = N_CPUS_DEFAULT;
	port->n_threads = n_cpus * 2;
	port->threads = mm_calloc(port->n_threads, sizeof(HANDLE));
	if (!port->threads)
		goto err;

	port->port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
			n_cpus);
	port->ms = -1;
	if (!port->port)
		goto err;

	port->shutdownSemaphore = CreateSemaphore(NULL, 0, 1, NULL);
	if (!port->shutdownSemaphore)
		goto err;

	/* NOTE(review): if _beginthread fails after earlier iterations
	 * succeeded, the err path frees 'port' while those threads may
	 * still reference it — confirm whether this path matters. */
	for (i=0; i<port->n_threads; ++i) {
		ev_uintptr_t th = _beginthread(loop, 0, port);
		if (th == (ev_uintptr_t)-1)
			goto err;
		port->threads[i] = (HANDLE)th;
		++port->n_live_threads;
	}

	InitializeCriticalSectionAndSpinCount(&port->lock, 1000);

	return port;
err:
	if (port->port)
		CloseHandle(port->port);
	if (port->threads)
		mm_free(port->threads);
	if (port->shutdownSemaphore)
		CloseHandle(port->shutdownSemaphore);
	mm_free(port);
	return NULL;
}
/* Destroy all OS handles owned by 'port' and free it.  Callers must
 * ensure no worker threads remain alive. */
static void
_event_iocp_port_unlock_and_free(struct event_iocp_port *port)
{
	DeleteCriticalSection(&port->lock);
	CloseHandle(port->port);
	CloseHandle(port->shutdownSemaphore);
	mm_free(port->threads);
	mm_free(port);
}
/* Post one NOTIFICATION_KEY packet per worker thread so every worker
 * wakes up (used to broadcast shutdown).  Returns 0 when every post
 * succeeded, -1 if any failed. */
static int
event_iocp_notify_all(struct event_iocp_port *port)
{
	int idx;
	int any_failed = 0;

	for (idx = 0; idx < port->n_threads; ++idx) {
		if (!PostQueuedCompletionStatus(port->port, 0,
			NOTIFICATION_KEY, NULL))
			any_failed = 1;
	}
	return any_failed ? -1 : 0;
}
/** Shut down 'port': mark it, wake every worker, and wait up to
 * 'waitMsec' milliseconds (negative = forever) for them to exit.
 * Frees the port and returns 0 when all workers exited; returns -1
 * (port NOT freed) if some are still running. */
int
event_iocp_shutdown(struct event_iocp_port *port, long waitMsec)
{
	DWORD ms = INFINITE;
	int n;

	EnterCriticalSection(&port->lock);
	port->shutdown = 1;
	LeaveCriticalSection(&port->lock);
	event_iocp_notify_all(port);

	if (waitMsec >= 0)
		ms = waitMsec;

	WaitForSingleObject(port->shutdownSemaphore, ms);
	EnterCriticalSection(&port->lock);
	n = port->n_live_threads;
	LeaveCriticalSection(&port->lock);
	if (n == 0) {
		_event_iocp_port_unlock_and_free(port);
		return 0;
	} else {
		return -1;
	}
}
/** Hand-deliver 'o' to 'port' as though an I/O of 'n' bytes with
 * completion key 'key' had completed.  Returns 0 on success, -1 on
 * failure. */
int
event_iocp_activate_overlapped(
    struct event_iocp_port *port, struct event_overlapped *o,
    ev_uintptr_t key, ev_uint32_t n)
{
	BOOL posted =
	    PostQueuedCompletionStatus(port->port, n, key, &o->overlapped);
	return posted ? 0 : -1;
}
/** Return the IOCP port belonging to 'base', or NULL when IOCP support
 * is not compiled in (non-Windows builds). */
struct event_iocp_port *
event_base_get_iocp(struct event_base *base)
{
#ifndef WIN32
	return NULL;
#else
	return base->iocp;
#endif
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,596 +0,0 @@
/*
* Copyright (c) 2003-2009 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#else
#include <sys/ioctl.h>
#endif
#include <sys/queue.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32
#include <syslog.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <limits.h>
#include "event2/event.h"
#include "event2/tag.h"
#include "event2/buffer.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
/*
Here's our wire format:
Stream = TaggedData*
TaggedData = Tag Length Data
where the integer value of 'Length' is the length of 'data'.
Tag = HByte* LByte
where HByte is a byte with the high bit set, and LByte is a byte
with the high bit clear. The integer value of the tag is taken
by concatenating the lower 7 bits from all the tags. So for example,
the tag 0x66 is encoded as [66], whereas the tag 0x166 is encoded as
[82 66]
Length = Integer
Integer = NNibbles Nibble* Padding?
where NNibbles is a 4-bit value encoding the number of nibbles-1,
and each Nibble is 4 bits worth of encoded integer, in big-endian
order. If the total encoded integer size is an odd number of nibbles,
a final padding nibble with value 0 is appended.
*/
int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
/* One-time setup hook for the tagging subsystem.  There is no global
 * state to initialize here; the empty function is presumably retained
 * for API compatibility. */
void
evtag_init(void)
{
}
/*
 * We encode integers by nibbles; the first nibble contains the number
 * of significant nibbles - 1; this allows us to encode up to 64-bit
 * integers.  This function is byte-order independent.
 *
 * @param number an unsigned integer (32- or 64-bit) to encode
 * @param data a pointer to where the data should be written.  Must
 *   have at least sizeof(number)+1 bytes free.
 * @return the number of bytes written into data.
 */
#define ENCODE_INT_INTERNAL(data, number) do {				\
	int off = 1, nibbles = 0;					\
									\
	memset(data, 0, sizeof(number)+1);				\
	while (number) {						\
		if (off & 0x1)						\
			data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f); \
		else							\
			data[off/2] = (data[off/2] & 0x0f) |		\
			    ((number & 0x0f) << 4);			\
		number >>= 4;						\
		off++;							\
	}								\
									\
	if (off > 2)							\
		nibbles = off - 2;					\
									\
	/* Off - 1 is the number of encoded nibbles */			\
	data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4);		\
									\
	return ((off + 1) / 2);						\
} while (0)
/* Encode the 32-bit value 'number' into 'data' (at least 5 bytes);
 * returns the encoded length in bytes. */
static inline int
encode_int_internal(ev_uint8_t *data, ev_uint32_t number)
{
	ENCODE_INT_INTERNAL(data, number);
}

/* As encode_int_internal(), but 64-bit ('data' needs at least 9 bytes). */
static inline int
encode_int64_internal(ev_uint8_t *data, ev_uint64_t number)
{
	ENCODE_INT_INTERNAL(data, number);
}
/** Append the nibble-encoded form of the 32-bit value 'number' to
 * 'evbuf'. */
void
evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number)
{
	ev_uint8_t encoded[5];
	int nbytes;

	nbytes = encode_int_internal(encoded, number);
	evbuffer_add(evbuf, encoded, nbytes);
}
/** Append the nibble-encoded form of the 64-bit value 'number' to
 * 'evbuf'. */
void
evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number)
{
	ev_uint8_t encoded[9];
	int nbytes;

	nbytes = encode_int64_internal(encoded, number);
	evbuffer_add(evbuf, encoded, nbytes);
}
/*
* Support variable length encoding of tags; we use the high bit in each
* octet as a continuation signal.
*/
/** Append the variable-length encoding of 'tag' to 'evbuf' (or, when
 * 'evbuf' is NULL, just compute the size).  The high bit of each octet
 * marks continuation.  Returns the number of bytes in the encoding. */
int
evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
{
	ev_uint8_t out[5] = { 0, 0, 0, 0, 0 };
	int n = 0;

	for (;;) {
		ev_uint8_t septet = (ev_uint8_t)(tag & 0x7f);
		tag >>= 7;
		if (tag != 0)
			septet |= 0x80;	/* more octets follow */
		out[n++] = septet;
		if (tag == 0)
			break;
	}

	if (evbuf != NULL)
		evbuffer_add(evbuf, out, n);

	return (n);
}
/* Decode a variable-length tag from the front of 'evbuf' into *ptag
 * (may be NULL to discard), draining the consumed bytes when 'dodrain'
 * is nonzero.  Returns the number of bytes consumed, or -1 on
 * malformed or incomplete input.
 *
 * Fix vs. the previous version: the scan is now bounded by the number
 * of bytes actually pulled up (sizeof(number)+1).  The old loop ran up
 * to the full buffer length, so a malformed run of more than five
 * continuation octets read past the pulled-up region and left-shifted
 * by >= 32 bits — both undefined behavior.  The shift operand is also
 * widened to unsigned before shifting to avoid signed overflow at
 * shift 28.  Well-formed tags decode exactly as before. */
static int
decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
{
	ev_uint32_t number = 0;
	size_t len = evbuffer_get_length(evbuf);
	ev_uint8_t *data;
	size_t count = 0;
	size_t max_bytes;
	int shift = 0, done = 0;

	/*
	 * the encoding of a number is at most one byte more than its
	 * storage size.  however, it may also be much smaller.
	 */
	max_bytes = len < sizeof(number) + 1 ? len : sizeof(number) + 1;
	data = evbuffer_pullup(evbuf, max_bytes);
	if (!data)
		return (-1);

	while (count < max_bytes) {
		ev_uint8_t lower = data[count++];
		number |= ((ev_uint32_t)(lower & 0x7f)) << shift;
		shift += 7;

		if (!(lower & 0x80)) {
			done = 1;
			break;
		}
	}

	if (!done)
		return (-1);

	if (dodrain)
		evbuffer_drain(evbuf, count);

	if (ptag != NULL)
		*ptag = number;

	return count > INT_MAX ? INT_MAX : (int)(count);
}
/** Decode and drain a variable-length tag from 'evbuf' into *ptag.
 * Returns the number of bytes consumed, or -1 on error. */
int
evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
{
	return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
}
/*
 * Marshal a data type, the general format is as follows:
 *
 * tag number: var bytes; length: var bytes; payload: var bytes
 * (the tag is variable-length too — see evtag_encode_tag)
 */
void
evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
    const void *data, ev_uint32_t len)
{
	evtag_encode_tag(evbuf, tag);
	evtag_encode_int(evbuf, len);
	evbuffer_add(evbuf, (void *)data, len);
}
/** Marshal the whole contents of 'data' under 'tag' into 'evbuf'
 * (appended via evbuffer_add_buffer). */
void
evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
    struct evbuffer *data)
{
	evtag_encode_tag(evbuf, tag);
	/* XXX support more than UINT32_MAX data */
	evtag_encode_int(evbuf, (ev_uint32_t)evbuffer_get_length(data));
	evbuffer_add_buffer(evbuf, data);
}
/* Marshaling for integers */

/** Marshal the 32-bit value 'integer' under 'tag': tag, payload
 * length, then the nibble-encoded integer itself. */
void
evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
{
	ev_uint8_t payload[5];
	int payload_len;

	payload_len = encode_int_internal(payload, integer);
	evtag_encode_tag(evbuf, tag);
	evtag_encode_int(evbuf, payload_len);
	evbuffer_add(evbuf, payload, payload_len);
}
/** Marshal the 64-bit value 'integer' under 'tag': tag, payload
 * length, then the nibble-encoded integer itself. */
void
evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
    ev_uint64_t integer)
{
	ev_uint8_t payload[9];
	int payload_len;

	payload_len = encode_int64_internal(payload, integer);
	evtag_encode_tag(evbuf, tag);
	evtag_encode_int(evbuf, payload_len);
	evbuffer_add(evbuf, payload, payload_len);
}
/** Marshal the NUL-terminated 'string' (terminator excluded) under
 * 'tag'. */
void
evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
{
	/* TODO support strings longer than UINT32_MAX ? */
	size_t slen = strlen(string);

	evtag_marshal(buf, tag, string, (ev_uint32_t)slen);
}
/** Marshal 'tv' under 'tag' as two concatenated encoded integers:
 * tv_sec then tv_usec.  NOTE(review): both fields pass through the
 * 32-bit encoder, so values are assumed to fit in 32 bits — confirm
 * on platforms with 64-bit time_t. */
void
evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
{
	ev_uint8_t data[10];
	int len = encode_int_internal(data, tv->tv_sec);

	len += encode_int_internal(data + len, tv->tv_usec);

	evtag_marshal(evbuf, tag, data, len);
}
#define DECODE_INT_INTERNAL(number, maxnibbles, pnumber, evbuf, offset) \
do { \
ev_uint8_t *data; \
ev_ssize_t len = evbuffer_get_length(evbuf) - offset; \
int nibbles = 0; \
\
if (len <= 0) \
return (-1); \
\
/* XXX(niels): faster? */ \
data = evbuffer_pullup(evbuf, offset + 1) + offset; \
if (!data) \
return (-1); \
\
nibbles = ((data[0] & 0xf0) >> 4) + 1; \
if (nibbles > maxnibbles || (nibbles >> 1) + 1 > len) \
return (-1); \
len = (nibbles >> 1) + 1; \
\
data = evbuffer_pullup(evbuf, offset + len) + offset; \
if (!data) \
return (-1); \
\
while (nibbles > 0) { \
number <<= 4; \
if (nibbles & 0x1) \
number |= data[nibbles >> 1] & 0x0f; \
else \
number |= (data[nibbles >> 1] & 0xf0) >> 4; \
nibbles--; \
} \
\
*pnumber = number; \
\
return (int)(len); \
} while (0)
/* Internal: decode an integer from an evbuffer, without draining it.
 * Only integers up to 32-bits are supported.
 *
 * @param evbuf the buffer to read from
 * @param offset an index into the buffer at which we should start reading.
 * @param pnumber a pointer to receive the integer.
 * @return The length of the number as encoded, or -1 on error.
 */
static int
decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int offset)
{
	ev_uint32_t number = 0;
	DECODE_INT_INTERNAL(number, 8, pnumber, evbuf, offset);
}

/* As decode_int_internal(), but for integers up to 64 bits. */
static int
decode_int64_internal(ev_uint64_t *pnumber, struct evbuffer *evbuf, int offset)
{
	ev_uint64_t number = 0;
	DECODE_INT_INTERNAL(number, 16, pnumber, evbuf, offset);
}
/** Decode a 32-bit encoded integer from the front of 'evbuf' into
 * *pnumber, draining the consumed bytes.  Returns 0 on success, -1 on
 * failure. */
int
evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
{
	int consumed = decode_int_internal(pnumber, evbuf, 0);

	if (consumed == -1)
		return (-1);
	evbuffer_drain(evbuf, consumed);
	return (0);
}
/** Decode a 64-bit encoded integer from the front of 'evbuf' into
 * *pnumber, draining the consumed bytes.  Returns 0 on success, -1 on
 * failure. */
int
evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf)
{
	int consumed = decode_int64_internal(pnumber, evbuf, 0);

	if (consumed == -1)
		return (-1);
	evbuffer_drain(evbuf, consumed);
	return (0);
}
/** Peek at the next tag in 'evbuf' without consuming anything.
 * Returns the encoded tag's length in bytes, or -1 on error. */
int
evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
	return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
}
/** Without consuming anything, compute the total size of the next
 * tagged chunk (tag bytes + length-field bytes + payload) and store it
 * in *plength.  Returns 0 on success, -1 on error. */
int
evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
	int res, len;

	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
	if (len == -1)
		return (-1);
	res = decode_int_internal(plength, evbuf, len);
	if (res == -1)
		return (-1);
	/* payload length + encoded-length bytes + tag bytes = whole chunk */
	*plength += res + len;

	return (0);
}
/** Without consuming anything, store the payload length of the next
 * tagged chunk in *plength.  Returns 0 on success, -1 on error. */
int
evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
	int res, len;

	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
	if (len == -1)
		return (-1);
	res = decode_int_internal(plength, evbuf, len);
	if (res == -1)
		return (-1);

	return (0);
}
/* just unmarshals the header and returns the length of the remaining data */
/** Consume the tag (stored in *ptag) and length header of the next
 * chunk; the payload itself is left in 'evbuf'.  Returns the payload
 * length, or -1 on error or when the full payload is not yet
 * buffered. */
int
evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
	ev_uint32_t len;

	if (decode_tag_internal(ptag, evbuf, 1 /* dodrain */) == -1)
		return (-1);
	if (evtag_decode_int(&len, evbuf) == -1)
		return (-1);
	if (evbuffer_get_length(evbuf) < len)
		return (-1);

	return (len);
}
/** Discard the next tagged chunk (tag, length, and payload) from
 * 'evbuf'.  Returns 0 on success, -1 on failure. */
int
evtag_consume(struct evbuffer *evbuf)
{
	int payload_len = evtag_unmarshal_header(evbuf, NULL);

	if (payload_len == -1)
		return (-1);
	evbuffer_drain(evbuf, payload_len);
	return (0);
}
/* Reads the data type from an event buffer */
/** Consume the next tagged chunk from 'src': the tag goes to *ptag and
 * the payload is appended to 'dst'.  Returns the payload length, or -1
 * on error. */
int
evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
{
	int len;

	if ((len = evtag_unmarshal_header(src, ptag)) == -1)
		return (-1);
	if (evbuffer_add(dst, evbuffer_pullup(src, len), len) == -1)
		return (-1);
	evbuffer_drain(src, len);

	return (len);
}
/* Marshaling for integers */
/* Consume a tagged 32-bit integer record whose tag must equal 'need_tag',
 * storing the decoded value through *pinteger.  Returns the number of
 * bytes the integer's encoding occupied, or -1 on mismatch or decode
 * failure.  Note: the tag and length are consumed as soon as they decode,
 * so some failure paths leave the buffer partially drained. */
int
evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
    ev_uint32_t *pinteger)
{
	ev_uint32_t tag;
	ev_uint32_t len;
	int result;

	/* Consume and check the tag. */
	if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
		return (-1);
	if (need_tag != tag)
		return (-1);
	/* Consume the payload length; the payload must be fully buffered. */
	if (evtag_decode_int(&len, evbuf) == -1)
		return (-1);
	if (evbuffer_get_length(evbuf) < len)
		return (-1);

	/* Decode the value in place, then drain the whole payload. */
	result = decode_int_internal(pinteger, evbuf, 0);
	evbuffer_drain(evbuf, len);
	if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
		return (-1);
	else
		return result;
}
/* 64-bit counterpart of evtag_unmarshal_int(): consume a tagged 64-bit
 * integer record whose tag must equal 'need_tag', storing the value
 * through *pinteger.  Returns the encoded length, or -1 on failure; the
 * same partial-drain caveat as the 32-bit version applies. */
int
evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
    ev_uint64_t *pinteger)
{
	ev_uint32_t tag;
	ev_uint32_t len;
	int result;

	/* Consume and check the tag. */
	if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
		return (-1);
	if (need_tag != tag)
		return (-1);
	/* Consume the payload length; the payload must be fully buffered. */
	if (evtag_decode_int(&len, evbuf) == -1)
		return (-1);
	if (evbuffer_get_length(evbuf) < len)
		return (-1);

	/* Decode the value in place, then drain the whole payload. */
	result = decode_int64_internal(pinteger, evbuf, 0);
	evbuffer_drain(evbuf, len);
	if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
		return (-1);
	else
		return result;
}
/* Unmarshal a fixed length tag: consume one record whose tag must equal
 * 'need_tag' and whose payload must be exactly 'len' bytes, copying the
 * payload into 'data'.  Returns 0 on success, -1 on failure. */
int
evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
    size_t len)
{
	ev_uint32_t found_tag;
	int payload_len;

	/* Now unmarshal a tag and check that it matches the tag we want */
	payload_len = evtag_unmarshal_header(src, &found_tag);
	if (payload_len < 0 || found_tag != need_tag)
		return (-1);

	/* The payload must be exactly the size the caller expects. */
	if ((size_t)payload_len != len)
		return (-1);

	evbuffer_remove(src, data, len);
	return (0);
}
/* Consume a tagged string record whose tag must equal 'need_tag'.  A
 * NUL-terminated copy of the payload is allocated with mm_malloc() and
 * stored through *pstring; the caller owns (and must free) it.
 * Returns 0 on success, -1 on failure (on allocation failure *pstring is
 * set to NULL). */
int
evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
    char **pstring)
{
	ev_uint32_t found_tag;
	int payload_len;

	payload_len = evtag_unmarshal_header(evbuf, &found_tag);
	if (payload_len == -1 || found_tag != need_tag)
		return (-1);

	/* One extra byte for the terminating NUL. */
	*pstring = mm_malloc(payload_len + 1);
	if (*pstring == NULL) {
		event_warn("%s: malloc", __func__);
		return -1;
	}
	evbuffer_remove(evbuf, *pstring, payload_len);
	(*pstring)[payload_len] = '\0';
	return (0);
}
/* Consume a tagged timeval record: two consecutive varint-encoded
 * integers (seconds, then microseconds) stored through 'ptv'.  Returns 0
 * on success, -1 on failure.  Once the header is consumed, the advertised
 * payload is drained even on the failure paths (see 'done'). */
int
evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
    struct timeval *ptv)
{
	ev_uint32_t tag;
	ev_uint32_t integer;
	int len, offset, offset2;
	int result = -1;

	/* Consume the header (tag + payload length). */
	if ((len = evtag_unmarshal_header(evbuf, &tag)) == -1)
		return (-1);
	if (tag != need_tag)
		goto done;
	/* First integer: seconds. */
	if ((offset = decode_int_internal(&integer, evbuf, 0)) == -1)
		goto done;
	ptv->tv_sec = integer;
	/* Second integer: microseconds, starting right after the first. */
	if ((offset2 = decode_int_internal(&integer, evbuf, offset)) == -1)
		goto done;
	ptv->tv_usec = integer;
	/* Both encodings must fit in the advertised payload. */
	if (offset + offset2 > len) /* XXX Should this be != instead of > ? */
		goto done;
	result = 0;
 done:
	/* Drain the payload regardless of success. */
	evbuffer_drain(evbuf, len);
	return result;
}

Просмотреть файл

@ -1,45 +0,0 @@
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVHTTP_H_
#define _EVHTTP_H_
/** @file evhttp.h
An http implementation subsystem for Libevent.
The <evhttp.h> header is deprecated in Libevent 2.0 and later; please
use <event2/http.h> instead. Depending on what functionality you
need, you may also want to include more of the other <event2/...>
headers.
*/
#include <event.h>
#include <event2/http.h>
#include <event2/http_struct.h>
#include <event2/http_compat.h>
#endif /* _EVHTTP_H_ */

Просмотреть файл

@ -1,92 +0,0 @@
/*
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVMAP_H_
#define _EVMAP_H_
/** @file evmap-internal.h
*
* An event_map is a utility structure to map each fd or signal to zero or
* more events. Functions to manipulate event_maps should only be used from
* inside libevent. They generally need to hold the lock on the corresponding
* event_base.
**/
struct event_base;
struct event;
/** Initialize an event_map for use.
*/
void evmap_io_initmap(struct event_io_map* ctx);
void evmap_signal_initmap(struct event_signal_map* ctx);
/** Remove all entries from an event_map.
@param ctx the map to clear.
*/
void evmap_io_clear(struct event_io_map* ctx);
void evmap_signal_clear(struct event_signal_map* ctx);
/** Add an IO event (some combination of EV_READ or EV_WRITE) to an
event_base's list of events on a given file descriptor, and tell the
underlying ompi_eventops about the fd if its state has changed.
Requires that ev is not already added.
@param base the event_base to operate on.
@param fd the file descriptor corresponding to ev.
@param ev the event to add.
*/
int evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev);
/** Remove an IO event (some combination of EV_READ or EV_WRITE) to an
event_base's list of events on a given file descriptor, and tell the
underlying ompi_eventops about the fd if its state has changed.
@param base the event_base to operate on.
@param fd the file descriptor corresponding to ev.
@param ev the event to remove.
*/
int evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev);
/** Active the set of events waiting on an event_base for a given fd.
@param base the event_base to operate on.
@param fd the file descriptor that has become active.
@param events a bitmask of EV_READ|EV_WRITE|EV_ET.
*/
void evmap_io_active(struct event_base *base, evutil_socket_t fd, short events);
/* These functions behave in the same way as evmap_io_*, except they work on
* signals rather than fds. signals use a linear map everywhere; fds use
* either a linear map or a hashtable. */
int evmap_signal_add(struct event_base *base, int signum, struct event *ev);
int evmap_signal_del(struct event_base *base, int signum, struct event *ev);
void evmap_signal_active(struct event_base *base, evutil_socket_t signum, int ncalls);
void *evmap_io_get_fdinfo(struct event_io_map *ctx, evutil_socket_t fd);
void evmap_check_integrity(struct event_base *base);
#endif /* _EVMAP_H_ */

Просмотреть файл

@ -1,799 +0,0 @@
/*
* Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"
/** An entry for an evmap_io list: notes all the events that want to read or
    write on a given fd, and the number of each.
 */
struct evmap_io {
	struct event_list events; /* every event registered on this fd */
	ev_uint16_t nread;  /* how many of those events want EV_READ */
	ev_uint16_t nwrite; /* how many of those events want EV_WRITE */
};
/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_list events; /* every event registered on this signal */
};
/* On some platforms, fds start at 0 and increment by 1 as they are
allocated, and old numbers get used. For these platforms, we
implement io maps just like signal maps: as an array of pointers to
struct evmap_io. But on other platforms (windows), sockets are not
0-indexed, not necessarily consecutive, and not necessarily reused.
There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
/* Hashtable node mapping one socket to its evmap_io state; used when
 * EVMAP_USE_HT is defined (platforms where sockets are not small dense
 * integers — see the comment above). */
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node; /* hashtable linkage */
	evutil_socket_t fd; /* hash key */
	union { /* This is a union in case we need to make more things that can
		   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};
/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned raw = (unsigned) e->fd;
	unsigned rotated = (raw >> 2) | (raw << 30);
	return raw + rotated;
}
/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *lhs, struct event_map_entry *rhs)
{
	/* Two entries denote the same bucket key iff their fds match. */
	if (lhs->fd == rhs->fd)
		return 1;
	return 0;
}
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
0.5, mm_malloc, mm_realloc, mm_free)
/* Look up the entry of type 'type' for fd 'slot' in the hashtable 'map';
 * set (x) to a pointer to it, or to NULL if no entry exists.
 * Fix: dropped the stray ';' after "while (0)" — it defeated the
 * do/while(0) single-statement idiom (an unbraced "if (c) GET_IO_SLOT(...);
 * else ..." would not compile).  Call sites already supply their own
 * semicolon, so behavior is unchanged. */
#define GET_IO_SLOT(x, map, slot, type) \
	do { \
		struct event_map_entry _key, *_ent; \
		_key.fd = slot; \
		_ent = HT_FIND(event_io_map, map, &_key); \
		(x) = _ent ? &_ent->ent.type : NULL; \
	} while (0)
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
do { \
struct event_map_entry _key, *_ent; \
_key.fd = slot; \
_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
event_map_entry, &_key, ptr, \
{ \
_ent = *ptr; \
}, \
{ \
_ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
if (EVUTIL_UNLIKELY(_ent == NULL)) \
return (-1); \
_ent->fd = slot; \
(ctor)(&_ent->ent.type); \
_HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
}); \
(x) = &_ent->ent.type; \
} while (0)
/* Hashtable build: initializing an io map just resets the hashtable. */
void evmap_io_initmap(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}
void evmap_io_clear(struct event_io_map *ctx)
{
struct event_map_entry **ent, **next, *this;
for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
this = *ent;
next = HT_NEXT_RMV(event_io_map, ctx, ent);
mm_free(this);
}
HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
if there are no entries for 'slot'. Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
by allocating enough memory for a 'struct type', and initializing the new
value by calling the function 'ctor' on it. Makes the function
return -1 on allocation failure.
*/
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
do { \
if ((map)->entries[slot] == NULL) { \
(map)->entries[slot] = \
mm_calloc(1,sizeof(struct type)+fdinfo_len); \
if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
return (-1); \
(ctor)((struct type *)(map)->entries[slot]); \
} \
(x) = (struct type *)((map)->entries[slot]); \
} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
/* Non-hashtable build: an io map has the same representation as a signal
 * map, so initialization simply reuses the signal-map initializer. */
void
evmap_io_initmap(struct event_io_map* ctx)
{
	evmap_signal_initmap(ctx);
}
/* Non-hashtable build: clearing an io map is identical to clearing a
 * signal map (same underlying array-of-pointers representation). */
void
evmap_io_clear(struct event_io_map* ctx)
{
	evmap_signal_clear(ctx);
}
/** Expand 'map' with new entries of width 'msize' until it is big enough
    to store a value in 'slot'.  Returns 0 on success (including the
    already-big-enough case), -1 on allocation failure.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	int capacity;
	void **grown;

	/* Already large enough: nothing to do. */
	if (slot < map->nentries)
		return (0);

	/* Grow geometrically from a floor of 32 until 'slot' fits. */
	capacity = map->nentries ? map->nentries : 32;
	while (capacity <= slot)
		capacity <<= 1;

	grown = (void **)mm_realloc(map->entries, capacity * msize);
	if (grown == NULL)
		return (-1);

	/* Zero only the newly added tail of the array. */
	memset(&grown[map->nentries], 0,
	    (capacity - map->nentries) * msize);

	map->nentries = capacity;
	map->entries = grown;
	return (0);
}
/* Start a signal map in the empty, unallocated state. */
void
evmap_signal_initmap(struct event_signal_map *ctx)
{
	ctx->entries = NULL;
	ctx->nentries = 0;
}
/* Free every per-slot entry of a signal map, then the table itself,
 * returning the map to the empty state. */
void
evmap_signal_clear(struct event_signal_map *ctx)
{
	int slot;

	if (ctx->entries != NULL) {
		for (slot = 0; slot < ctx->nentries; ++slot) {
			if (ctx->entries[slot] != NULL)
				mm_free(ctx->entries[slot]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}
/* code specific to file descriptors */
/** Constructor for struct evmap_io: empty event list, zero readers and
    writers. */
static void
evmap_io_init(struct evmap_io *entry)
{
	entry->nread = 0;
	entry->nwrite = 0;
	TAILQ_INIT(&entry->events);
}
/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
/* Register 'ev' (some combination of EV_READ/EV_WRITE on 'fd') in the io
 * map, and notify the backend ('evsel') when the fd's subscription set
 * actually changes.  Caller must hold the base lock (not verifiable from
 * this function alone — confirm at call sites). */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	/* Linear map: make sure the table reaches index 'fd'. */
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	/* Find or create the per-fd entry; the backend's private fdinfo is
	 * allocated immediately after it (evsel->fdinfo_len extra bytes). */
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
	    evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	/* 'old' = conditions already subscribed with the backend. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	/* 'res' = conditions that just became subscribed (count 0 -> 1). */
	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	/* Counts live in 16-bit fields; refuse to overflow them. */
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = TAILQ_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		/* The backend's view of this fd changed. */
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);

	return (retval);
}
/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
/* Unregister 'ev' from the io map for 'fd', notifying the backend when a
 * condition's subscriber count drops to zero.
 * Fix: added the explicit (ev_uint16_t) casts on the count stores, making
 * the narrowing conversion intentional and symmetric with evmap_io_add
 * (same stored values; silences implicit-conversion warnings). */
int
evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	/* Linear map: an fd beyond the table was never added. */
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	/* 'old' = conditions subscribed before this deletion. */
	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	/* 'res' = conditions whose last subscriber just left (count -> 0). */
	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
			return (-1);
		/* The backend's view of this fd changed. */
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	TAILQ_REMOVE(&ctx->events, ev, ev_io_next);

	return (retval);
}
/* Activate every event registered on 'fd' whose interest overlaps the
 * ready-condition mask 'events'.  Uses event_active_nolock, so the caller
 * presumably holds the base lock — confirm at call sites. */
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	EVUTIL_ASSERT(fd < io->nentries);
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	EVUTIL_ASSERT(ctx);
	TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
		/* Only wake events that asked for a ready condition. */
		if (ev->ev_events & events)
			event_active_nolock(ev, ev->ev_events & events, 1);
	}
}
/* code specific to signals */
/* Constructor for struct evmap_signal: a signal slot is nothing but an
 * (initially empty) event list. */
static void
evmap_signal_init(struct evmap_signal *entry)
{
	TAILQ_INIT(&entry->events);
}
/* Register 'ev' for signal 'sig'.  When this is the first listener on the
 * signal, the signal backend's add() is invoked to start catching it.
 * Returns 1 on success, -1 on error.
 * Fix: use the cached 'evsel' local for fdinfo_len instead of re-reading
 * base->evsigsel (same pointer — consistent with how evsel is used for
 * add() below and with evmap_io_add). */
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	/* Grow the signal table if this signal number has no slot yet. */
	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    evsel->fdinfo_len);

	/* First listener on this signal: hook it up in the backend. */
	if (TAILQ_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);

	return (1);
}
/* Unregister 'ev' from signal 'sig'.  When it is the last listener, the
 * signal backend's del() is invoked to stop catching the signal.
 * Returns 1 on success, -1 on error. */
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	/* first == last means the list holds exactly one element, i.e.
	 * 'ev' is the final listener on this signal. */
	if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);

	return (1);
}
/* Mark every event registered for signal 'sig' active 'ncalls' times.
 * Uses event_active_nolock (no locking performed here). */
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *entry;
	struct event *ev;

	EVUTIL_ASSERT(sig < map->nentries);
	GET_SIGNAL_SLOT(entry, map, sig, evmap_signal);

	TAILQ_FOREACH(ev, &entry->events, ev_signal_next)
		event_active_nolock(ev, EV_SIGNAL, ncalls);
}
/* Return the backend's private fdinfo for 'fd', or NULL if the fd has no
 * entry in 'map'.  The fdinfo is stored immediately after the evmap_io. */
void *
evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *entry;

	GET_IO_SLOT(entry, map, fd, evmap_io);
	if (!entry)
		return NULL;
	return ((char*)entry) + sizeof(struct evmap_io);
}
/** Per-fd structure for use with changelists. It keeps track, for each fd or
* signal using the changelist, of where its entry in the changelist is.
*/
/* Records, for one fd or signal, where its pending entry sits in the
 * changelist.  Stored 1-based so a zeroed structure means "no entry". */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
/* Put a changelist into the empty state: no array allocated, no entries. */
void
event_changelist_init(struct event_changelist *changelist)
{
	changelist->n_changes = 0;
	changelist->changes_size = 0;
	changelist->changes = NULL;
}
/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *raw;

	if (change->read_change & EV_CHANGE_SIGNAL) {
		/* Signal change: the fdinfo sits just past the evmap_signal. */
		struct evmap_signal *slot;
		GET_SIGNAL_SLOT(slot, &base->sigmap, change->fd, evmap_signal);
		raw = ((char*)slot) + sizeof(struct evmap_signal);
	} else {
		/* IO change: the fdinfo sits just past the evmap_io. */
		struct evmap_io *slot;
		GET_IO_SLOT(slot, &base->io, change->fd, evmap_io);
		raw = ((char*)slot) + sizeof(struct evmap_io);
	}
	return (void*)raw;
}
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
int i;
struct event_changelist *changelist = &base->changelist;
EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
for (i = 0; i < changelist->n_changes; ++i) {
struct event_change *c = &changelist->changes[i];
struct event_changelist_fdinfo *f;
EVUTIL_ASSERT(c->fd >= 0);
f = event_change_get_fdinfo(base, c);
EVUTIL_ASSERT(f);
EVUTIL_ASSERT(f->idxplus1 == i + 1);
}
for (i = 0; i < base->io.nentries; ++i) {
struct evmap_io *io = base->io.entries[i];
struct event_changelist_fdinfo *f;
if (!io)
continue;
f = (void*)
( ((char*)io) + sizeof(struct evmap_io) );
if (f->idxplus1) {
struct event_change *c = &changelist->changes[f->idxplus1 - 1];
EVUTIL_ASSERT(c->fd == i);
}
}
}
#else
#define event_changelist_check(base) ((void)0)
#endif
/* Empty the changelist, resetting each affected fd/signal's fdinfo back
 * to "no pending change" (idxplus1 == 0).  The change array itself is
 * kept for reuse; see event_changelist_freemem to release it. */
void
event_changelist_remove_all(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		/* Mark this fd/signal as having no queued change. */
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}
void
event_changelist_freemem(struct event_changelist *changelist)
{
if (changelist->changes)
mm_free(changelist->changes);
event_changelist_init(changelist); /* zero it all out. */
}
/** Increase the size of 'changelist' to hold more changes.  Capacity is
    doubled, with a floor of 64 entries.  Returns 0 on success, -1 on
    allocation failure (the old array remains valid). */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	struct event_change *resized;
	int target;

	target = (changelist->changes_size < 64)
	    ? 64 : changelist->changes_size * 2;

	resized = mm_realloc(changelist->changes,
	    target * sizeof(struct event_change));
	if (EVUTIL_UNLIKELY(resized == NULL))
		return (-1);

	changelist->changes = resized;
	changelist->changes_size = target;
	return (0);
}
/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.  Returns NULL on allocation failure.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		/* No queued change for this fd yet: append a fresh zeroed
		 * entry and record its 1-based index in the fdinfo. */
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		/* Reuse the change already queued for this fd. */
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}

	return change;
}
/* Queue an "add" of 'events' on 'fd' in the changelist.  'old' is the
 * event mask previously registered for the fd; 'p' is its changelist
 * fdinfo.  Returns 0 on success, -1 on allocation failure. */
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance. */
	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}
/* Queue a "delete" of 'events' on 'fd' in the changelist, cancelling a
 * pending add when that add never reached the backend.  'old' is the
 * event mask previously registered; 'p' is the fd's changelist fdinfo.
 * Returns 0 on success, -1 on allocation failure. */
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete removes any previous add, rather than replacing it:
	   on those platforms where "add, delete, dispatch" is not the same
	   as "no-op, dispatch", we want the no-op behavior.

	   As well as checking the current operation we should also check
	   the original set of events to make sure were not ignoring
	   the case where the add operation is present on an event that
	   was already set.

	   If we have a no-op item, we could remove it it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		/* Cancel a pending add that had nothing registered before
		 * it; otherwise queue an explicit delete. */
		if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
		    (change->read_change & EV_CHANGE_ADD))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE) &&
		    (change->write_change & EV_CHANGE_ADD))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}
/* Debugging helper: assert that every inserted io/signal event appears
 * exactly once in the matching io or signal map, and that the maps hold
 * no events beyond those inserted.  Temporarily borrows two scratch bits
 * in ev_flags to mark events as seen. */
void
evmap_check_integrity(struct event_base *base)
{
#define EVLIST_X_SIGFOUND 0x1000
#define EVLIST_X_IOFOUND 0x2000

	evutil_socket_t i;
	struct event *ev;
	struct event_io_map *io = &base->io;
	struct event_signal_map *sigmap = &base->sigmap;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
#endif
	int nsignals, ntimers, nio;
	nsignals = ntimers = nio = 0;

	/* Pass 1: clear the scratch flags on every inserted event. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INIT);
		ev->ev_flags &= ~(EVLIST_X_SIGFOUND|EVLIST_X_IOFOUND);
	}

	/* Pass 2: walk the io map, marking and counting each event found.
	 * Note the loop header is chosen by the preprocessor (hashtable vs
	 * linear map); the loop body below is shared by both variants. */
#ifdef EVMAP_USE_HT
	HT_FOREACH(mapent, event_io_map, io) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		i = (*mapent)->fd;
#else
	for (i = 0; i < io->nentries; ++i) {
		struct evmap_io *ctx = io->entries[i];
		if (!ctx)
			continue;
#endif
		TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_IOFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_IOFOUND;
			nio++;
		}
	}

	/* Pass 3: same walk over the signal map. */
	for (i = 0; i < sigmap->nentries; ++i) {
		struct evmap_signal *ctx = sigmap->entries[i];
		if (!ctx)
			continue;
		TAILQ_FOREACH(ev, &ctx->events, ev_signal_next) {
			EVUTIL_ASSERT(!(ev->ev_flags & EVLIST_X_SIGFOUND));
			EVUTIL_ASSERT(ev->ev_fd == i);
			ev->ev_flags |= EVLIST_X_SIGFOUND;
			nsignals++;
		}
	}

	/* Pass 4: every inserted io/signal event must have been marked
	 * above; decrementing the counters catches stale map entries. */
	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (ev->ev_events & (EV_READ|EV_WRITE)) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_IOFOUND);
			--nio;
		}
		if (ev->ev_events & EV_SIGNAL) {
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_X_SIGFOUND);
			--nsignals;
		}
	}

	EVUTIL_ASSERT(nio == 0);
	EVUTIL_ASSERT(nsignals == 0);
	/* There is no "EVUTIL_ASSERT(ntimers == 0)": eventqueue is only for
	 * pending signals and io events.
	 */
}

Просмотреть файл

@ -1,473 +0,0 @@
/*
* Submitted by David Pacheco (dp.spambait@gmail.com)
*
* Copyright 2006-2007 Niels Provos
* Copyright 2007-2012 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2007 Sun Microsystems. All rights reserved.
* Use is subject to license terms.
*/
/*
* evport.c: event backend using Solaris 10 event ports. See port_create(3C).
* This implementation is loosely modeled after the one used for select(2) (in
* select.c).
*
* The outstanding events are tracked in a data structure called evport_data.
* Each entry in the ed_fds array corresponds to a file descriptor, and contains
* pointers to the read and write events that correspond to that fd. (That is,
* when the file is readable, the "read" event should handle it, etc.)
*
* evport_add and evport_del update this data structure. evport_dispatch uses it
* to determine where to callback when an event occurs (which it gets from
* port_getn).
*
* Helper functions are used: grow() grows the file descriptor array as
* necessary when large fd's come in. reassociate() takes care of maintaining
* the proper file-descriptor/event-port associations.
*
* As in the select(2) implementation, signals are handled by evsignal.
*/
#include "event2/event-config.h"
#include <sys/time.h>
#include <sys/queue.h>
#include <errno.h>
#include <poll.h>
#include <port.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include "event2/thread.h"
#include "evthread-internal.h"
#include "event-internal.h"
#include "log-internal.h"
#include "evsignal-internal.h"
#include "evmap-internal.h"
/*
 * Default value for ed_nevents, which is the maximum file descriptor number we
 * can handle. If an event comes in for a file descriptor F > nevents, we will
 * grow the array of file descriptors, doubling its size.
 */
#define DEFAULT_NFDS	16

/*
 * EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
 * any particular call. You can speed things up by increasing this, but it will
 * (obviously) require more memory.
 */
#define EVENTS_PER_GETN 8

/*
 * Per-file-descriptor information about what events we're subscribed to. These
 * fields are NULL if no event is subscribed to either of them.
 */
struct fd_info {
	short fdi_what;		/* combinations of EV_READ and EV_WRITE */
};

#define FDI_HAS_READ(fdi)  ((fdi)->fdi_what & EV_READ)
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_what & EV_WRITE)
#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
/* Translate our EV_* interest mask into the poll(2) event bits that
 * port_associate(3C) expects.  The whole expansion is parenthesized:
 * the original left the '|' expression bare, so a use such as
 * "FDI_TO_SYSEVENTS(f) & mask" would have bound as
 * "(... POLLIN : 0) | ((...) & mask)". */
#define FDI_TO_SYSEVENTS(fdi) ((FDI_HAS_READ(fdi) ? POLLIN : 0) | \
    (FDI_HAS_WRITE(fdi) ? POLLOUT : 0))
/* Per-base state for the event-port backend. */
struct evport_data {
	int ed_port;	/* event port for system events */
	int ed_nevents;	/* number of allocated fdi's */
	struct fd_info *ed_fds;	/* allocated fdi table */
	/* fdi's that we need to reassoc */
	int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
};

/* Backend entry points; presumably selected via struct eventop in
 * event-internal.h (included above) -- confirm against event.c. */
static void* evport_init(struct event_base *);
static int evport_add(struct event_base *, int fd, short old, short events, void *);
static int evport_del(struct event_base *, int fd, short old, short events, void *);
static int evport_dispatch(struct event_base *, struct timeval *);
static void evport_dealloc(struct event_base *);

/* Dispatch table exported for this backend. */
const struct eventop evportops = {
	"evport",
	evport_init,
	evport_add,
	evport_del,
	evport_dispatch,
	evport_dealloc,
	1, /* need reinit */
	0, /* features */
	0, /* fdinfo length */
};
/*
 * Construct the event-port backend state for BASE: allocate the per-base
 * bookkeeping structure, create the Solaris event port, and size the
 * initial file-descriptor table.  Returns the opaque backend pointer, or
 * NULL on allocation / port_create(3C) failure (all partial state is
 * released before returning).
 */
static void*
evport_init(struct event_base *base)
{
	struct evport_data *data;
	int idx;

	data = mm_calloc(1, sizeof(struct evport_data));
	if (data == NULL)
		return (NULL);

	data->ed_port = port_create();
	if (data->ed_port == -1) {
		mm_free(data);
		return (NULL);
	}

	/* Allocate the initial (zeroed) fd_info table. */
	data->ed_fds = mm_calloc(DEFAULT_NFDS, sizeof(struct fd_info));
	if (data->ed_fds == NULL) {
		close(data->ed_port);
		mm_free(data);
		return (NULL);
	}
	data->ed_nevents = DEFAULT_NFDS;

	/* No fds are awaiting reassociation yet. */
	for (idx = 0; idx < EVENTS_PER_GETN; idx++)
		data->ed_pending[idx] = -1;

	evsig_init(base);

	return (data);
}
#ifdef CHECK_INVARIANTS
/*
 * Checks some basic properties about the evport_data structure. Because it
 * checks all file descriptors, this function can be expensive when the maximum
 * file descriptor ever used is rather large.
 */
static void
check_evportop(struct evport_data *evpd)
{
	EVUTIL_ASSERT(evpd);
	EVUTIL_ASSERT(evpd->ed_nevents > 0);
	EVUTIL_ASSERT(evpd->ed_port > 0);
	/* Fixed: the original wrote "evpd->ed_fds > 0", a relational
	 * comparison between a pointer and an integer, which is not valid
	 * C.  A plain non-NULL check is what was meant. */
	EVUTIL_ASSERT(evpd->ed_fds != NULL);
}

/*
 * Verifies very basic integrity of a given port_event.
 */
static void
check_event(port_event_t* pevt)
{
	/*
	 * We've only registered for PORT_SOURCE_FD events. The only
	 * other thing we can legitimately receive is PORT_SOURCE_ALERT,
	 * but since we're not using port_alert either, we can assume
	 * PORT_SOURCE_FD.
	 */
	EVUTIL_ASSERT(pevt->portev_source == PORT_SOURCE_FD);
	EVUTIL_ASSERT(pevt->portev_user == NULL);
}

#else
/* Invariant checking compiled out: both helpers expand to nothing. */
#define check_evportop(epop)
#define check_event(pevt)
#endif /* CHECK_INVARIANTS */
/*
 * Enlarge the fd_info table by the given factor (> 1), zeroing the newly
 * added tail.  Returns 0 on success, -1 if the reallocation fails (in
 * which case the old table is left intact).
 */
static int
grow(struct evport_data *epdp, int factor)
{
	struct fd_info *newtab;
	int oldsize, newsize;

	EVUTIL_ASSERT(factor > 1);
	check_evportop(epdp);

	oldsize = epdp->ed_nevents;
	newsize = factor * oldsize;

	newtab = mm_realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
	if (newtab == NULL)
		return -1;
	epdp->ed_fds = newtab;
	/* mm_realloc does not zero the extension; clear it ourselves. */
	memset((char*) (epdp->ed_fds + oldsize), 0,
	    (newsize - oldsize)*sizeof(struct fd_info));
	epdp->ed_nevents = newsize;

	check_evportop(epdp);
	return 0;
}
/*
 * (Re)associates the given file descriptor with the event port.  The OS
 * event bits are derived from the fd_info interest mask; if no events are
 * of interest, this is a no-op.  Returns 0 on success, -1 if
 * port_associate(3C) fails.
 */
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
	int sysevents = FDI_TO_SYSEVENTS(fdip);

	/* Only talk to the kernel when there is actual interest. */
	if (sysevents != 0 &&
	    port_associate(epdp->ed_port, PORT_SOURCE_FD,
	    fd, sysevents, NULL) == -1) {
		event_warn("port_associate");
		return (-1);
	}

	check_evportop(epdp);
	return (0);
}
/*
 * Main event loop - polls port_getn for some number of events, and processes
 * them.
 *
 * Called with th_base_lock held; the lock is released around the blocking
 * port_getn(3C) call and re-acquired before touching base state again.
 * Returns 0 on success (including timeout or EINTR/EAGAIN) and -1 on a
 * hard port_getn error.
 */
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = base->evbase;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			int fd = epdp->ed_pending[i];
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	/* Drop the base lock while we may block in the kernel. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
	    (unsigned int *) &nevents, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno == EINTR || errno == EAGAIN) {
			return (0);
		} else if (errno == ETIME) {
			if (nevents == 0)
				return (0);
			/* ETIME with nevents != 0 means some events were
			 * collected before the timeout fired; fall through
			 * and process them. */
		} else {
			event_warn("port_getn");
			return (-1);
		}
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		/* Event-port associations are one-shot: remember this fd so
		 * the reassociation pass at the top of the next dispatch can
		 * re-arm it. */
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & (POLLERR|POLLHUP)) {
			res = EV_READ | EV_WRITE;
		} else {
			if (pevt->portev_events & POLLIN)
				res |= EV_READ;
			if (pevt->portev_events & POLLOUT)
				res |= EV_WRITE;
		}

		/*
		 * Check for the error situations or a hangup situation
		 */
		if (pevt->portev_events & (POLLERR|POLLHUP|POLLNVAL))
			res |= EV_READ|EV_WRITE;

		EVUTIL_ASSERT(epdp->ed_nevents > fd);
		/* Fixed: the original computed "fdi = &(epdp->ed_fds[fd]);"
		 * here but never read it -- a dead store; evmap_io_active()
		 * needs only the fd and the event mask. */
		evmap_io_active(base, fd, res);
	} /* end of all events gotten */

	check_evportop(epdp);
	return (0);
}
/*
 * Adds the given event (so that you will be notified when it happens via
 * the callback function).  Grows the fd_info table as needed so that FD
 * indexes it, records the EV_READ/EV_WRITE interest, and (re)associates
 * the fd with the event port.  Returns 0 on success, -1 on failure.
 */
static int
evport_add(struct event_base *base, int fd, short old, short events, void *p)
{
	struct evport_data *evpd = base->evbase;
	struct fd_info *fdi;
	int factor = 1;

	(void)p;

	check_evportop(evpd);

	/* Double the table size until fd fits within it. */
	while (fd >= factor * evpd->ed_nevents)
		factor *= 2;
	if (factor > 1 && grow(evpd, factor) == -1)
		return (-1);

	fdi = &evpd->ed_fds[fd];
	fdi->fdi_what |= events;

	return reassociate(evpd, fdi, fd);
}
/*
 * Removes the given event from the list of events to wait for.
 *
 * Clears the EV_READ/EV_WRITE interest bits for FD and, when the fd is
 * still associated with the port, either dissociates it (no interest
 * left) or re-associates it with the reduced mask.  Returns 0 on
 * success, -1 on failure (fd out of range, or port_dissociate(3C)
 * failed with something other than EBADFD).
 */
static int
evport_del(struct event_base *base, int fd, short old, short events, void *p)
{
	struct evport_data *evpd = base->evbase;
	struct fd_info *fdi;
	int i;
	int associated = 1;
	(void)p;

	check_evportop(evpd);

	/* Valid indices into ed_fds are 0 .. ed_nevents-1.  Fixed: the
	 * original test "evpd->ed_nevents < fd" let fd == ed_nevents
	 * through, which would then read one element past the end of the
	 * table below. */
	if (fd >= evpd->ed_nevents) {
		return (-1);
	}

	/* If fd fired on the last dispatch it has not yet been reassociated
	 * with the port (associations are one-shot), so there is nothing to
	 * dissociate. */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		if (evpd->ed_pending[i] == fd) {
			associated = 0;
			break;
		}
	}

	fdi = &evpd->ed_fds[fd];
	if (events & EV_READ)
		fdi->fdi_what &= ~EV_READ;
	if (events & EV_WRITE)
		fdi->fdi_what &= ~EV_WRITE;

	if (associated) {
		if (!FDI_HAS_EVENTS(fdi) &&
		    port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) {
			/*
			 * Ignore EBADFD error the fd could have been closed
			 * before event_del() was called.
			 */
			if (errno != EBADFD) {
				event_warn("port_dissociate");
				return (-1);
			}
		} else {
			if (FDI_HAS_EVENTS(fdi)) {
				return (reassociate(evpd, fdi, fd));
			}
		}
	} else {
		if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) {
			/* No interest left: drop the pending entry (index i
			 * found above) so the next dispatch does not
			 * reassociate this fd. */
			evpd->ed_pending[i] = -1;
		}
	}
	return 0;
}
/*
 * Tear down the event-port backend: release signal-handling state, close
 * the event port, and free the fd table and the per-base structure.
 */
static void
evport_dealloc(struct event_base *base)
{
	struct evport_data *data = base->evbase;

	evsig_dealloc(base);

	close(data->ed_port);

	if (data->ed_fds != NULL)
		mm_free(data->ed_fds);
	mm_free(data);
}

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше