
Redo a patch from late last night that replaces libevent 2.0.7 with libevent 2.0.13 as our default event library. Clean up the libevent renames so they correctly state 2013 as our new version.

This commit was SVN r25457.
This commit is contained in:
Ralph Castain 2011-11-08 01:36:15 +00:00
parent 3d318a4c26
commit a931c5b1eb
113 changed files with 327 additions and 48837 deletions
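
The bulk of this commit updates the component's rename header (opal_rename.h, per the Makefile listing below), which uses the preprocessor to prefix every libevent symbol so the copy embedded in Open MPI cannot collide with a system-installed libevent at link time; the change simply moves that prefix from opal_libevent2012_ to opal_libevent2013_. A minimal, self-contained C sketch of the technique follows — the dummy prefix, function body, and file layout are illustrative assumptions, not the actual Open MPI macros:

#include <stdio.h>

/* Rename the public symbol before any code that defines or calls it is
   compiled; every translation unit must see the same mapping, which is
   why the project centralizes it in a single header (opal_rename.h). */
#define event_base_new prefixed_event_base_new

/* Stand-in for the embedded library source: compiled with the rename in
   effect, this actually defines prefixed_event_base_new. */
int event_base_new(void) {
    return 42;
}

int main(void) {
    /* Callers that saw the rename header call the prefixed symbol
       transparently; nm on the object file shows only
       prefixed_event_base_new. */
    printf("%d\n", event_base_new());
    return 0;
}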

View file

View file

@@ -1,3 +0,0 @@
jsquyres
hjelmn
rhc

View file

@@ -33,8 +33,8 @@ AC_DEFUN([MCA_opal_event_libevent2013_CONFIG],[
AC_MSG_CHECKING([libevent configuration args])
str=`event_args="--disable-dns --disable-http --disable-rpc --disable-openssl --enable-hidden-symbols --includedir=$includedir/openmpi/opal/event/libevent/include"`
eval $str
eval "str=\"${includedir}\""
event_args="--disable-dns --disable-http --disable-rpc --disable-openssl --enable-hidden-symbols --includedir=$str/openmpi/opal/event/libevent/include"
unset str
AC_ARG_ENABLE(event-rtsig,

View file

@@ -19,356 +19,356 @@ extern "C" {
#endif
/* buffer.c */
#define _evbuffer_chain_pin opal_libevent2012_evbuffer_chain_pin
#define _evbuffer_chain_unpin opal_libevent2012_evbuffer_chain_unpin
#define _evbuffer_decref_and_unlock opal_libevent2012_evbuffer_decref_and_unlock
#define _evbuffer_expand_fast opal_libevent2012_evbuffer_expand_fast
#define _evbuffer_incref opal_libevent2012_evbuffer_incref
#define _evbuffer_incref_and_lock opal_libevent2012_evbuffer_incref_and_lock
#define _evbuffer_read_setup_vecs opal_libevent2012_evbuffer_read_setup_vecs
#define _evbuffer_testing_use_linear_file_access opal_libevent2012_evbuffer_testing_use_linear_file_access
#define _evbuffer_testing_use_mmap opal_libevent2012_evbuffer_testing_use_mmap
#define _evbuffer_testing_use_sendfile opal_libevent2012_evbuffer_testing_use_sendfile
#define evbuffer_add opal_libevent2012_evbuffer_add
#define evbuffer_add_buffer opal_libevent2012_evbuffer_add_buffer
#define evbuffer_add_cb opal_libevent2012_evbuffer_add_cb
#define evbuffer_add_file opal_libevent2012_evbuffer_add_file
#define evbuffer_add_printf opal_libevent2012_evbuffer_add_printf
#define evbuffer_add_reference opal_libevent2012_evbuffer_add_reference
#define evbuffer_add_vprintf opal_libevent2012_evbuffer_add_vprintf
#define evbuffer_cb_clear_flags opal_libevent2012_evbuffer_cb_clear_flags
#define evbuffer_cb_set_flags opal_libevent2012_evbuffer_cb_set_flags
#define evbuffer_commit_space opal_libevent2012_evbuffer_commit_space
#define evbuffer_copyout opal_libevent2012_evbuffer_copyout
#define evbuffer_defer_callbacks opal_libevent2012_evbuffer_defer_callbacks
#define evbuffer_drain opal_libevent2012_evbuffer_drain
#define evbuffer_enable_locking opal_libevent2012_evbuffer_enable_locking
#define evbuffer_expand opal_libevent2012_evbuffer_expand
#define evbuffer_find opal_libevent2012_evbuffer_find
#define evbuffer_free opal_libevent2012_evbuffer_free
#define evbuffer_freeze opal_libevent2012_evbuffer_freeze
#define evbuffer_get_contiguous_space opal_libevent2012_evbuffer_get_contiguous_space
#define evbuffer_get_length opal_libevent2012_evbuffer_get_length
#define evbuffer_lock opal_libevent2012_evbuffer_lock
#define evbuffer_new opal_libevent2012_evbuffer_new
#define evbuffer_peek opal_libevent2012_evbuffer_peek
#define evbuffer_prepend opal_libevent2012_evbuffer_prepend
#define evbuffer_prepend_buffer opal_libevent2012_evbuffer_prepend_buffer
#define evbuffer_ptr_set opal_libevent2012_evbuffer_ptr_set
#define evbuffer_pullup opal_libevent2012_evbuffer_pullup
#define evbuffer_read opal_libevent2012_evbuffer_read
#define evbuffer_readline opal_libevent2012_evbuffer_readline
#define evbuffer_readln opal_libevent2012_evbuffer_readln
#define evbuffer_remove opal_libevent2012_evbuffer_remove
#define evbuffer_remove_buffer opal_libevent2012_evbuffer_remove_buffer
#define evbuffer_remove_cb opal_libevent2012_evbuffer_remove_cb
#define evbuffer_remove_cb_entry opal_libevent2012_evbuffer_remove_cb_entry
#define evbuffer_reserve_space opal_libevent2012_evbuffer_reserve_space
#define evbuffer_search opal_libevent2012_evbuffer_search
#define evbuffer_search_eol opal_libevent2012_evbuffer_search_eol
#define evbuffer_search_range opal_libevent2012_evbuffer_search_range
#define evbuffer_set_parent opal_libevent2012_evbuffer_set_parent
#define evbuffer_setcb opal_libevent2012_evbuffer_setcb
#define evbuffer_unfreeze opal_libevent2012_evbuffer_unfreeze
#define evbuffer_unlock opal_libevent2012_evbuffer_unlock
#define evbuffer_write opal_libevent2012_evbuffer_write
#define evbuffer_write_atmost opal_libevent2012_evbuffer_write_atmost
#define _bufferevent_add_event opal_libevent2012__bufferevent_add_event
#define _bufferevent_decref_and_unlock opal_libevent2012__bufferevent_decref_and_unlock
#define _bufferevent_del_generic_timeout_cbs opal_libevent2012__bufferevent_del_generic_timeout_cbs
#define _bufferevent_generic_adj_timeouts opal_libevent2012__bufferevent_generic_adj_timeouts
#define _bufferevent_incref_and_lock opal_libevent2012__bufferevent_incref_and_lock
#define _bufferevent_init_generic_timeout_cbs opal_libevent2012__bufferevent_init_generic_timeout_cbs
#define _bufferevent_run_eventcb opal_libevent2012__bufferevent_run_eventcb
#define _bufferevent_run_readcb opal_libevent2012__bufferevent_run_readcb
#define _bufferevent_run_writecb opal_libevent2012__bufferevent_run_writecb
#define bufferevent_decref opal_libevent2012_bufferevent_decref
#define bufferevent_disable opal_libevent2012_bufferevent_disable
#define bufferevent_enable opal_libevent2012_bufferevent_enable
#define bufferevent_enable_locking opal_libevent2012_bufferevent_enable_locking
#define bufferevent_flush opal_libevent2012_bufferevent_flush
#define bufferevent_free opal_libevent2012_bufferevent_free
#define bufferevent_get_enabled opal_libevent2012_bufferevent_get_enabled
#define bufferevent_get_input opal_libevent2012_bufferevent_get_input
#define bufferevent_get_output opal_libevent2012_bufferevent_get_output
#define bufferevent_get_underlying opal_libevent2012_bufferevent_get_underlying
#define bufferevent_getfd opal_libevent2012_bufferevent_getfd
#define bufferevent_incref opal_libevent2012_bufferevent_incref
#define bufferevent_init_common opal_libevent2012_bufferevent_init_common
#define bufferevent_lock opal_libevent2012_bufferevent_lock
#define bufferevent_read opal_libevent2012_bufferevent_read
#define bufferevent_read_buffer opal_libevent2012_bufferevent_read_buffer
#define bufferevent_set_timeouts opal_libevent2012_bufferevent_set_timeouts
#define bufferevent_setcb opal_libevent2012_bufferevent_setcb
#define bufferevent_setfd opal_libevent2012_bufferevent_setfd
#define bufferevent_settimeout opal_libevent2012_bufferevent_settimeout
#define bufferevent_setwatermark opal_libevent2012_bufferevent_setwatermark
#define bufferevent_suspend_read opal_libevent2012_bufferevent_suspend_read
#define bufferevent_suspend_write opal_libevent2012_bufferevent_suspend_write
#define bufferevent_unlock opal_libevent2012_bufferevent_unlock
#define bufferevent_unsuspend_read opal_libevent2012_bufferevent_unsuspend_read
#define bufferevent_unsuspend_write opal_libevent2012_bufferevent_unsuspend_write
#define bufferevent_write opal_libevent2012_bufferevent_write
#define bufferevent_write_buffer opal_libevent2012_bufferevent_write_buffer
#define bufferevent_filter_new opal_libevent2012_bufferevent_filter_new
#define bufferevent_get_openssl_error opal_libevent2012_bufferevent_get_openssl_error
#define bufferevent_openssl_filter_new opal_libevent2012_bufferevent_openssl_filter_new
#define bufferevent_openssl_get_ssl opal_libevent2012_bufferevent_openssl_get_ssl
#define bufferevent_openssl_socket_new opal_libevent2012_bufferevent_openssl_socket_new
#define bufferevent_ssl_renegotiate opal_libevent2012_bufferevent_ssl_renegotiate
#define bufferevent_pair_get_partner opal_libevent2012_bufferevent_pair_get_partner
#define bufferevent_pair_new opal_libevent2012_bufferevent_pair_new
#define _bufferevent_decrement_read_buckets opal_libevent2012__bufferevent_decrement_read_buckets
#define _bufferevent_decrement_write_buckets opal_libevent2012__bufferevent_decrement_write_buckets
#define _bufferevent_get_read_max opal_libevent2012__bufferevent_get_read_max
#define _bufferevent_get_write_max opal_libevent2012__bufferevent_get_write_max
#define bufferevent_add_to_rate_limit_group opal_libevent2012_bufferevent_add_to_rate_limit_group
#define bufferevent_decrement_read_limit opal_libevent2012_bufferevent_decrement_read_limit
#define bufferevent_decrement_write_limit opal_libevent2012_bufferevent_decrement_write_limit
#define bufferevent_get_max_to_read opal_libevent2012_bufferevent_get_max_to_read
#define bufferevent_get_max_to_write opal_libevent2012_bufferevent_get_max_to_write
#define bufferevent_get_read_limit opal_libevent2012_bufferevent_get_read_limit
#define bufferevent_get_write_limit opal_libevent2012_bufferevent_get_write_limit
#define bufferevent_rate_limit_group_decrement_read opal_libevent2012_bufferevent_rate_limit_group_decrement_read
#define bufferevent_rate_limit_group_decrement_write opal_libevent2012_bufferevent_rate_limit_group_decrement_write
#define bufferevent_rate_limit_group_free opal_libevent2012_bufferevent_rate_limit_group_free
#define bufferevent_rate_limit_group_get_read_limit opal_libevent2012_bufferevent_rate_limit_group_get_read_limit
#define bufferevent_rate_limit_group_get_totals opal_libevent2012_bufferevent_rate_limit_group_get_totals
#define bufferevent_rate_limit_group_get_write_limit opal_libevent2012_bufferevent_rate_limit_group_get_write_limit
#define bufferevent_rate_limit_group_new opal_libevent2012_bufferevent_rate_limit_group_new
#define bufferevent_rate_limit_group_reset_totals opal_libevent2012_bufferevent_rate_limit_group_reset_totals
#define bufferevent_rate_limit_group_set_cfg opal_libevent2012_bufferevent_rate_limit_group_set_cfg
#define bufferevent_rate_limit_group_set_min_share opal_libevent2012_bufferevent_rate_limit_group_set_min_share
#define bufferevent_remove_from_rate_limit_group opal_libevent2012_bufferevent_remove_from_rate_limit_group
#define bufferevent_remove_from_rate_limit_group_internal opal_libevent2012_bufferevent_remove_from_rate_limit_group_internal
#define bufferevent_set_rate_limit opal_libevent2012_bufferevent_set_rate_limit
#define bufferevent_base_set opal_libevent2012_bufferevent_base_set
#define bufferevent_new opal_libevent2012_bufferevent_new
#define bufferevent_priority_set opal_libevent2012_bufferevent_priority_set
#define bufferevent_socket_connect opal_libevent2012_bufferevent_socket_connect
#define bufferevent_socket_connect_hostname opal_libevent2012_bufferevent_socket_connect_hostname
#define bufferevent_socket_get_dns_error opal_libevent2012_bufferevent_socket_get_dns_error
#define bufferevent_socket_new opal_libevent2012_bufferevent_socket_new
#define _evbuffer_chain_pin opal_libevent2013_evbuffer_chain_pin
#define _evbuffer_chain_unpin opal_libevent2013_evbuffer_chain_unpin
#define _evbuffer_decref_and_unlock opal_libevent2013_evbuffer_decref_and_unlock
#define _evbuffer_expand_fast opal_libevent2013_evbuffer_expand_fast
#define _evbuffer_incref opal_libevent2013_evbuffer_incref
#define _evbuffer_incref_and_lock opal_libevent2013_evbuffer_incref_and_lock
#define _evbuffer_read_setup_vecs opal_libevent2013_evbuffer_read_setup_vecs
#define _evbuffer_testing_use_linear_file_access opal_libevent2013_evbuffer_testing_use_linear_file_access
#define _evbuffer_testing_use_mmap opal_libevent2013_evbuffer_testing_use_mmap
#define _evbuffer_testing_use_sendfile opal_libevent2013_evbuffer_testing_use_sendfile
#define evbuffer_add opal_libevent2013_evbuffer_add
#define evbuffer_add_buffer opal_libevent2013_evbuffer_add_buffer
#define evbuffer_add_cb opal_libevent2013_evbuffer_add_cb
#define evbuffer_add_file opal_libevent2013_evbuffer_add_file
#define evbuffer_add_printf opal_libevent2013_evbuffer_add_printf
#define evbuffer_add_reference opal_libevent2013_evbuffer_add_reference
#define evbuffer_add_vprintf opal_libevent2013_evbuffer_add_vprintf
#define evbuffer_cb_clear_flags opal_libevent2013_evbuffer_cb_clear_flags
#define evbuffer_cb_set_flags opal_libevent2013_evbuffer_cb_set_flags
#define evbuffer_commit_space opal_libevent2013_evbuffer_commit_space
#define evbuffer_copyout opal_libevent2013_evbuffer_copyout
#define evbuffer_defer_callbacks opal_libevent2013_evbuffer_defer_callbacks
#define evbuffer_drain opal_libevent2013_evbuffer_drain
#define evbuffer_enable_locking opal_libevent2013_evbuffer_enable_locking
#define evbuffer_expand opal_libevent2013_evbuffer_expand
#define evbuffer_find opal_libevent2013_evbuffer_find
#define evbuffer_free opal_libevent2013_evbuffer_free
#define evbuffer_freeze opal_libevent2013_evbuffer_freeze
#define evbuffer_get_contiguous_space opal_libevent2013_evbuffer_get_contiguous_space
#define evbuffer_get_length opal_libevent2013_evbuffer_get_length
#define evbuffer_lock opal_libevent2013_evbuffer_lock
#define evbuffer_new opal_libevent2013_evbuffer_new
#define evbuffer_peek opal_libevent2013_evbuffer_peek
#define evbuffer_prepend opal_libevent2013_evbuffer_prepend
#define evbuffer_prepend_buffer opal_libevent2013_evbuffer_prepend_buffer
#define evbuffer_ptr_set opal_libevent2013_evbuffer_ptr_set
#define evbuffer_pullup opal_libevent2013_evbuffer_pullup
#define evbuffer_read opal_libevent2013_evbuffer_read
#define evbuffer_readline opal_libevent2013_evbuffer_readline
#define evbuffer_readln opal_libevent2013_evbuffer_readln
#define evbuffer_remove opal_libevent2013_evbuffer_remove
#define evbuffer_remove_buffer opal_libevent2013_evbuffer_remove_buffer
#define evbuffer_remove_cb opal_libevent2013_evbuffer_remove_cb
#define evbuffer_remove_cb_entry opal_libevent2013_evbuffer_remove_cb_entry
#define evbuffer_reserve_space opal_libevent2013_evbuffer_reserve_space
#define evbuffer_search opal_libevent2013_evbuffer_search
#define evbuffer_search_eol opal_libevent2013_evbuffer_search_eol
#define evbuffer_search_range opal_libevent2013_evbuffer_search_range
#define evbuffer_set_parent opal_libevent2013_evbuffer_set_parent
#define evbuffer_setcb opal_libevent2013_evbuffer_setcb
#define evbuffer_unfreeze opal_libevent2013_evbuffer_unfreeze
#define evbuffer_unlock opal_libevent2013_evbuffer_unlock
#define evbuffer_write opal_libevent2013_evbuffer_write
#define evbuffer_write_atmost opal_libevent2013_evbuffer_write_atmost
#define _bufferevent_add_event opal_libevent2013__bufferevent_add_event
#define _bufferevent_decref_and_unlock opal_libevent2013__bufferevent_decref_and_unlock
#define _bufferevent_del_generic_timeout_cbs opal_libevent2013__bufferevent_del_generic_timeout_cbs
#define _bufferevent_generic_adj_timeouts opal_libevent2013__bufferevent_generic_adj_timeouts
#define _bufferevent_incref_and_lock opal_libevent2013__bufferevent_incref_and_lock
#define _bufferevent_init_generic_timeout_cbs opal_libevent2013__bufferevent_init_generic_timeout_cbs
#define _bufferevent_run_eventcb opal_libevent2013__bufferevent_run_eventcb
#define _bufferevent_run_readcb opal_libevent2013__bufferevent_run_readcb
#define _bufferevent_run_writecb opal_libevent2013__bufferevent_run_writecb
#define bufferevent_decref opal_libevent2013_bufferevent_decref
#define bufferevent_disable opal_libevent2013_bufferevent_disable
#define bufferevent_enable opal_libevent2013_bufferevent_enable
#define bufferevent_enable_locking opal_libevent2013_bufferevent_enable_locking
#define bufferevent_flush opal_libevent2013_bufferevent_flush
#define bufferevent_free opal_libevent2013_bufferevent_free
#define bufferevent_get_enabled opal_libevent2013_bufferevent_get_enabled
#define bufferevent_get_input opal_libevent2013_bufferevent_get_input
#define bufferevent_get_output opal_libevent2013_bufferevent_get_output
#define bufferevent_get_underlying opal_libevent2013_bufferevent_get_underlying
#define bufferevent_getfd opal_libevent2013_bufferevent_getfd
#define bufferevent_incref opal_libevent2013_bufferevent_incref
#define bufferevent_init_common opal_libevent2013_bufferevent_init_common
#define bufferevent_lock opal_libevent2013_bufferevent_lock
#define bufferevent_read opal_libevent2013_bufferevent_read
#define bufferevent_read_buffer opal_libevent2013_bufferevent_read_buffer
#define bufferevent_set_timeouts opal_libevent2013_bufferevent_set_timeouts
#define bufferevent_setcb opal_libevent2013_bufferevent_setcb
#define bufferevent_setfd opal_libevent2013_bufferevent_setfd
#define bufferevent_settimeout opal_libevent2013_bufferevent_settimeout
#define bufferevent_setwatermark opal_libevent2013_bufferevent_setwatermark
#define bufferevent_suspend_read opal_libevent2013_bufferevent_suspend_read
#define bufferevent_suspend_write opal_libevent2013_bufferevent_suspend_write
#define bufferevent_unlock opal_libevent2013_bufferevent_unlock
#define bufferevent_unsuspend_read opal_libevent2013_bufferevent_unsuspend_read
#define bufferevent_unsuspend_write opal_libevent2013_bufferevent_unsuspend_write
#define bufferevent_write opal_libevent2013_bufferevent_write
#define bufferevent_write_buffer opal_libevent2013_bufferevent_write_buffer
#define bufferevent_filter_new opal_libevent2013_bufferevent_filter_new
#define bufferevent_get_openssl_error opal_libevent2013_bufferevent_get_openssl_error
#define bufferevent_openssl_filter_new opal_libevent2013_bufferevent_openssl_filter_new
#define bufferevent_openssl_get_ssl opal_libevent2013_bufferevent_openssl_get_ssl
#define bufferevent_openssl_socket_new opal_libevent2013_bufferevent_openssl_socket_new
#define bufferevent_ssl_renegotiate opal_libevent2013_bufferevent_ssl_renegotiate
#define bufferevent_pair_get_partner opal_libevent2013_bufferevent_pair_get_partner
#define bufferevent_pair_new opal_libevent2013_bufferevent_pair_new
#define _bufferevent_decrement_read_buckets opal_libevent2013__bufferevent_decrement_read_buckets
#define _bufferevent_decrement_write_buckets opal_libevent2013__bufferevent_decrement_write_buckets
#define _bufferevent_get_read_max opal_libevent2013__bufferevent_get_read_max
#define _bufferevent_get_write_max opal_libevent2013__bufferevent_get_write_max
#define bufferevent_add_to_rate_limit_group opal_libevent2013_bufferevent_add_to_rate_limit_group
#define bufferevent_decrement_read_limit opal_libevent2013_bufferevent_decrement_read_limit
#define bufferevent_decrement_write_limit opal_libevent2013_bufferevent_decrement_write_limit
#define bufferevent_get_max_to_read opal_libevent2013_bufferevent_get_max_to_read
#define bufferevent_get_max_to_write opal_libevent2013_bufferevent_get_max_to_write
#define bufferevent_get_read_limit opal_libevent2013_bufferevent_get_read_limit
#define bufferevent_get_write_limit opal_libevent2013_bufferevent_get_write_limit
#define bufferevent_rate_limit_group_decrement_read opal_libevent2013_bufferevent_rate_limit_group_decrement_read
#define bufferevent_rate_limit_group_decrement_write opal_libevent2013_bufferevent_rate_limit_group_decrement_write
#define bufferevent_rate_limit_group_free opal_libevent2013_bufferevent_rate_limit_group_free
#define bufferevent_rate_limit_group_get_read_limit opal_libevent2013_bufferevent_rate_limit_group_get_read_limit
#define bufferevent_rate_limit_group_get_totals opal_libevent2013_bufferevent_rate_limit_group_get_totals
#define bufferevent_rate_limit_group_get_write_limit opal_libevent2013_bufferevent_rate_limit_group_get_write_limit
#define bufferevent_rate_limit_group_new opal_libevent2013_bufferevent_rate_limit_group_new
#define bufferevent_rate_limit_group_reset_totals opal_libevent2013_bufferevent_rate_limit_group_reset_totals
#define bufferevent_rate_limit_group_set_cfg opal_libevent2013_bufferevent_rate_limit_group_set_cfg
#define bufferevent_rate_limit_group_set_min_share opal_libevent2013_bufferevent_rate_limit_group_set_min_share
#define bufferevent_remove_from_rate_limit_group opal_libevent2013_bufferevent_remove_from_rate_limit_group
#define bufferevent_remove_from_rate_limit_group_internal opal_libevent2013_bufferevent_remove_from_rate_limit_group_internal
#define bufferevent_set_rate_limit opal_libevent2013_bufferevent_set_rate_limit
#define bufferevent_base_set opal_libevent2013_bufferevent_base_set
#define bufferevent_new opal_libevent2013_bufferevent_new
#define bufferevent_priority_set opal_libevent2013_bufferevent_priority_set
#define bufferevent_socket_connect opal_libevent2013_bufferevent_socket_connect
#define bufferevent_socket_connect_hostname opal_libevent2013_bufferevent_socket_connect_hostname
#define bufferevent_socket_get_dns_error opal_libevent2013_bufferevent_socket_get_dns_error
#define bufferevent_socket_new opal_libevent2013_bufferevent_socket_new
/* tokens */
#define ev_token_bucket_cfg_free opal_libevent2012_ev_token_bucket_cfg_free
#define ev_token_bucket_cfg_new opal_libevent2012_ev_token_bucket_cfg_new
#define ev_token_bucket_get_tick opal_libevent2012_ev_token_bucket_get_tick
#define ev_token_bucket_init opal_libevent2012_ev_token_bucket_init
#define ev_token_bucket_update opal_libevent2012_ev_token_bucket_update
#define ev_token_bucket_cfg_free opal_libevent2013_ev_token_bucket_cfg_free
#define ev_token_bucket_cfg_new opal_libevent2013_ev_token_bucket_cfg_new
#define ev_token_bucket_get_tick opal_libevent2013_ev_token_bucket_get_tick
#define ev_token_bucket_init opal_libevent2013_ev_token_bucket_init
#define ev_token_bucket_update opal_libevent2013_ev_token_bucket_update
/* debug */
#define _event_debug_map_HT_REP_IS_BAD opal_libevent2012__event_debug_map_HT_REP_IS_BAD
#define event_debug_map_HT_CLEAR opal_libevent2012_event_debug_map_HT_CLEAR
#define event_debug_map_HT_GROW opal_libevent2012_event_debug_map_HT_GROW
#define event_debug_unassign opal_libevent2012_event_debug_unassign
#define _event_debugx opal_libevent2012__event_debugx
#define _event_debug_map_HT_REP_IS_BAD opal_libevent2013__event_debug_map_HT_REP_IS_BAD
#define event_debug_map_HT_CLEAR opal_libevent2013_event_debug_map_HT_CLEAR
#define event_debug_map_HT_GROW opal_libevent2013_event_debug_map_HT_GROW
#define event_debug_unassign opal_libevent2013_event_debug_unassign
#define _event_debugx opal_libevent2013__event_debugx
/* event.c */
#define event_active opal_libevent2012_event_active
#define event_active_nolock opal_libevent2012_event_active_nolock
#define event_add opal_libevent2012_event_add
#define event_assign opal_libevent2012_event_assign
#define event_base_add_virtual opal_libevent2012_event_base_add_virtual
#define event_base_del_virtual opal_libevent2012_event_base_del_virtual
#define event_base_dispatch opal_libevent2012_event_base_dispatch
#define event_base_dump_events opal_libevent2012_event_base_dump_events
#define event_base_free opal_libevent2012_event_base_free
#define event_base_get_deferred_cb_queue opal_libevent2012_event_base_get_deferred_cb_queue
#define event_base_get_features opal_libevent2012_event_base_get_features
#define event_base_get_method opal_libevent2012_event_base_get_method
#define event_base_gettimeofday_cached opal_libevent2012_event_base_gettimeofday_cached
#define event_base_got_break opal_libevent2012_event_base_got_break
#define event_base_got_exit opal_libevent2012_event_base_got_exit
#define event_base_init_common_timeout opal_libevent2012_event_base_init_common_timeout
#define event_base_loop opal_libevent2012_event_base_loop
#define event_base_loopbreak opal_libevent2012_event_base_loopbreak
#define event_base_loopexit opal_libevent2012_event_base_loopexit
#define event_base_new opal_libevent2012_event_base_new
#define event_base_new_with_config opal_libevent2012_event_base_new_with_config
#define event_base_once opal_libevent2012_event_base_once
#define event_base_priority_init opal_libevent2012_event_base_priority_init
#define event_base_set opal_libevent2012_event_base_set
#define event_base_start_iocp opal_libevent2012_event_base_start_iocp
#define event_base_stop_iocp opal_libevent2012_event_base_stop_iocp
#define event_config_avoid_method opal_libevent2012_event_config_avoid_method
#define event_config_free opal_libevent2012_event_config_free
#define event_config_new opal_libevent2012_event_config_new
#define event_config_require_features opal_libevent2012_event_config_require_features
#define event_config_set_flag opal_libevent2012_event_config_set_flag
#define event_config_set_num_cpus_hint opal_libevent2012_event_config_set_num_cpus_hint
#define event_deferred_cb_cancel opal_libevent2012_event_deferred_cb_cancel
#define event_deferred_cb_init opal_libevent2012_event_deferred_cb_init
#define event_deferred_cb_queue_init opal_libevent2012_event_deferred_cb_queue_init
#define event_deferred_cb_schedule opal_libevent2012_event_deferred_cb_schedule
#define event_del opal_libevent2012_event_del
#define event_dispatch opal_libevent2012_event_dispatch
#define event_enable_debug_mode opal_libevent2012_event_enable_debug_mode
#define event_free opal_libevent2012_event_free
#define event_get_assignment opal_libevent2012_event_get_assignment
#define event_get_base opal_libevent2012_event_get_base
#define event_get_callback opal_libevent2012_event_get_callback
#define event_get_callback_arg opal_libevent2012_event_get_callback_arg
#define event_get_events opal_libevent2012_event_get_events
#define event_get_fd opal_libevent2012_event_get_fd
#define event_get_method opal_libevent2012_event_get_method
#define event_get_struct_event_size opal_libevent2012_event_get_struct_event_size
#define event_get_supported_methods opal_libevent2012_event_get_supported_methods
#define event_get_version opal_libevent2012_event_get_version
#define event_get_version_number opal_libevent2012_event_get_version_number
#define event_init opal_libevent2012_event_init
#define _event_initialized opal_libevent2012__event_initialized
#define event_loop opal_libevent2012_event_loop
#define event_loopbreak opal_libevent2012_event_loopbreak
#define event_loopexit opal_libevent2012_event_loopexit
#define event_mm_calloc_ opal_libevent2012_event_mm_calloc_
#define event_mm_free_ opal_libevent2012_event_mm_free_
#define event_mm_malloc_ opal_libevent2012_event_mm_malloc_
#define event_mm_realloc_ opal_libevent2012_event_mm_realloc_
#define event_mm_strdup_ opal_libevent2012_event_mm_strdup_
#define event_new opal_libevent2012_event_new
#define event_once opal_libevent2012_event_once
#define event_pending opal_libevent2012_event_pending
#define event_priority_init opal_libevent2012_event_priority_init
#define event_priority_set opal_libevent2012_event_priority_set
#define event_reinit opal_libevent2012_event_reinit
#define event_set opal_libevent2012_event_set
#define event_set_debug_output opal_libevent2012_event_set_debug_output
#define event_set_mem_functions opal_libevent2012_event_set_mem_functions
#define event_changelist_add opal_libevent2012_event_changelist_add
#define event_changelist_del opal_libevent2012_event_changelist_del
#define event_changelist_freemem opal_libevent2012_event_changelist_freemem
#define event_changelist_init opal_libevent2012_event_changelist_init
#define event_changelist_remove_all opal_libevent2012_event_changelist_remove_all
#define event_err opal_libevent2012_event_err
#define event_errx opal_libevent2012_event_errx
#define event_msgx opal_libevent2012_event_msgx
#define event_set_fatal_callback opal_libevent2012_event_set_fatal_callback
#define event_set_log_callback opal_libevent2012_event_set_log_callback
#define event_sock_err opal_libevent2012_event_sock_err
#define event_sock_warn opal_libevent2012_event_sock_warn
#define event_warn opal_libevent2012_event_warn
#define event_warnx opal_libevent2012_event_warnx
#define event_active opal_libevent2013_event_active
#define event_active_nolock opal_libevent2013_event_active_nolock
#define event_add opal_libevent2013_event_add
#define event_assign opal_libevent2013_event_assign
#define event_base_add_virtual opal_libevent2013_event_base_add_virtual
#define event_base_del_virtual opal_libevent2013_event_base_del_virtual
#define event_base_dispatch opal_libevent2013_event_base_dispatch
#define event_base_dump_events opal_libevent2013_event_base_dump_events
#define event_base_free opal_libevent2013_event_base_free
#define event_base_get_deferred_cb_queue opal_libevent2013_event_base_get_deferred_cb_queue
#define event_base_get_features opal_libevent2013_event_base_get_features
#define event_base_get_method opal_libevent2013_event_base_get_method
#define event_base_gettimeofday_cached opal_libevent2013_event_base_gettimeofday_cached
#define event_base_got_break opal_libevent2013_event_base_got_break
#define event_base_got_exit opal_libevent2013_event_base_got_exit
#define event_base_init_common_timeout opal_libevent2013_event_base_init_common_timeout
#define event_base_loop opal_libevent2013_event_base_loop
#define event_base_loopbreak opal_libevent2013_event_base_loopbreak
#define event_base_loopexit opal_libevent2013_event_base_loopexit
#define event_base_new opal_libevent2013_event_base_new
#define event_base_new_with_config opal_libevent2013_event_base_new_with_config
#define event_base_once opal_libevent2013_event_base_once
#define event_base_priority_init opal_libevent2013_event_base_priority_init
#define event_base_set opal_libevent2013_event_base_set
#define event_base_start_iocp opal_libevent2013_event_base_start_iocp
#define event_base_stop_iocp opal_libevent2013_event_base_stop_iocp
#define event_config_avoid_method opal_libevent2013_event_config_avoid_method
#define event_config_free opal_libevent2013_event_config_free
#define event_config_new opal_libevent2013_event_config_new
#define event_config_require_features opal_libevent2013_event_config_require_features
#define event_config_set_flag opal_libevent2013_event_config_set_flag
#define event_config_set_num_cpus_hint opal_libevent2013_event_config_set_num_cpus_hint
#define event_deferred_cb_cancel opal_libevent2013_event_deferred_cb_cancel
#define event_deferred_cb_init opal_libevent2013_event_deferred_cb_init
#define event_deferred_cb_queue_init opal_libevent2013_event_deferred_cb_queue_init
#define event_deferred_cb_schedule opal_libevent2013_event_deferred_cb_schedule
#define event_del opal_libevent2013_event_del
#define event_dispatch opal_libevent2013_event_dispatch
#define event_enable_debug_mode opal_libevent2013_event_enable_debug_mode
#define event_free opal_libevent2013_event_free
#define event_get_assignment opal_libevent2013_event_get_assignment
#define event_get_base opal_libevent2013_event_get_base
#define event_get_callback opal_libevent2013_event_get_callback
#define event_get_callback_arg opal_libevent2013_event_get_callback_arg
#define event_get_events opal_libevent2013_event_get_events
#define event_get_fd opal_libevent2013_event_get_fd
#define event_get_method opal_libevent2013_event_get_method
#define event_get_struct_event_size opal_libevent2013_event_get_struct_event_size
#define event_get_supported_methods opal_libevent2013_event_get_supported_methods
#define event_get_version opal_libevent2013_event_get_version
#define event_get_version_number opal_libevent2013_event_get_version_number
#define event_init opal_libevent2013_event_init
#define _event_initialized opal_libevent2013__event_initialized
#define event_loop opal_libevent2013_event_loop
#define event_loopbreak opal_libevent2013_event_loopbreak
#define event_loopexit opal_libevent2013_event_loopexit
#define event_mm_calloc_ opal_libevent2013_event_mm_calloc_
#define event_mm_free_ opal_libevent2013_event_mm_free_
#define event_mm_malloc_ opal_libevent2013_event_mm_malloc_
#define event_mm_realloc_ opal_libevent2013_event_mm_realloc_
#define event_mm_strdup_ opal_libevent2013_event_mm_strdup_
#define event_new opal_libevent2013_event_new
#define event_once opal_libevent2013_event_once
#define event_pending opal_libevent2013_event_pending
#define event_priority_init opal_libevent2013_event_priority_init
#define event_priority_set opal_libevent2013_event_priority_set
#define event_reinit opal_libevent2013_event_reinit
#define event_set opal_libevent2013_event_set
#define event_set_debug_output opal_libevent2013_event_set_debug_output
#define event_set_mem_functions opal_libevent2013_event_set_mem_functions
#define event_changelist_add opal_libevent2013_event_changelist_add
#define event_changelist_del opal_libevent2013_event_changelist_del
#define event_changelist_freemem opal_libevent2013_event_changelist_freemem
#define event_changelist_init opal_libevent2013_event_changelist_init
#define event_changelist_remove_all opal_libevent2013_event_changelist_remove_all
#define event_err opal_libevent2013_event_err
#define event_errx opal_libevent2013_event_errx
#define event_msgx opal_libevent2013_event_msgx
#define event_set_fatal_callback opal_libevent2013_event_set_fatal_callback
#define event_set_log_callback opal_libevent2013_event_set_log_callback
#define event_sock_err opal_libevent2013_event_sock_err
#define event_sock_warn opal_libevent2013_event_sock_warn
#define event_warn opal_libevent2013_event_warn
#define event_warnx opal_libevent2013_event_warnx
/* evutil.c*/
#define EVUTIL_ISALNUM opal_libevent2012_EVUTIL_ISALNUM
#define EVUTIL_ISALPHA opal_libevent2012_EVUTIL_ISALPHA
#define EVUTIL_ISDIGIT opal_libevent2012_EVUTIL_ISDIGIT
#define EVUTIL_ISLOWER opal_libevent2012_EVUTIL_ISLOWER
#define EVUTIL_ISPRINT opal_libevent2012_EVUTIL_ISPRINT
#define EVUTIL_ISSPACE opal_libevent2012_EVUTIL_ISSPACE
#define EVUTIL_ISUPPER opal_libevent2012_EVUTIL_ISUPPER
#define EVUTIL_ISXDIGIT opal_libevent2012_EVUTIL_ISXDIGIT
#define EVUTIL_TOLOWER opal_libevent2012_EVUTIL_TOLOWER
#define EVUTIL_TOUPPER opal_libevent2012_EVUTIL_TOUPPER
#define _evutil_weakrand opal_libevent2012__evutil_weakrand
#define evutil_addrinfo_append opal_libevent2012_evutil_addrinfo_append
#define evutil_adjust_hints_for_addrconfig opal_libevent2012_evutil_adjust_hints_for_addrconfig
#define evutil_ascii_strcasecmp opal_libevent2012_evutil_ascii_strcasecmp
#define evutil_ascii_strncasecmp opal_libevent2012_evutil_ascii_strncasecmp
#define evutil_closesocket opal_libevent2012_evutil_closesocket
#define evutil_ersatz_socketpair opal_libevent2012_evutil_ersatz_socketpair
#define evutil_format_sockaddr_port opal_libevent2012_evutil_format_sockaddr_port
#define evutil_freeaddrinfo opal_libevent2012_evutil_freeaddrinfo
#define evutil_gai_strerror opal_libevent2012_evutil_gai_strerror
#define evutil_getaddrinfo opal_libevent2012_evutil_getaddrinfo
#define evutil_getaddrinfo_async opal_libevent2012_evutil_getaddrinfo_async
#define evutil_getaddrinfo_common opal_libevent2012_evutil_getaddrinfo_common
#define evutil_getenv opal_libevent2012_evutil_getenv
#define evutil_hex_char_to_int opal_libevent2012_evutil_hex_char_to_int
#define evutil_inet_ntop opal_libevent2012_evutil_inet_ntop
#define evutil_inet_pton opal_libevent2012_evutil_inet_pton
#define evutil_make_listen_socket_reuseable opal_libevent2012_evutil_make_listen_socket_reuseable
#define evutil_make_socket_closeonexec opal_libevent2012_evutil_make_socket_closeonexec
#define evutil_make_socket_nonblocking opal_libevent2012_evutil_make_socket_nonblocking
#define evutil_new_addrinfo opal_libevent2012_evutil_new_addrinfo
#define evutil_parse_sockaddr_port opal_libevent2012_evutil_parse_sockaddr_port
#define evutil_read_file opal_libevent2012_evutil_read_file
#define evutil_set_evdns_getaddrinfo_fn opal_libevent2012_evutil_set_evdns_getaddrinfo_fn
#define evutil_snprintf opal_libevent2012_evutil_snprintf
#define evutil_sockaddr_cmp opal_libevent2012_evutil_sockaddr_cmp
#define evutil_sockaddr_is_loopback opal_libevent2012_evutil_sockaddr_is_loopback
#define evutil_socket_connect opal_libevent2012_evutil_socket_connect
#define evutil_socket_finished_connecting opal_libevent2012_evutil_socket_finished_connecting
#define evutil_socketpair opal_libevent2012_evutil_socketpair
#define evutil_strtoll opal_libevent2012_evutil_strtoll
#define evutil_tv_to_msec opal_libevent2012_evutil_tv_to_msec
#define evutil_vsnprintf opal_libevent2012_evutil_vsnprintf
#define evutil_secure_rng_add_bytes opal_libevent2012_evutil_secure_rng_add_bytes
#define evutil_secure_rng_get_bytes opal_libevent2012_evutil_secure_rng_get_bytes
#define evutil_secure_rng_init opal_libevent2012_evutil_secure_rng_init
#define EVUTIL_ISALNUM opal_libevent2013_EVUTIL_ISALNUM
#define EVUTIL_ISALPHA opal_libevent2013_EVUTIL_ISALPHA
#define EVUTIL_ISDIGIT opal_libevent2013_EVUTIL_ISDIGIT
#define EVUTIL_ISLOWER opal_libevent2013_EVUTIL_ISLOWER
#define EVUTIL_ISPRINT opal_libevent2013_EVUTIL_ISPRINT
#define EVUTIL_ISSPACE opal_libevent2013_EVUTIL_ISSPACE
#define EVUTIL_ISUPPER opal_libevent2013_EVUTIL_ISUPPER
#define EVUTIL_ISXDIGIT opal_libevent2013_EVUTIL_ISXDIGIT
#define EVUTIL_TOLOWER opal_libevent2013_EVUTIL_TOLOWER
#define EVUTIL_TOUPPER opal_libevent2013_EVUTIL_TOUPPER
#define _evutil_weakrand opal_libevent2013__evutil_weakrand
#define evutil_addrinfo_append opal_libevent2013_evutil_addrinfo_append
#define evutil_adjust_hints_for_addrconfig opal_libevent2013_evutil_adjust_hints_for_addrconfig
#define evutil_ascii_strcasecmp opal_libevent2013_evutil_ascii_strcasecmp
#define evutil_ascii_strncasecmp opal_libevent2013_evutil_ascii_strncasecmp
#define evutil_closesocket opal_libevent2013_evutil_closesocket
#define evutil_ersatz_socketpair opal_libevent2013_evutil_ersatz_socketpair
#define evutil_format_sockaddr_port opal_libevent2013_evutil_format_sockaddr_port
#define evutil_freeaddrinfo opal_libevent2013_evutil_freeaddrinfo
#define evutil_gai_strerror opal_libevent2013_evutil_gai_strerror
#define evutil_getaddrinfo opal_libevent2013_evutil_getaddrinfo
#define evutil_getaddrinfo_async opal_libevent2013_evutil_getaddrinfo_async
#define evutil_getaddrinfo_common opal_libevent2013_evutil_getaddrinfo_common
#define evutil_getenv opal_libevent2013_evutil_getenv
#define evutil_hex_char_to_int opal_libevent2013_evutil_hex_char_to_int
#define evutil_inet_ntop opal_libevent2013_evutil_inet_ntop
#define evutil_inet_pton opal_libevent2013_evutil_inet_pton
#define evutil_make_listen_socket_reuseable opal_libevent2013_evutil_make_listen_socket_reuseable
#define evutil_make_socket_closeonexec opal_libevent2013_evutil_make_socket_closeonexec
#define evutil_make_socket_nonblocking opal_libevent2013_evutil_make_socket_nonblocking
#define evutil_new_addrinfo opal_libevent2013_evutil_new_addrinfo
#define evutil_parse_sockaddr_port opal_libevent2013_evutil_parse_sockaddr_port
#define evutil_read_file opal_libevent2013_evutil_read_file
#define evutil_set_evdns_getaddrinfo_fn opal_libevent2013_evutil_set_evdns_getaddrinfo_fn
#define evutil_snprintf opal_libevent2013_evutil_snprintf
#define evutil_sockaddr_cmp opal_libevent2013_evutil_sockaddr_cmp
#define evutil_sockaddr_is_loopback opal_libevent2013_evutil_sockaddr_is_loopback
#define evutil_socket_connect opal_libevent2013_evutil_socket_connect
#define evutil_socket_finished_connecting opal_libevent2013_evutil_socket_finished_connecting
#define evutil_socketpair opal_libevent2013_evutil_socketpair
#define evutil_strtoll opal_libevent2013_evutil_strtoll
#define evutil_tv_to_msec opal_libevent2013_evutil_tv_to_msec
#define evutil_vsnprintf opal_libevent2013_evutil_vsnprintf
#define evutil_secure_rng_add_bytes opal_libevent2013_evutil_secure_rng_add_bytes
#define evutil_secure_rng_get_bytes opal_libevent2013_evutil_secure_rng_get_bytes
#define evutil_secure_rng_init opal_libevent2013_evutil_secure_rng_init
/* threads */
#define evthread_make_base_notifiable opal_libevent2012_evthread_make_base_notifiable
#define _evthread_debug_get_real_lock opal_libevent2012__evthread_debug_get_real_lock
#define _evthread_is_debug_lock_held opal_libevent2012__evthread_is_debug_lock_held
#define evthread_enable_lock_debuging opal_libevent2012_evthread_enable_lock_debuging
#define evthread_set_condition_callbacks opal_libevent2012_evthread_set_condition_callbacks
#define evthread_set_id_callback opal_libevent2012_evthread_set_id_callback
#define evthread_set_lock_callbacks opal_libevent2012_evthread_set_lock_callbacks
#define evthread_use_pthreads opal_libevent2012_evthread_use_pthreads
#define evthread_make_base_notifiable opal_libevent2013_evthread_make_base_notifiable
#define _evthread_debug_get_real_lock opal_libevent2013__evthread_debug_get_real_lock
#define _evthread_is_debug_lock_held opal_libevent2013__evthread_is_debug_lock_held
#define evthread_enable_lock_debuging opal_libevent2013_evthread_enable_lock_debuging
#define evthread_set_condition_callbacks opal_libevent2013_evthread_set_condition_callbacks
#define evthread_set_id_callback opal_libevent2013_evthread_set_id_callback
#define evthread_set_lock_callbacks opal_libevent2013_evthread_set_lock_callbacks
#define evthread_use_pthreads opal_libevent2013_evthread_use_pthreads
/* tags */
#define evtag_consume opal_libevent2012_evtag_consume
#define evtag_decode_int opal_libevent2012_evtag_decode_int
#define evtag_decode_int64 opal_libevent2012_evtag_decode_int64
#define evtag_decode_tag opal_libevent2012_evtag_decode_tag
#define evtag_encode_int opal_libevent2012_evtag_encode_int
#define evtag_encode_int64 opal_libevent2012_evtag_encode_int64
#define evtag_encode_tag opal_libevent2012_evtag_encode_tag
#define evtag_init opal_libevent2012_evtag_init
#define evtag_marshal opal_libevent2012_evtag_marshal
#define evtag_marshal_buffer opal_libevent2012_evtag_marshal_buffer
#define evtag_marshal_int opal_libevent2012_evtag_marshal_int
#define evtag_marshal_int64 opal_libevent2012_evtag_marshal_int64
#define evtag_marshal_string opal_libevent2012_evtag_marshal_string
#define evtag_marshal_timeval opal_libevent2012_evtag_marshal_timeval
#define evtag_payload_length opal_libevent2012_evtag_payload_length
#define evtag_peek opal_libevent2012_evtag_peek
#define evtag_peek_length opal_libevent2012_evtag_peek_length
#define evtag_unmarshal opal_libevent2012_evtag_unmarshal
#define evtag_unmarshal_fixed opal_libevent2012_evtag_unmarshal_fixed
#define evtag_unmarshal_header opal_libevent2012_evtag_unmarshal_header
#define evtag_unmarshal_int opal_libevent2012_evtag_unmarshal_int
#define evtag_unmarshal_int64 opal_libevent2012_evtag_unmarshal_int64
#define evtag_unmarshal_string opal_libevent2012_evtag_unmarshal_string
#define evtag_unmarshal_timeval opal_libevent2012_evtag_unmarshal_timeval
#define evtag_consume opal_libevent2013_evtag_consume
#define evtag_decode_int opal_libevent2013_evtag_decode_int
#define evtag_decode_int64 opal_libevent2013_evtag_decode_int64
#define evtag_decode_tag opal_libevent2013_evtag_decode_tag
#define evtag_encode_int opal_libevent2013_evtag_encode_int
#define evtag_encode_int64 opal_libevent2013_evtag_encode_int64
#define evtag_encode_tag opal_libevent2013_evtag_encode_tag
#define evtag_init opal_libevent2013_evtag_init
#define evtag_marshal opal_libevent2013_evtag_marshal
#define evtag_marshal_buffer opal_libevent2013_evtag_marshal_buffer
#define evtag_marshal_int opal_libevent2013_evtag_marshal_int
#define evtag_marshal_int64 opal_libevent2013_evtag_marshal_int64
#define evtag_marshal_string opal_libevent2013_evtag_marshal_string
#define evtag_marshal_timeval opal_libevent2013_evtag_marshal_timeval
#define evtag_payload_length opal_libevent2013_evtag_payload_length
#define evtag_peek opal_libevent2013_evtag_peek
#define evtag_peek_length opal_libevent2013_evtag_peek_length
#define evtag_unmarshal opal_libevent2013_evtag_unmarshal
#define evtag_unmarshal_fixed opal_libevent2013_evtag_unmarshal_fixed
#define evtag_unmarshal_header opal_libevent2013_evtag_unmarshal_header
#define evtag_unmarshal_int opal_libevent2013_evtag_unmarshal_int
#define evtag_unmarshal_int64 opal_libevent2013_evtag_unmarshal_int64
#define evtag_unmarshal_string opal_libevent2013_evtag_unmarshal_string
#define evtag_unmarshal_timeval opal_libevent2013_evtag_unmarshal_timeval
/* map */
#define evmap_io_active opal_libevent2012_evmap_io_active
#define evmap_io_add opal_libevent2012_evmap_io_add
#define evmap_io_clear opal_libevent2012_evmap_io_clear
#define evmap_io_del opal_libevent2012_evmap_io_del
#define evmap_io_get_fdinfo opal_libevent2012_evmap_io_get_fdinfo
#define evmap_io_initmap opal_libevent2012_evmap_io_initmap
#define evmap_signal_active opal_libevent2012_evmap_signal_active
#define evmap_signal_add opal_libevent2012_evmap_signal_add
#define evmap_signal_clear opal_libevent2012_evmap_signal_clear
#define evmap_signal_del opal_libevent2012_evmap_signal_del
#define evmap_signal_initmap opal_libevent2012_evmap_signal_initmap
#define evmap_io_active opal_libevent2013_evmap_io_active
#define evmap_io_add opal_libevent2013_evmap_io_add
#define evmap_io_clear opal_libevent2013_evmap_io_clear
#define evmap_io_del opal_libevent2013_evmap_io_del
#define evmap_io_get_fdinfo opal_libevent2013_evmap_io_get_fdinfo
#define evmap_io_initmap opal_libevent2013_evmap_io_initmap
#define evmap_signal_active opal_libevent2013_evmap_signal_active
#define evmap_signal_add opal_libevent2013_evmap_signal_add
#define evmap_signal_clear opal_libevent2013_evmap_signal_clear
#define evmap_signal_del opal_libevent2013_evmap_signal_del
#define evmap_signal_initmap opal_libevent2013_evmap_signal_initmap
/* connections */
#define evconnlistener_disable opal_libevent2012_evconnlistener_disable
#define evconnlistener_enable opal_libevent2012_evconnlistener_enable
#define evconnlistener_free opal_libevent2012_evconnlistener_free
#define evconnlistener_get_base opal_libevent2012_evconnlistener_get_base
#define evconnlistener_get_fd opal_libevent2012_evconnlistener_get_fd
#define evconnlistener_new opal_libevent2012_evconnlistener_new
#define evconnlistener_new_bind opal_libevent2012_evconnlistener_new_bind
#define evconnlistener_disable opal_libevent2013_evconnlistener_disable
#define evconnlistener_enable opal_libevent2013_evconnlistener_enable
#define evconnlistener_free opal_libevent2013_evconnlistener_free
#define evconnlistener_get_base opal_libevent2013_evconnlistener_get_base
#define evconnlistener_get_fd opal_libevent2013_evconnlistener_get_fd
#define evconnlistener_new opal_libevent2013_evconnlistener_new
#define evconnlistener_new_bind opal_libevent2013_evconnlistener_new_bind
/* signal */
#define _evsig_restore_handler opal_libevent2012__evsig_restore_handler
#define _evsig_set_handler opal_libevent2012__evsig_set_handler
#define evsig_dealloc opal_libevent2012_evsig_dealloc
#define evsig_init opal_libevent2012_evsig_init
#define evsig_process opal_libevent2012_evsig_process
#define _evsig_restore_handler opal_libevent2013__evsig_restore_handler
#define _evsig_set_handler opal_libevent2013__evsig_set_handler
#define evsig_dealloc opal_libevent2013_evsig_dealloc
#define evsig_init opal_libevent2013_evsig_init
#define evsig_process opal_libevent2013_evsig_process
#ifdef __cplusplus
}

View file

@@ -1,13 +0,0 @@
#
# Copyright (c) 2010 High Performance Computing Center Stuttgart,
# University of Stuttgart. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
not_single_shared_lib=1
in_use=1
required_check=opal_event_config

View file

@@ -1,85 +0,0 @@
#
# Copyright (c) 2010 Cisco Systems, Inc. All rights reserved.
# Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
EXTRA_DIST = .windows
AM_CPPFLAGS = -I$(srcdir)/libevent -I$(srcdir)/libevent/include -I$(builddir)/libevent/include -I$(srcdir)/libevent/compat
SUBDIRS = libevent
headers = libevent207.h
sources = \
libevent207_component.c \
libevent207_module.c
# Conditionally install the header files
if WANT_INSTALL_HEADERS
headers += libevent/opal_rename.h libevent/event.h libevent/evutil.h libevent/util-internal.h \
libevent/mm-internal.h libevent/ipv6-internal.h \
libevent/strlcpy-internal.h libevent/evbuffer-internal.h \
libevent/bufferevent-internal.h libevent/event-internal.h \
libevent/evthread-internal.h libevent/defer-internal.h \
libevent/minheap-internal.h libevent/log-internal.h \
libevent/evsignal-internal.h libevent/evmap-internal.h \
libevent/changelist-internal.h libevent/iocp-internal.h \
libevent/ratelim-internal.h \
libevent/WIN32-Code/event2/event-config.h \
libevent/WIN32-Code/tree.h \
libevent/compat/sys/queue.h \
libevent/evhttp.h libevent/http-internal.h libevent/ht-internal.h \
libevent/evrpc.h libevent/evrpc-internal.h \
libevent/evdns.h libevent/include/event2/buffer_compat.h \
libevent/include/event2/buffer.h \
libevent/include/event2/bufferevent_compat.h \
libevent/include/event2/bufferevent_ssl.h \
libevent/include/event2/bufferevent_struct.h \
libevent/include/event2/bufferevent.h \
libevent/include/event2/dns_compat.h \
libevent/include/event2/dns_struct.h \
libevent/include/event2/event_compat.h \
libevent/include/event2/event_struct.h \
libevent/include/event2/event.h \
libevent/include/event2/http_compat.h \
libevent/include/event2/http_struct.h \
libevent/include/event2/http.h \
libevent/include/event2/keyvalq_struct.h \
libevent/include/event2/listener.h \
libevent/include/event2/rpc_compat.h \
libevent/include/event2/rpc_struct.h \
libevent/include/event2/rpc.h \
libevent/include/event2/tag_compat.h \
libevent/include/event2/tag.h \
libevent/include/event2/thread.h \
libevent/include/event2/util.h
opaldir = $(includedir)/openmpi/$(subdir)
nobase_opal_HEADERS = $(headers)
nobase_nodist_opal_HEADERS = libevent/include/event2/event-config.h
endif
# Make the output library in this directory, and name it either
# mca_<type>_<name>.la (for DSO builds) or libmca_<type>_<name>.la
# (for static builds).
if MCA_BUILD_opal_event_libevent207_DSO
component_noinst =
component_install = mca_event_libevent207.la
else
component_noinst = libmca_event_libevent207.la
component_install =
endif
# We only ever build this component statically
noinst_LTLIBRARIES = $(component_noinst)
libmca_event_libevent207_la_SOURCES =$(sources)
libmca_event_libevent207_la_LDFLAGS = -module -avoid-version
libmca_event_libevent207_la_LIBADD = $(builddir)/libevent/libevent.la

View file

@@ -1 +0,0 @@
libevent

View file

@@ -1,189 +0,0 @@
# -*- shell-script -*-
#
# Copyright (c) 2009-2011 Cisco Systems, Inc. All rights reserved.
#
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
AC_DEFUN([MCA_opal_event_libevent207_PRIORITY], [60])
#
# Force this component to compile in static-only mode
#
AC_DEFUN([MCA_opal_event_libevent207_COMPILE_MODE], [
AC_MSG_CHECKING([for MCA component $2:$3 compile mode])
$4="static"
AC_MSG_RESULT([$$4])
])
# MCA_event_libevent207_CONFIG([action-if-can-compile],
# [action-if-cant-compile])
# ------------------------------------------------
AC_DEFUN([MCA_opal_event_libevent207_CONFIG],[
AC_CONFIG_FILES([opal/mca/event/libevent207/Makefile])
basedir="opal/mca/event/libevent207"
CFLAGS_save="$CFLAGS"
CFLAGS="$OMPI_CFLAGS_BEFORE_PICKY $OPAL_VISIBILITY_CFLAGS"
CPPFLAGS_save="$CPPFLAGS"
CPPFLAGS="-I$OMPI_TOP_SRCDIR -I$OMPI_TOP_BUILDDIR -I$OMPI_TOP_SRCDIR/opal/include $CPPFLAGS"
AC_MSG_CHECKING([libevent configuration args])
str=`event_args="--disable-dns --disable-http --disable-rpc --disable-openssl --enable-hidden-symbols --includedir=$includedir/openmpi/opal/event/libevent/include"`
eval $str
unset str
AC_ARG_ENABLE(event-rtsig,
AC_HELP_STRING([--enable-event-rtsig],
[enable support for real time signals (experimental)]))
if test "$enable_event_rtsig" = "yes"; then
event_args="$event_args --enable-rtsig"
fi
AC_ARG_ENABLE(event-select,
AC_HELP_STRING([--disable-event-select], [disable select support]))
if test "$enable_event_select" = "no"; then
event_args="$event_args --disable-select"
fi
AC_ARG_ENABLE(event-poll,
AC_HELP_STRING([--disable-event-poll], [disable poll support]))
if test "$enable_event_poll" = "no"; then
event_args="$event_args --disable-poll"
fi
AC_ARG_ENABLE(event-devpoll,
AC_HELP_STRING([--disable-event-devpoll], [disable devpoll support]))
if test "$enable_event_devpoll" = "no"; then
event_args="$event_args --disable-devpoll"
fi
AC_ARG_ENABLE(event-kqueue,
AC_HELP_STRING([--disable-event-kqueue], [disable kqueue support]))
if test "$enable_event_kqueue" = "no"; then
event_args="$event_args --disable-kqueue"
fi
AC_ARG_ENABLE(event-epoll,
AC_HELP_STRING([--disable-event-epoll], [disable epoll support]))
if test "$enable_event_epoll" = "no"; then
event_args="$event_args --disable-epoll"
fi
AC_ARG_ENABLE(event-evport,
AC_HELP_STRING([--enable-event-evport], [enable evport support]))
if test "$enable_event_evport" = "yes"; then
event_args="$event_args --enable-evport"
else
event_args="$event_args --disable-evport"
fi
AC_ARG_ENABLE(event-signal,
AC_HELP_STRING([--disable-event-signal], [disable signal support]))
if test "$enable_event_signal" = "no"; then
event_args="$event_args --disable-signal"
fi
AC_ARG_ENABLE(event-debug,
AC_HELP_STRING([--enable-event-debug], [enable event library debug output]))
if test "$enable_event_debug" = "no"; then
event_args="$event_args --disable-debug-mode"
fi
AC_ARG_ENABLE(event-thread-support,
AC_HELP_STRING([--enable-event-thread-support], [enable event library internal thread support]))
if test "$enable_event_thread_support" = "yes"; then
AC_DEFINE_UNQUOTED(OPAL_EVENT_HAVE_THREAD_SUPPORT, 1,
[Thread support was configured into the event library])
else
event_args="$event_args --disable-thread-support"
AC_DEFINE_UNQUOTED(OPAL_EVENT_HAVE_THREAD_SUPPORT, 0,
[Thread support was not configured into the event library])
fi
AC_MSG_RESULT([$event_args])
OMPI_CONFIG_SUBDIR([$basedir/libevent],
[$event_args $ompi_subdir_args],
[libevent_happy="yes"], [libevent_happy="no"])
if test "$libevent_happy" = "no"; then
AC_MSG_WARN([Event library failed to configure])
AC_MSG_ERROR([Cannot continue])
fi
CFLAGS="$CFLAGS_save"
CPPFLAGS="$CPPFLAGS_save"
# If we configured successfully, set OPAL_HAVE_WORKING_EVENTOPS to
# the value in the generated libevent/config.h (NOT
# libevent/include/event2/event-config.h!). Otherwise, set it to
# 0.
file=$basedir/libevent/config.h
AS_IF([test "$libevent_happy" = "yes" -a -r $file],
[OPAL_HAVE_WORKING_EVENTOPS=`grep HAVE_WORKING_EVENTOPS $file | awk '{print [$]3 }'`
# Build libevent/include/event2/event-config.h. If we
# don't do it here, then libevent's Makefile.am will build
# it during "make all", which is too late for us (because
# other things are built before the event framework that
# end up including event-config.h). The steps below were
# copied from libevent's Makefile.am.
AC_CONFIG_COMMANDS([opal/mca/event/libevent207/libevent/include/event2/event-config.h],
[basedir="opal/mca/event/libevent207"
file="$basedir/libevent/include/event2/event-config.h"
rm -f "$file.new"
cat > "$file.new" <<EOF
/* event2/event-config.h
*
* This file was generated by autoconf when libevent was built, and
* post- processed by Open MPI's component configure.m4 (so that
* Libevent wouldn't build it during "make all") so that its macros
* would have a uniform prefix.
*
* DO NOT EDIT THIS FILE.
*
* Do not rely on macros in this file existing in later versions
*/
#ifndef _EVENT2_EVENT_CONFIG_H_
#define _EVENT2_EVENT_CONFIG_H_
EOF
sed -e 's/#define /#define _EVENT_/' \
-e 's/#undef /#undef _EVENT_/' \
-e 's/#ifndef /#ifndef _EVENT_/' < "$basedir/libevent/config.h" >> "$file.new"
echo "#endif" >> "$file.new"
# Only make a new .h file if the
# contents haven't changed
diff -q $file "$file.new" > /dev/null 2> /dev/null
if test "$?" = "0"; then
echo $file is unchanged
else
cp "$file.new" $file
fi
rm -f "$file.new"])
# Must set this variable so that the framework m4 knows
# what file to include in opal/mca/event/event.h
opal_event_libevent207_include="libevent207/libevent207.h"
# Also pass some *_ADD_* flags upwards to the framework m4
# for various compile/link flags that are needed a) to
# build the rest of the source tree, and b) for the wrapper
# compilers (in the --with-devel-headers case).
file=$basedir/libevent
opal_event_libevent207_ADD_CPPFLAGS="-I$OMPI_TOP_SRCDIR/$file -I$OMPI_TOP_SRCDIR/$file/include"
AS_IF([test "$OMPI_TOP_BUILDDIR" != "$OMPI_TOP_SRCDIR"],
[opal_event_libevent207_ADD_CPPFLAGS="$opal_event_libevent207_ADD_CPPFLAGS -I$OMPI_TOP_BUILDDIR/$file/include"])
if test "$with_devel_headers" = "yes" ; then
opal_event_libevent207_ADD_WRAPPER_EXTRA_CPPFLAGS='-I${includedir}/openmpi/opal/mca/event/libevent207/libevent -I${includedir}/openmpi/opal/mca/event/libevent207/libevent/include'
fi
$1],
[$2
OPAL_HAVE_WORKING_EVENTOPS=0])
unset file
])

View file

@@ -1,759 +0,0 @@
Changes in 2.0.7-rc:
[Autogenerated from the Git log, sorted and cleaned by hand.]
NEW APIS
o Expose a evdns_base_nameserver_sockaddr_add() function to add a nameserver by sockaddr (1952143)
o Add event_config_set_num_cpus_hint() for tuning win32 IOCP thread pools, etc. (2447fe8 Christopher Davis)
BUGFIXES
o Fix a nasty dangling-event bug when using rate-limiting groups (0bffe43)
o Clean up syntax on TAILQ_ENTRY() usage to build correctly with recent MSVC (60433a0 Gilad Benjamini)
o Make definition of WIN32_LEAN_AND_MEAN in event.h conditional (3920172 Gilad Benjamini)
o Correctly detect failure to delete bufferevent read-timeout event (da6e7cd)
o Set close-on-exec bit for file descriptors created by dns subsystem (d0b8843)
o Fix kqueue correctness test on x86_64 (6123d12)
o Detect events with no ev_base; warn instead of crashing (f1074b7)
o Fix an issue with forking and signal socketpairs in select/poll backends (d61b2f3)
o Stop using global arrays to implement the EVUTIL_ctype functions (1fdec20)
o On windows, make lock/thread function tables static (5de2bcb)
o Close th_notify_fds and open a new pair on reinit (495ed66)
o Declare signal handler function as "__cdecl" on Windows (f0056d0)
o Use the _func() replacements for open, fstat, etc in evutil.c on win32 (e50c0fc)
o Only process up to MAX_DEFERRED deferred_cbs at a time (17a14f1 Christopher Davis)
THREADING BUGFIXES
o Avoid deadlock when activating signals (970e6ad)
o Add a condition variable backend, with implementations for pthreads and win32 (d4977b5)
o Use conditions instead of current_event_lock to fix a deadlock (e0972c2)
o Fix logic error in win32 TRY_LOCK that caused problems with rate-limiting (4c32b9d)
o Avoid needlessly calling evthread_notify_base() when the loop is not running (c7a06bf)
o Minimize calls to base_notify implementation functions, thereby avoiding needless syscalls (4632b78)
IOCP BUGFIXES
o IOCP-related evbuffer fixes (03afa20 Christopher Davis)
o Stop IOCP when freeing the event_base (d844242 Christopher Davis)
o Some IOCP bufferevent tweaks (76f7e7a Christopher Davis)
TESTS
o Make the regress_pthread.c tests work on windows with current test APIs (d74ae38)
o Add a unit test for conditions (5fb1095)
o Allow more than one copy of regression tests to run at once (a97320a)
o Fix event_del(0) instance in bench.c (b0f284c Shuo Chen)
o Fix a few memory leaks in the tests (1115366)
o IOCP-related unit test tweaks (499452f Christopher Davis)
o Improve testing of when thread-notification occurs (ce85280)
BUILD AND DISTRIBUTION
o Add pkgconfig files for libevent_{openssl,pthreads} (ebcb1f0)
o Change include order in Makefile.nmake (4022b28)
o Make include/event2/event-config.h not included in source dist (a4af9be)
o Honor NDEBUG; build without warnings with NDEBUG; make NDEBUG always-off in unit test code (743f866)
o Declare evkeyvalq and event_list even if event_struct.h comes before sys/queue.h (d3ceca8)
o Move evkeyvalq into a separate header for evhttp_parse_query users (ca9048f)
o Prefer autoreconf -ivf to manual autogen.sh (7ea8e89)
CLEANUP
o Completely remove the (mostly-removed) obsolete thread functions (3808168)
o Rename regress_pthread.c to regress_thread.c (041989f)
o Make defer-internal.h use lock macros, not direct calls to lock fns (5218d2a)
DOCUMENTATION
o Document that DNS_NO_SEARCH is an obsolete alias for DNS_QUERY_NO_SEARCH (33200e7)
o Update the whatsnew-2.0.txt document (4991669)
Changes in 2.0.6-rc:
[Autogenerated from the Git log, sorted by hand.]
DOCUMENTATION
o Document a change in the semantics of event_get_struct_event_size() (e21f5d1)
o Add a comment to describe our plan for library versioning (9659ece)
o Fix sentence fragment in docs for event_get_struct_event_size() (7b259b6)
NEW FEATURES AND INTERFACE CHANGES
o Remove the obsolete evthread interfaces (c5bab56)
o Let evhttp_send_error infer the right error reasons (3990669)
o Add a function to retrieve the other side of a bufferevent pair (17a8e2d)
o Add bufferevent_lock()/bufferevent_unlock() (215e629)
o Stop asserting when asked for a (unsupported) TCP dns port. Just return NULL. (7e87a59)
o Replace (unused,always 0) is_tcp argument to evdns_add_server_port*() with flags (e1c1167)
o Constify a couple of arguments to evdns_server_request_add_*_reply (cc2379d)
o Add an interface to expose min_share in ratelimiting groups (6ae53d6)
BUGFIXES
o Avoid event_del on uninitialized event in event_base_free (6d19510)
o Add some missing includes to fix Linux build again (75701e8)
o Avoid close of uninitialized socket in evbuffer unit test (bda21e7)
o Correctly recognize .255 addresses as link-local when looking for interfaces (8c3452b)
o If no evdns request can be launched, return NULL, not a handle (b14f151)
o Use generic win32 interfaces, not ASCII-only ones, where possible. (899b0a3)
o Fix the default HTTP error template (06bd056 Felix Nawothnig)
o Close the file in evutil_read_file whether there's an error or not. (0798dd1 Pierre Phaneuf)
o Fix possible nullptr dereference in evhttp_send_reply_end() (29b2e23 Felix Nawothnig)
o never let bufferevent_rlim functions return negative (0859870)
o Make sample/hello_world work on windows (d89fdba)
o Fix a deadlock related to event-base notification. Diagnosed by Zhou Li, Avi Bab, and Scott Lamb. (17522d2)
o Possible fix to 100% cpu usage with epoll and openssl (cf249e7 Mike Smellie)
o Don't race when calling event_active/event_add on a running signal event (fc5e0a2)
o Suppress a spurious EPERM warning in epoll.c (e73cbde)
o Fix wrong size calculation of iovec buffers when exact=1 (65abdc2 niks)
o Change bufferevent_openssl::do_write so it doesn't call SSL_write with a 0 length buffer (c991317 Mike Smellie)
o Fixed compilation of sample/le-proxy.c on win32 (13b912e Trond Norbye)
o Fix rate-limit calculation on openssl bufferevents. (009f300)
o Remember to initialize timeout events for bufferevent_async (de1f5d6 Christopher Davis)
BUILD AND DISTRIBUTION CHANGES
o Test the unlocked-deferred callback case of bufferevents (dfb75ab)
o Remove the now-unusable EVTHREAD_LOCK/UNLOCK constants (fdfc3fc)
o Use -Wlogical-op on gcc 4.5 or higher (d14bb92)
o Add the libtool-generated /m4/* stuff to .gitignore (c21c663)
o Remove some automake-generated files from version control. (9b14911)
o Have autogen.sh pass --force-missing to automake (8a44062)
o Set library version for libevent_pthreads correctly (b2d7440)
o Really only add libevent_core.la to LIBADD on mingw (1425003 Sebastian Hahn)
o Build more cleanly with NetBSDs that dislike toupper(char) (42a8c71)
o Fix unit tests with -DUSE_DEBUG enabled (28f31a4)
o Fix evdns build with -DUNICODE (5fa30d2)
o Move event-config.h to include/event2 (ec347b9)
TESTING
o Add options to test-ratelim.c to check its results (2b44dcc)
o Make test-ratelim clean up after itself better. (b5bfc44)
o Remove the now-obsolete setup_test() and cleanup_test() functions (e73f1d7)
o Remove all non-error prints from test/regress.c (8bc1e3d)
o Make test.sh exit with nonzero status if tests fail (faf2a04)
o Have the unit tests report errors from test.sh (3689bd2)
o Fix logic in correcting high values from FIONREAD (3467f2f)
o Add test for behavior on remote socket close (44d57ee)
o Unit test for event_get_struct_event_size() (7510aac)
o Make test/test.sh call test-changelist (7c92691)
o Fix badly-behaved subtest of dns/bufferevent_connect_hostname (840a72f Joachim Bauch)
o Add option to test-ratelim to test min_share (42f6b62)
o Fix an assertion bug in test-ratelim (b2c6202)
o Make tests quieter on local dns resolver failure (e996b3d)
o Increase the tolerance in our unit tests for sloppy clocks. (170ffd2)
o Use AF_INET socketpair to test sendfile on Solaris (9b60209)
o Make test-changelist count cpu usage right on win32 (ea1ea3d)
INTERNALS, PERFORMANCE, AND CODE CLEANUPS
o Mark the event_err() functions as __attribute__((noreturn)) (33bbbed)
o Do not check that event_base is set in EVBASE_ACQUIRE_LOCK (218a3c3)
o Replace (safe) use of strcpy with memcpy to appease OpenBSD (caca2f4)
o Remove some dead assignments (47c5dfb)
o Fix a pedantic gcc 4.4 warning in event2/event.h (276e7ee)
o Drain th_notify_fd[0] more bytes at a time. (a5bc15b)
o Tidy up the code in evthread_make_base_notifiable a little (61e1eee)
o Pass flags to fcntl(F_SETFL) and fcntl(F_SETFD) as int, not long (7c2dea1)
o Remove unused variables in test/test-changelist.c (b00d4c0)
o Fix whitespace. (cb927a5)
o Improve error message for failed epoll to make debugging easier. (9e725f7)
o Turn our socketpair() replacement into its own function (57b30cd)
Changes in 2.0.5-beta:
[Autogenerated from the Git log, sorted by hand.]
DOCUMENTATION
o Update all our copyright notices to say "2010" (17efc1c)
o Add Christopher Clark and Maxim Yegorushkin to the LICENSE file (38b7b57)
o Clarify Christopher Clark's status as writer of original ht code. (78772c3)
o Try to comment some of the event code more (cdd4c49)
o Add a few more evmap/changelist comments (c247adc)
o Add a comment to explain why evdns_request is now separate from request (ceefbe8)
o Document evutil_secure_rng_init() and evutil_secure_rng_add_bytes() (a5bf43a)
o Stop distributing and installing manpages: they were too inaccurate (7731ec8)
NEW FEATURES AND INTERFACE CHANGES
o Remove signal_assign() and signal_new() macros. (2fac0f7)
o Make evdns use the regular logging system by default (b2f2be6)
o Allow evbuffer_read() to split across more than 2 iovecs (e470ad3)
o Functions to manipulate existing rate limiting groups. (ee41aca)
o Functions to track the total bytes sent over a rate limit group. (fb366c1)
o Detect and refuse reentrant event_base_loop() calls (b557b17)
o Limit the maximum number of events on each socket to 65535 (819f949)
o Add evbuffer_copyout to copy data from an evbuffer without draining (eb86c8c)
o Expose the request and reply members of rpc_req_generic() (07edf78 Shuo Chen)
o Add void* arguments to request_new and reply_new evrpc hooks (755fbf1 Shuo Chen)
o Seed the RNG using sysctl() as well as /dev/urandom (71fc3eb)
o Make evutil_secure_rng_init() work even with builtin arc4random (f980716)
o Report DNS error when lookup fails during bufferevent_socket_connect_hostname. (0ef4070 Christopher Davis)
o Release locks on bufferevents while executing callbacks (a5208fe Joachim Bauch)
o Make debug mode catch mixed ET and non-ET events on an fd (cb67074)
o Catch attempts to enable debug_mode too late (9ecf0d4)
o Refuse null keys in evhttp_parse_query() (953e229 Frank Denis)
BUGFIXES
o Avoid a spurious close(-1) on Linux (70a44b6)
o Do not close(-1) when freeing an uninitialized socket bufferevent (b34abf3)
o Free evdns_base->req_heads on evdns_base_free (859af67)
o Avoid an (untriggerable so far) crash bug in bufferevent_free() (0cf1431)
o Set mem_offset for every bufferevent type (657d1b6)
o Fix infrequent memory leak in bufferevent_init_common(). (8398641 Jardel Weyrich)
o Make evutil_signal_active() match declaration. (e1e703d Patrick Galbraith)
o Fix minheap code to use replacement malloc functions (a527618)
o Fix a free(NULL) in minheap-internal.h (6f20492)
o Fix critical bug in evbuffer_write when writev is not available (cda56ab)
o Make the no_iovecs case of write_atmost compile (8e227b0)
o Fix a memory leak when appending/prepending to a buffer with unused space. (45068a3)
o Clean up a mistake in pointer manipulation in evbuffer_remove (28bfed4 Christopher Davis)
o Always round up when there's a fractional number of msecs. (8f9e60c Christopher Davis)
o Fix compiler warnings under WIN32 (d469c50 Giuseppe Scrivano)
o Clean up properly when adding a signal handler fails. (b84b598 Gilad Benjamini)
o Ensure that evdns_request is a persistent handle. (15bb82d Christopher Davis)
o Free search state when finished searching to avoid an infinite loop. (a625840 Christopher Davis)
o Assert for valid requests as necessary. (67072f3 Christopher Davis)
o do not leak the request object on persistent connections (9d8edf2)
o Make evdns logging threadsafe (b1c7950)
o Fix a couple of bugs in the BSD sysctl arc4seed logic (a47a4b7)
o Remove one last bug in last_with_datap logic. Found with valgrind (d49b92a)
o fix a leak when unpausing evrpc requests (94ee125)
o Fix a memory leak when unmarshalling RPC object arrays (f6ab2a2)
o Fix compilation when openssl support is disabled (40c301b)
o Allow empty reason line in HTTP status (739e688 Pierre Phaneuf)
o Fix a compile warning introduced in 739e688 (bd1ed5f Sebastian Hahn)
o Fix nonstandard TAILQ_FOREACH_REVERSE() definition (71afc52 Frank Denis)
o Try /proc on Linux as entropy fallback; use sysctl as last resort (20fda29)
o Fix symbol conflict between mm_*() macros and libmm (99e50e9)
o Fix some crazy macro mistakes in arc4random.c (90d4225)
o Make evbuffer_add_file() work on windows (dcdae6b)
o Fix unused-variable warning when building with threads disabled (ad811cd)
o Numerous opensolaris compilation fixes (c44de06)
o Fix getaddrinfo with protocol unset on Solaris 9. Found by Dagobert Michelsen (2cf2a28)
o Fix another nasty solaris getaddrinfo() behavior (3557071)
o Define _REENTRANT as needed on Solaris, elsewhere (c1cd32a)
o Fix some autoconf issues on OpenBSD (7c519df)
BUILD AND DISTRIBUTION CHANGES
o Distribute libevent.pc.in, not libevent.pc (22aff04)
o Avoid errors in evutil.c when building with _UNICODE defined (b677032 Brodie Thiesfield)
o Avoid errors in http.c when building with VC 2003 .NET (13e4f3b Brodie Thiesfield)
o Support the standard 'make check' target in place of 'make verify' (426c8fb)
o Remove redundant stuff from EXTRA_DIST (b660edf)
o Switch to using AM conditionals in place of AC_LIBOBJ (2e898f5)
o Remove an orphaned RELEASE flag in Makefile.am (0794b0d)
o Give a better warning for bad automake versions. (77c917d)
o Use dist_bin_SCRIPTS, not EXTRA_DIST, to distribute scripts (9eb2fd7)
o Never test for select() on windows (3eb044d Trond Norbye)
o Do not inhibit automake dependencies generation (10c4c90 Giuseppe Scrivano)
o Create shared libraries under Windows (3cbca86 Giuseppe Scrivano)
o Add ctags/etags files to .gitignore (0861d17)
o Only specify -no-undefined on mingw (25433b9)
o Only add libevent_core.la to LIBADD on mingw (fdc6297)
TESTING
o Get bench_http to work on Windows; add a switch to enable IOCP. (4ac38a5 Christopher Davis)
o VC has no getopt(), so do without in bench_http. (1273d2f Christopher Davis)
o Fix an obnoxious typo in the bufferevent_timeout_filter test (0d047c3)
o Fix a write of uninitialized RAM in regression tests (68dc742)
o Fix some memory leaks in the unit tests (274a7bd)
o Make 'main/many_events' test 70 fds, not 64. (33874b0)
o Unit-test every evbuffer_add_file() implementation. (06a4443)
o Add more unit tests for evbuffer_expand (8c83e99)
o Test another case of evbuffer_prepend (1234b95)
o Fix a possible double-free bug in SSL bufferevents with CLOSE_ON_FREE (7501895)
o Add dns/search_cancel unit test. (39b870b Christopher Davis)
o Make http_base_test stop leaking an event_base. (96730d3)
o Detect broken unsetenv at unit-test runtime (f37cd4c)
o Implement regress_make_tempfile on win32 to test evbuffer_add_file (b4f12a1)
o add more (currently skipped) add_file tests on win32 (05de45d)
o Fix bench_http build on win32. (384d124)
o Make unit test for add_file able to tell "error" from "done" (88a543f)
o Make test for bufferevent_connect_hostname system-neutral (f89168e)
o Make test.sh support mingw/msys on win32 (0ee6f6c)
o Fix test.sh on freebsd (3d9e05b)
INTERNALS, PERFORMANCE, AND CODE CLEANUPS
o Improve the speed of evbuffer_readln() (cc1600a)
o more whitespace normalization (2c2618d)
o Revise evbuffer to add last_with_data (2a6d2a1)
o Use last_with_data in place of previous_to_last (c8ac57f)
o Remove previous_to_last from evbuffer (6f47bd1)
o Fix last_with_data compilation on windows (1e7b986)
o Add some glass-box tests for the last_with_data code. (17da042)
o Improve robustness for refcounting (f1bc125)
o Remove a needless min_heap_shift_up_() call (7204b91)
o Increase MIN_BUFFER_SIZE to 512 (1024 on 64-bit) (2014ae4)
o Do not use evbuffer_expand() to add the first chain to a buffer (5c0ebb3)
o Make evbuffer_prepend handle empty buffers better (c87272b)
o Replace last_with_data with a slightly smarter version (b7442f8)
o Turn the increasingly complex *_CHAIN() macros into functions (96865c4)
o Rewrite evbuffer_expand and its users (d5ebcf3)
o Add evutil_tv_to_msec for safe conversion of timevals to milliseconds. (850c3ff Christopher Davis)
o Initialize last_with_datap correctly in evbuffer_overlapped (a0983b6)
o Replace EVUTIL_CLOSESOCKET macro with a function (899c1dc Sebastian Sjöberg)
o Move domain search state to evdns_request. (beaa14a Christopher Davis)
o Remove redundant checks for lock!=NULL before calling EVLOCK_LOCK (50ec59f)
o Rename current_base symbol to event_global_current_base_ (c16e684)
o Fix whitespace in evutil.c (935e150)
o Replace users of "int fd" with "evutil_socket_t fd" in portable code (c7cf6f0)
Changes in 2.0.4-alpha:
[Autogenerated from the Git log, sorted by hand.]
DOCUMENTATION
o Add stub header for 2.0.4-alpha changelog. (94d0065)
o Improve the README with more information and links. (0b42726)
o Add more people who wrote patches to the acknowledgments (0af10d5)
o Add a warning about the use of event_initialized. (f32b575)
o Add a LICENSE file so people can find our license easily (7067006)
o Add a new "hello world" sample program (becb9f9)
o Clarify status of example programs (d60a1bd)
o Update time-test.c to use event2 (f4190bf)
o Add the arc4random.c license to the LICENSE file. (e15e1e9)
NEW FEATURES AND INTERFACE CHANGES
o Improved optional lock debugging. (0cd3bb9)
o Rate-limiting for bufferevents; group and individual limits are supported. (737c9cd)
o Testing code for bufferevent rate-limiting. (f0c0124)
o Make the initial nameserver probe timeout configurable. (1e56a32)
o Revise the locking API: deprecate the old locking callbacks and add trylock. (347952f)
o Do not make bufferevent_setfd implicitly disable EV_READ and EV_WRITE. (8274379)
o Do not ignore bufferevent_enable(EV_READ) before bufferevent_connect(). (4a5b534)
o Introduced evutil_make_socket_closeonexec() to preserve fd flags for F_SETFD. (d0939d2 Jardel Weyrich)
o evdns_getaddrinfo() now supports the /etc/hosts file. (72dd666)
o Look at the proper /etc/hosts file on windows. (66c02c7)
o Allow http connections to use evdns for hostname looksups. (c698b77)
o Changelist code to defer event changes until just before dispatch (27308aa)
o do not use a function to assign the evdns base; instead assign it via evhttp_connection_base_new() which is a new function introduced in 2.0 (5032e52)
o Functions to access more fields of struct event. (0683950)
o Make kqueue use changelists. (45e5ae3)
o Remove kqueue->pend_changes. (3225dfb)
o Minimize epoll_ctl calls by using changelist (c8c6a89)
o Add support for a "debug mode" to try to catch common errors. (cd17c3a)
o Note a missing ratelim function (361da8f)
o Add ev_[u]intptr_t to include/event2/util.h (1fa4c81)
o const-ify a few more functions in event.h (d38a7a1)
o Deprecate EVENT_FD and EVENT_SIGNAL. (f6b2694)
o Remove EVUTIL_CHECK_FMT. (6c21c89)
o Add EV_*_MAX macros to event2/util.h to expose limits for ev_* types. (aba1fff)
o Functions to view and manipulate rate-limiting buckets. (85047a6)
o Add the rest of the integer limits, and add a test for them. (60742d5)
o Remove the 'flags' argument from evdns_base_set_option() (1dd7e6d)
o Add an arc4random implementation for use by evdns (d4de062)
o Use off_t for the length parameter of evbuffer_add_file (3fe60fd)
o Construct Windows locks using InitializeCriticalSectionAndSpinCount (32c6f1b)
o Expose view of current rate limit as constrained by group limit (162ce8a)
o Provide consistent, tested semantics for bufferevent timeouts (d328829)
BUGFIXES AND TESTS
o Tolerate code that returns from a fatal_cb. (91fe23f)
o Parenthesize macro arguments more aggressively (07e9e9b)
o Fix memory-leak of signal handler array with kqueue. (e1ffbb8)
o Stop passing EVTHREAD_READ and EVTHREAD_WRITE to non-rw locks. (76cd2b7)
o Fix two use-after-free bugs in unit tests spoted by lock debugging (d84d838)
o Fix a locking bug in event_base_loop() (da1718b)
o Fix an evdns lock violation. (2df1f82 Zhuang Yuyao)
o Valgrind fix: Clear struct kevent before checking for OSX bug. (56771a3 William Ahern)
o Fix up evthread compilation on windows (bd6f1ba Roman Puls)
o Fix regress_iocp.c usage of old lock allocation macros. (31687b4 unknown)
o Update nmake makefile to build evthread.c (b62d979 unknown)
o Fix a crash when reading badly formatted resolv.conf; from Yasuoka Masahiko (6c7c579 Yasuoka Masahiko)
o Fix a snow leopard compile warning in the unit tests. (7ae9445)
o Fix compile on Snow Leopard with gcc warnings enabled (70cdfe4 Sebastian Hahn)
o Only define _GNU_SOURCE if it is not already defined. (ea6b1df Joachim Bauch)
o Update sample/signal-test.c to use newer APIs and not leak. (f6430ac Evan Jones)
o Fix a segfault when writing a very fragmented evbuffer onto an SSL (a6adeca Joachim Bauch)
o Fix a segfault when freeing SSL bufferevents in an unusual order (a773df5 Joachim Bauch)
o Drop install-sh from our git repo: a mismatched version could break "make dist" (6799527)
o Set all instances of the version number correctly. (5a112d3)
o Fix a few locking issues on windows. (c51bb3c unknown)
o Use evutil_socket_t, not int, when logging socket errors. (292467c)
o Fix up behavior of never-deferred callbacks a little (390e056)
o Replace some cases of uint32_t with ev_uint32_t. (a47d88d)
o Fix compilation of devpoll.c by adding missing thread includes. (fee2c77 Dagobert Michelsen)
o Make evutil_make_socket_nonblocking() leave any other flags alone. (4c8b7cd Jardel Weyrich)
o Fix an fd leak in evconnlistener_new_bind(). (24fb502 Jardel Weyrich)
o Fix a bogus free in evutil_new_addrinfo() (0d64051 Jardel Weyrich)
o Adjusted fcntl() retval comparison on evutil_make_socket_nonblocking(). (4df7dbc Jardel Weyrich)
o Fix the code that allowed DNS options to not end with : (ee4953f)
o Fix crash bugs when a bufferevent's eventcb is not set. (2e8eeea)
o Fix test-ratelim compilation on Linux. (885b427)
o Fix compilation of rate-limiting code on win32. (165d30e)
o Eradicated the last free() call. Let mm_free() take care of deallocation. (0546ce1 Jardel Weyrich)
o Fix byte counts when mixing deferred and non-deferred evbuffer callbacks. (29151e6)
o Fixed a memory leak on windows threads implementation. The CRITICAL_SECTION was not being free'd in evthread_win32_lock_free(). (2f33e00 Jardel Weyrich)
o Fixed a fd leak in start_accepting(), plus cosmetic changes (4367a33 Jardel Weyrich)
o Improved error handling in evconnlistener_new_async(). Also keeping the fd open because it is not opened by this function, so the caller is responsible for closing it. Additionally, since evconnlistener_new_bind() creates a socket and passes it to the function above, it required error checking to close the same socket. (fec66f9 Jardel Weyrich)
o Don't use a bind address for nameservers on loopback (8d4aaf9)
o Fix compilation of rate-limit code when threading support is disabled (97a8c79)
o Detect setenv/unsetenv; skip main/base_environ test if we can't fake them. (7296971)
o Check more internal event_add() calls for failure (ff3f6cd)
o Fix windows and msvc build (5c7a7bc)
o Call event_debug_unassign on internal events (a19b4a0)
o Try to fix a warning in hash_debug_entry (137f2c6)
o Fix a dumb typo in ev_intptr_t definitions. (27c9a40)
o do not fail while sending on http connections the client closed. (93d7369)
o make evhttp_send() safe against terminated connections, too (3978180)
o Make Libevent 1.4.12 build on win32 with Unicode enabled. (000a33e Brodie Thiesfield)
o Fix some additional -DUNICODE issues on win32. (a7a9431)
o Add a check to make sure our EVUTIL_AI flags do not conflict with the native ones (c18490e)
o Always use our own gai_strerror() replacement. (6810bdb)
o Make RNG work when we have arc4random() but not arc4random_buf() (4ec8fea)
o validate close cb on server when client connection closes (2f782af)
o Fix two unlocked reads in evbuffer. (7116bf2)
o When working without a current event base, don't try to use IOCP listeners (cb52838)
o Fix getpid() usage on Windows (ff2a134)
o Add a unit test for secure rng. (48a29b6)
o Add some headers to fix freebsd compilation (b72be50)
o When connect() succeeds immediately, don't invoke the callback immediately. (7515de9)
o Suspend read/write on bufferevents during hostname lookup (db08f64)
o Make bufferevent_free() clear all callbacks immediately. (b2fbeb3)
o Fix some race conditions in persistent events and event_reinit (e2642f0)
o Fix a bug in resetting timeouts on persistent events when IO triggers. (38ec0a7)
o Add a test for timeouts on filtering bufferevents. (c02bfe1)
o Add test for periodic timers that get activated for other reasons (8fcb7a1)
o Use new timeval diff comparison function in bufferevent test (f3dfe46)
o Delete stack-alloced event in new unit test before returning. (7ffd387)
o Fix mingw compilation (23170a6)
o Try to define a sane _EVENT_SIZEOF_SIZE_T for msvc compilation (1e14f82)
o Fix arc4random compilation on MSVC. (98edb89)
o deal with connect() failing immediately (7bc48bf)
o Small cleanups on freebsd-connect-refused patch. (57b7248)
BUILD AND DISTRIBUTION CHANGES
o Remove the contents of WIN32-Prj as unmaintained. (c69d5a5)
o Allow the user to redirect the verbose output of test/test.sh to a file (c382de6)
o Allow test.sh to be run as ./test/test.sh (7dfbe94)
o Never believe that we have pthreads on win32, even if gcc thinks we do. (78ed097)
o Make it compile under gcc --std=c89. (e2ca403)
o Fix a number of warnings from gcc -pedantic (918e9c5)
o Add the msvc-generated .lib files to .gitignore. (e244a2e)
o Add the "compile" script to gitignore. (1ba6bed)
INTERNALS AND CODE CLEANUPS
o Add a .gitignore file. (ba34071)
o New EVTHREAD_TRY_LOCK function to try to grab a lock. (689fc09)
o Add the abilitity to mark some buffer callbacks as never-deferred. (438f9ed)
o Refactor our 'suspend operation' logic on bufferevents. (0d744aa)
o Simplify the read high-watermark checking. (5846bf6)
o Improve readability of evutil_unparse_protoname() (5a43df8 Jardel Weyrich)
o Expose our cached gettimeofday value with a new interface (47854a8)
o Whitespace fixes in test.sh (0b151a9)
o Enable branch-prediction hints with EVUTIL_UNLIKELY. (eaaf27f)
o Refactor code from evdns into a new internal "read a file" function. (0f7144f)
o Cosmetic changes in evconnlistener_new(), new_accepting_socket(), accepted_socket_invoke_user_cb() and iocp_listener_enable(). (510ab6b Jardel Weyrich)
o Add unit-test for bad_request bug fixed in 1.4 recently. (6cc79c6 Pavel Plesov)
o Add a comment on evthread_enable_lock_debuging. (b9f43b2)
o Fix test.sh on shells without echo -n (94131e9)
o More unit tests for getaddrinfo_async: v4timeout and cancel. (a334b31)
o Make http use evconnlistener. (ec34533)
o move dns utility functions into a separate file so that we can use them for http testing (b822639)
o add a test for evhttp_connection_base_new with a dns_base (26714ca)
o forgot to add void to test function (78a50fe)
o Add a forgotten header (changelist-internal.h) (4b9f307)
o Remove some commented-out code in evutil (26e1b6f)
o Remove a needless include of rpc_compat.h (70a4a3e)
o Use less memory for each entry in a hashtable (a66e947)
o Try to untangle the logic in server_port_flush(). (439aea0)
o Use ev_[u]intptr_t types in place of [u]intptr_t (cef61a2)
o Reduce windows header includes in our own headers. (da6135e)
o clean up terminate_chunked test (e8a9782)
o Increment the submicro version number. (63e868e)
o Update event-config.h version number to match configure.in (aae7db5)
o Clean up formatting: Disallow space-before-tab. (8fdf09c)
o Clean up formatting: use tabs, not 8-spaces, to indent. (e5bbd40)
o Clean up formatting: remove trailing spaces (e5cf987)
o Clean up formatting: function/keyword spacing consistency. (4faeaea)
Changes in 2.0.3-alpha:
o Add a new code to support SSL/TLS on bufferevents, using the OpenSSL library (where available).
o Fix a bug where we didn't allocate enough memory in event_get_supported_methods().
o Avoid segfault during failed allocation of locked evdns_base. (Found by Rocco Carbone.)
o Export new evutil_ascii_* functions to perform locale-independent character type operations.
o Try to compile better with MSVC: patches from Brodie Thiesfield
o New evconnlistener_get_fd function to expose a listener's associated socket.
o Expose an ev_socklen_t type for consistent use across platforms.
o Make bufferevent_socket_connect() work when the original fd was -1.
o Fix a bug in bufferevent_socket_connect() when the connection succeeds too quickly.
o Export an evutil_sockaddr_cmp() to compare to sockaddr objects for equality.
o Add a bufferevent_get_enabled() to tell what a bufferevent has been configured to do.
o Add an evbuffer_search_eol() function to locate the end of a line nondestructively.
o Add an evbuffer_search_range() function to search a bounded range of a buffer.
o Fix a rare crash bug in evdns.
o Have bufferevent_socket_connect() with no arguments put a bufferevent into connecting mode.
o Support sendfile on Solaris: patch from Caitlin Mercer.
o New functions to explicitly reference a socket used by an evhttp object. Patches from David Reiss.
o When we send a BEV_EVENT_CONNECTED to indicate connected status, we no longer invoke the write callback as well unless we actually wrote data too.
o If the kernel tells us that there are a negative number of bytes to read from a socket, do not believe it. Fixes bug 2841177; found by Alexander Pronchenkov.
o Do not detect whether we have monotonic clock support every time a new event base is created: instead do it only once. Patch taken from Chromium.
o Do not allocate the maximum event queue for the epoll backend at startup. Instead, start out accepting 32 events at a time, and double the queue's size when it seems that the OS is generating events faster than we're requesting them. Saves up to 374K per epoll-based event_base. Resolves bug 2839240.
o Treat an event with a negative fd as valid but untriggerable by Libevent. This is useful for applications that want to manually activate events.
o Fix compilation on Android, which forgot to define fd_mask in its sys/select.h
o Do not drop data from evbuffer when out of memory; reported by Jacek Masiulaniec
o New event_base_got_exit() and event_base_got_break() functions to tell whether an event loop exited because of an event_base_loopexit() or an event_base_loopbreak(). Patch from Ka-Hing Cheung.
o When adding or deleting an event from a non-main thread, only wake up the main thread when its behavior actually needs to change.
o Fix some bugs when using the old evdns interfaces to initialize the evdns module.
o Detect errors during bufferevent_connect(). Patch from Christopher Davis.
o Fix compilation for listener.h for C++ - missing extern "C". Patch from Ferenc Szalai.
o Make the event_base_loop() family of functions respect thread-safety better. This should clear up a few hard-to-debug race conditions.
o Fix a bug when using a specialized memory allocator on win32.
o Have the win32 select() backend label TCP-socket-connected events as EV_WRITE, not EV_READ. This should bring it in line with the other backends, and improve portability. Patch from Christopher Davis.
o Stop using enums as arguments or return values when what we mean is a bitfield of enum values. C++ doesn't believe that you can OR two enum values together and get another enum, and C++ takes its typing seriously. Patch from Christopher Davis.
o Add an API to replace all fatal calls to exit() with a user-provided panic function.
o Replace all assert() calls with a variant that is aware of the user-provided logging and panic functions.
o Add a return value to event_assign so that it can fail rather than asserting when the user gives it bad input. event_set still dies on bad input.
o The event_base_new() and event_base_new_with_config() functions now never call exit() on failure. For backward "compatibility", event_init() still does, but more consistently.
o Remove compat/sys/_time.h. It interfered with system headers on HPUX, and its functionality has been subsumed by event2/util.h and util-internal.h.
o Add a new bufferevent_socket_connect_hostname() to encapsulate the resolve-then-connect operation.
o Build kqueue.c correctly on GNU/kFreeBSD platforms. Patch pulled upstream from Debian.
o Alternative queue-based timeout algorithm for programs that use a large number of timeouts with the same value.
o New event_base_config option to disable the timeval cache entirely.
o Make EV_PERSIST timeouts more accurate: schedule the next event based on the scheduled time of the previous event, not based on the current time.
o Allow http.c to handle cases where getaddrinfo returns an IPv6 address. Patch from Ryan Phillips.
o Fix a problem with excessive memory allocation when using multiple event priorities.
o Default to using arc4random for DNS transaction IDs on systems that have it; from OpenBSD.
o Never check the environment when we're running setuid or setgid; from OpenBSD.
o Options passed to evdns_set_option() no longer need to end with a colon.
o Add an evutil_getaddrinfo() function to clone getaddrinfo on platforms that don't have it.
o Add an evdns_getaddrinfo() function to provide a nonblocking getaddrinfo using evdns, so programs can perform useful hostname lookup.
o Finally expose the IOCP-based bufferevent backend. It passes its unit tests, but probably still has some bugs remaining. Code by Nick Mathewson and Christopher Davis.
o Numerous other bugfixes.
o On FreeBSD and other OSes, connect can return ECONNREFUSED immediately; instead of failing the function call, report the failure in the callback.
o Fix a race condition in the pthreads test case; found by Nick Mathewson
o Remove most calls to event_err() in http and deal with memory errors instead
Changes in 2.0.2-alpha:
o Add a new flag to bufferevents to make all callbacks automatically deferred.
o Make evdns functionality locked, and automatically defer dns callbacks.
o Fix a possible free(NULL) when freeing an event_base with no signals.
o Add a flag to disable checking environment variables when making an event_base
o Disallow setting less than 1 priority.
o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
o Use signal.h, not sys/signal.h. [Patch from mmadia]
o Try harder to build with certain older c99 compilers.
o Make sure that an event_config's flags field is always initialized to 0. [Bug report from Victor Goya]
o Avoid data corruption when reading data entirely into the second-to-last chain of an evbuffer. [Bug report from Victor Goya]
o Make sendfile work on FreeBSD
o Do not use vararg macros for accessing evrpc structures; this is not backwards compatible, but we did not promise any backwards compatibility for the rpc code.
o Actually define the event_config_set_flag() function.
o Try harder to compile with Visual C++.
o Move event_set() and its allies to event2/event_compat.h where they belong.
o Remove the event_gotsig code, which has long been deprecated and unused.
o Add an event_get_base() function to return the base assigned to an event.
o New function to automate connecting on a socket-based bufferevent.
o New functions to automate listening for incoming TCP connections.
o Do case-insensitive checks with a locale-independent comparison function.
o Rename the evbuffercb and everrorcb callbacks to bufferevent_data_cb and bufferevent_event_cb respectively. The old names are available in bufferevent_compat.h.
o Rename the EVBUFFER_* codes used by bufferevent event callbacks to BEV_EVENT_*, to avoid namespace collision with evbuffer flags. The old names are available in bufferevent_compat.h.
o Move the EVBUFFER_INPUT and EVBUFFER_OUTPUT macros to bufferevent_compat.h
o Add a bufferevent_getfd() function to mirror bufferevent_setfd()
o Make bufferevent_setfd() return an error code if the operation is not successful.
o Shave 22 bytes off struct event on 32-bit platforms by shrinking and re-ordering fields. The savings on 64-bit platforms is likely higher.
o Cap the maximum number of priorities at 256.
o Change the semantics of evbuffer_cb_set_flags() to be set-flag only; add a new evbuffer_cb_clear_flags() to remove set flags.
o Change the interface of evbuffer_add_reference so that the cleanup callback gets more information
o Revise the new evbuffer_reserve_space/evbuffer_commit_space() interfaces so that you can use them without causing extraneous copies or leaving gaps in the evbuffer.
o Add a new evbuffer_peek() interface to inspect data in an evbuffer without removing it.
o Fix a deadlock when suspending reads in a bufferevent due to a full buffer. (Spotted by Joachim Bauch.)
o Fix a memory error when freeing a thread-enabled event base with registered events. (Spotted by Joachim Bauch.)
o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
o Replace some read()/write() instances with send()/recv() to work properly on win32.
o Set truncated flag correctly in evdns server replies.
o Raise RpcGenError in event_rpcgen.py; from jmanison and Zack Weinberg
o Fix preamble of rpcgen-generated files to rely on event2 includes; based on work by jmansion; patch from Zack Weinberg.
o Allow specifying the output filename for rpcgen; based on work by jmansion; patch from Zack Weinberg.
o Allow C identifiers as struct names; allow multiple comments in .rpc files; from Zack Weinberg
o Mitigate a race condition when using socket bufferevents in multiple threads.
o Use AC_SEARCH_LIBS, not AC_CHECK_LIB to avoid needless library use.
o Do not allow event_del(ev) to return while that event's callback is executing in another thread. This fixes a nasty race condition.
o event_get_supported_methods() now lists methods that have been disabled with the EVENT_NO* environment options.
o Rename encode_int[64] to evtag_encode_int[64] to avoid polluting the global namespace. The old method names are still available as macros in event2/tag_compat.h.
Changes in 2.0.1-alpha:
o free minheap on event_base_free(); from Christopher Layne
o debug cleanups in signal.c; from Christopher Layne
o provide event_base_new() that does not set the current_base global
o bufferevent_write now uses a const source argument; report from Charles Kerr
o improve documentation on event_base_loopexit; patch from Scott Lamb
o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
o Check return value of event_add in signal.c
o provide event_reinit() to reinitialize an event_base after fork
o New function event_set_mem_functions. It allows the user to give libevent replacement functions to use for memory management in place of malloc(), free(), etc. This should be generally useful for memory instrumentation, specialized allocators, and so on.
o The kqueue implementation now catches signals that are raised after event_add() is called but before the event_loop() call. This makes it match the other implementations.
o The kqueue implementation now restores original signal handlers correctly when its signal events are removed.
o Check return value of event_add in signal.c
o Add a more powerful evbuffer_readln as a replacement for evbuffer_readline. The new function handles more newline styles, and is more useful with buffers that may contain nul characters.
o Do not mangle socket handles on 64-bit windows.
o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
o move EV_PERSIST handling out of the event backends
o small improvements to evhttp documentation
o always generate Date and Content-Length headers for HTTP/1.1 replies
o set the correct event base for HTTP close events
o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
o Rewrite win32.c backend to be O(n lg n) rather than O(n^2).
o Removed obsoleted recalc code
o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
o prefix {encode,decode}_tag functions with evtag to avoid collisions
o fix a bug with event_rpcgen for integers
o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
o add -Wstrict-aliasing to warnings and more cleanup
o removed linger from http server socket; reported by Ilya Martynov
o event_rpcgen now allows creating integer arrays
o support string arrays in event_rpcgen
o change evrpc hooking to allow pausing of RPCs; this will make it possible for the hook to do some meaningful work; this is not backwards compatible.
o allow an http request callback to take ownership of a request structure
o allow association of meta data with RPC requests for hook processing
o associate more context for hooks to query such as the connection object
o remove pending timeouts on event_base_free()
o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
o devpoll and evport need reinit; tested by W.C.A Wijngaards
o event_base_get_method; from Springande Ulv
o Send CRLF after each chunk in HTTP output, for compliance with RFC2616. Patch from "propanbutan". Fixes bug 1894184.
o Add a int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
o Allow regression code to build even without Python installed
o remove NDEBUG ifdefs from evdns.c
o detect integer types properly on platforms without stdint.h
o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
o simplify evbuffer by removing orig_buffer
o do not insert event into list when evsel->add fails
o add support for PUT/DELETE requests; from Josh Rotenberg
o introduce evhttp_accept_socket() to accept from an already created socket
o include Content-Length in reply for HTTP/1.0 requests with keep-alive
o increase listen queue for http sockets to 128; if that is not enough the evhttp_accept_socket() api can be used with a prepared socket.
o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
o test support for PUT/DELETE requests; from Josh Rotenberg
o rewrite of the evbuffer code to reduce memory copies
o Some older Solaris versions demand that _REENTRANT be defined to get strtok_r(); do so.
o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
o Provide OpenSSL style support for multiple threads accessing the same event_base
o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
o switch thread support so that locks get allocated as they are needed.
o make event methods static so that they are not exported; from Andrei Nigmatulin
o make RPC replies use application/octet-stream as mime type
o do not delete uninitialized timeout event in evdns
o Correct the documentation on buffer printf functions.
o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
o Make name_from_addr() threadsafe in http.c
o Add new thread-safe interfaces to evdns functions.
o Make all event_tagging interfaces threadsafe.
o Rename internal memory management functions.
o New functions (event_assign, event_new, event_free) for use by apps that want to be safely threadsafe, or want to remain ignorant of the contents of struct event.
o introduce bufferevent_read_buffer; allows reading without memory copy.
o expose bufferevent_setwatermark via header files and fix high watermark on read
o fix a bug in bufferevent read watermarks and add a test for them
o fix a bug in which bufferevent_write_buffer would not schedule a write event
o provide bufferevent_input and bufferevent_output without requiring knowledge of the structure
o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
o convert evhttp_connection to use bufferevents.
o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
o Remove the never-exported, never-used evhttp_hostportfile function.
o Support input/output filters for bufferevents; somewhat similar to libio's model. This will allow us to implement SSL, compression, etc, transparently to users of bufferevents such as the http layer.
o allow connections to be removed from an rpc pool
o add new evtimer_assign, signal_assign, evtimer_new, and signal_new functions to manipulate timer and signal events, analogous to the now-recommended event_assign and event_new
o switch internal uses of event_set over to use event_assign.
o introduce evbuffer_contiguous_space() api that tells a user how much data is available in the first buffer chain
o introduce evbuffer_reserve_space() and evbuffer_commit_space() to make processing in filters more efficient.
o reduce system calls for getting current time by caching it.
o separate signal events from io events; making the code less complex.
o support for periodic timeouts
o support for virtual HTTP hosts.
o turn event_initialized() into a function, and add function equivalents to EVENT_SIGNAL and EVENT_FD so that people don't need to include event_struct.h
o Build test directory correctly with CPPFLAGS set.
o Provide an API for retrieving the supported event mechanisms.
o event_base_new_with_config() and corresponding config APIs.
o migrate the evhttp header to event2/ but accessors are still missing.
o deprecate timeout_* event functions by moving them to event_compat.h
o Move windows gettimeofday replacement into a new evutil_gettimeofday().
o Make configure script work on IRIX.
o provide a method for canceling ongoing http requests.
o Make vsnprintf() returns consistent on win32.
o Fix connection keep-alive behavior for HTTP/1.0
o Fix use of freed memory in event_reinit; pointed out by Peter Postma
o constify struct timeval * where possible
o make event_get_supported_methods obey environment variables
o support for edge-triggered events on epoll and kqueue backends: patch from Valery Kholodkov
o support for selecting event backends by their features, and for querying the features of a backend.
o change failing behavior of event_base_new_with_config: if a config is provided and no backend is selected, return NULL instead of aborting.
o deliver partial data to request callbacks when chunked callback is set even if there is no chunking on the http level; allows cancelation of requests from within the chunked callback; from Scott Lamb.
o allow min_heap_erase to be called on removed members; from liusifan.
o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
o Do not use SO_REUSEADDR when connecting
o Support 64-bit integers in RPC structs
o Correct handling of trailing headers in chunked replies; from Scott Lamb.
o Support multi-line HTTP headers; based on a patch from Moshe Litvin
o Reject negative Content-Length headers; anonymous bug report
o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
o Various HTTP correctness fixes from Scott Lamb
o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
o Fix off-by-one errors in devpoll; from Ian Bell
o Make event_add not change any state if it fails; reported by Ian Bell.
o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
o Add new utility functions to correctly observe and log winsock errors.
o Do not remove Accept-Encoding header
o Clear the timer cache on entering the event loop; reported by Victor Chang
o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
o Fix a typo in setting the global event base; reported by lance.
o Set the 0x20 bit on outgoing alphabetic characters in DNS requests randomly, and insist on a match in replies. This helps resist DNS poisoning attacks.
o Make the http connection close detection work properly with bufferevents and fix a potential memory leak associated with it.
o Restructure the event backends so that they do not need to keep track of events themselves, as a side effect multiple events can use the same fd or signal.
o Add generic implementations for parsing and emitting IPv6 addresses on platforms that do not have inet_ntop and/or inet_pton.
o Allow DNS servers that have IPv6 addresses.
o Add an evbuffer_write_atmost() function to write a limited number of bytes to an fd.
o Refactor internal notify-main-thread logic to prefer eventfd to pipe, then pipe to socketpair, and only use socketpairs as a last resort.
o Try harder to pack all evbuffer reads into as few chains as possible, using readv/WSARecv as appropriate.
o New evthread_use_windows_threads() and evthread_use_pthreads() functions to set up the evthread callbacks with reasonable defaults.
o Change the semantics of timeouts in conjunction with EV_PERSIST; timeouts in that case will now repeat until deleted.
o sendfile, mmap and memory reference support for evbuffers.
o New evutil_make_listen_socket_reuseable() to abstract SO_REUSEADDR.
o New bind-to option to allow DNS clients to bind to an arbitrary port for outgoing requests.
o evbuffers can now be "frozen" to prevent operations at one or both ends.
o Bufferevents now notice external attempts to add data to an inbuf or remove it from an outbuf, and stop them.
o Fix parsing of queries where the encoded queries contained \r, \n or +
o Do not allow internal events to starve lower-priority events.
Changes in 1.4.0:
o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
o demote most http warnings to debug messages
o Fix Solaris compilation; from Magne Mahre
o Add a "Date" header to HTTP responses, as required by HTTP 1.1.
o Support specifying the local address of an evhttp_connection using set_local_address
o Fix a memory leak in which failed HTTP connections would not free the request object
o Make adding of array members in event_rpcgen more efficient, by doubling memory allocation
o Fix a memory leak in the DNS server
o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled
o Fix buffer size and string generation in evdns_resolve_reverse_ipv6().
o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them.
o In DNS responses, the CD flag should be preserved, not the TC flag.
o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne
o Handle NULL timeouts correctly on Solaris; from Trond Norbye
o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye
o Add Doxygen documentation to header files; from Mark Heily
o Add a evdns_set_transaction_id_fn() function to override the default
transaction ID generation code.
o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too.
o Fix signals implementation on windows.
o Fix http module on windows to close sockets properly.
o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Hagne Mahre and then Hannah Schroeter.)
o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye)
o Make test subdirectory compile under mingw.
o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile).
o Make the test.sh script run unit tests for the evpoll method.
o Make the entire evdns.h header enclosed in "extern C" as appropriate.
o Fix implementation of strsep on platforms that lack it
o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov.
o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
o use a min heap instead of a red-black tree for timeouts; as a result finding the min is an O(1) operation now; from Maxim Yegorushkin
o associate an event base with an rpc pool
o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent. libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support
o Begin using libtool's library versioning support correctly. If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility.
o Fix evhttp.h compilation when TAILQ_ENTRY is not defined.
o Small code cleanups in epoll_dispatch().
o Increase the maximum number of addresses read from a packet in evdns to 32.
o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much. Let us know if there's a good reason to put it back in.
o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++. Use a macro so that old code won't break. Mark the macro as deprecated.
o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests.
o Make event_base_free() succeed even if there are pending non-internal events on a base. This may still leak memory and fds, but at least it no longer crashes.
o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files.
o Remove the long-deprecated acconfig.h file.
o Do not require #include <sys/types.h> before #include <event.h>.
o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions.
o Fix many build issues when using the Microsoft C compiler.
o Remove a bash-ism in autogen.sh
o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne.
o Make the logic for active events work better with internal events; patch from Christopher Layne.
o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.

View file

@ -1,244 +0,0 @@
# Doxyfile 1.5.1
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project
#
# All text after a hash (#) is considered a comment and will be ignored
# The format is:
# TAG = value [value, ...]
# For lists items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (" ")
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
PROJECT_NAME = libevent
# Place all output under 'doxygen/'
OUTPUT_DIRECTORY = doxygen/
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
# will interpret the first line (until the first dot) of a JavaDoc-style
# comment as the brief description. If set to NO, the JavaDoc
# comments will behave just like the Qt-style comments (thus requiring an
# explicit @brief command for a brief description.)
JAVADOC_AUTOBRIEF = YES
# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
# sources only. Doxygen will then generate output that is more tailored for C.
# For instance, some of the names that are used will be different. The list
# of all members will be omitted, etc.
OPTIMIZE_OUTPUT_FOR_C = YES
# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
# brief documentation of file, namespace and class members alphabetically
# by member name. If set to NO (the default) the members will appear in
# declaration order.
SORT_BRIEF_DOCS = YES
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag can be used to specify the files and/or directories that contain
# documented source files. You may enter file names like "myfile.cpp" or
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
INPUT = event.h evdns.h evhttp.h evrpc.h \
include/event2/event.h include/event2/event_struct.h \
include/event2/event_compat.h \
include/event2/buffer_compat.h \
include/event2/buffer.h include/event2/thread.h \
include/event2/tag.h include/event2/bufferevent.h \
include/event2/bufferevent_struct.h \
include/event2/bufferevent_compat.h \
include/event2/util.h \
include/event2/rpc.h include/event2/rpc_struct.h \
include/event2/rpc_compat.h \
include/event2/dns.h include/event2/dns_struct.h \
include/event2/dns_compat.h \
include/event2/http.h include/event2/http_struct.h \
include/event2/http_compat.h
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
# generate HTML output.
GENERATE_HTML = YES
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
# generate Latex output.
GENERATE_LATEX = YES
# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
# If a relative path is entered the value of OUTPUT_DIRECTORY will be
# put in front of it. If left blank `latex' will be used as the default path.
LATEX_OUTPUT = latex
# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
# invoked. If left blank `latex' will be used as the default command name.
LATEX_CMD_NAME = latex
# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
# generate index for LaTeX. If left blank `makeindex' will be used as the
# default command name.
MAKEINDEX_CMD_NAME = makeindex
# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
# LaTeX documents. This may be useful for small projects and may help to
# save some trees in general.
COMPACT_LATEX = NO
# The PAPER_TYPE tag can be used to set the paper type that is used
# by the printer. Possible values are: a4, a4wide, letter, legal and
# executive. If left blank a4wide will be used.
PAPER_TYPE = a4wide
# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
# packages that should be included in the LaTeX output.
EXTRA_PACKAGES =
# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
# the generated latex document. The header should contain everything until
# the first chapter. If it is left blank doxygen will generate a
# standard header. Notice: only use this tag if you know what you are doing!
LATEX_HEADER =
# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
# is prepared for conversion to pdf (using ps2pdf). The pdf file will
# contain links (just like the HTML output) instead of page references
# This makes the output suitable for online browsing using a pdf viewer.
PDF_HYPERLINKS = NO
# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
# plain latex in the generated Makefile. Set this option to YES to get a
# higher quality PDF documentation.
USE_PDFLATEX = NO
# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
# command to the generated LaTeX files. This will instruct LaTeX to keep
# running if errors occur, instead of asking the user for help.
# This option is also used when generating formulas in HTML.
LATEX_BATCHMODE = NO
# If LATEX_HIDE_INDICES is set to YES then doxygen will not
# include the index chapters (such as File Index, Compound Index, etc.)
# in the output.
LATEX_HIDE_INDICES = NO
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
# generate man pages
GENERATE_MAN = YES
# The MAN_EXTENSION tag determines the extension that is added to
# the generated man pages (default is the subroutine's section .3)
MAN_EXTENSION = .3
# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
# then it will generate one additional man file for each entity
# documented in the real man page(s). These additional files
# only source the real man page, but without them the man command
# would be unable to find the correct page. The default is NO.
MAN_LINKS = YES
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
# evaluate all C-preprocessor directives found in the sources and include
# files.
ENABLE_PREPROCESSING = YES
# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
# names in the source code. If set to NO (the default) only conditional
# compilation will be performed. Macro expansion can be done in a controlled
# way by setting EXPAND_ONLY_PREDEF to YES.
MACRO_EXPANSION = NO
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
# then the macro expansion is limited to the macros specified with the
# PREDEFINED and EXPAND_AS_DEFINED tags.
EXPAND_ONLY_PREDEF = NO
# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
# in the INCLUDE_PATH (see below) will be searched if a #include is found.
SEARCH_INCLUDES = YES
# The INCLUDE_PATH tag can be used to specify one or more directories that
# contain include files that are not input files but should be processed by
# the preprocessor.
INCLUDE_PATH =
# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
# patterns (like *.h and *.hpp) to filter out the header-files in the
# directories. If left blank, the patterns specified with FILE_PATTERNS will
# be used.
INCLUDE_FILE_PATTERNS =
# The PREDEFINED tag can be used to specify one or more macro names that
# are defined before the preprocessor is started (similar to the -D option of
# gcc). The argument of the tag is a list of macros of the form: name
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed. To prevent a macro definition from being
# undefined via #undef or recursively expanded use the := operator
# instead of the = operator.
PREDEFINED = TAILQ_ENTRY RB_ENTRY _EVENT_DEFINED_TQENTRY
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
# this tag can be used to specify a list of macro names that should be expanded.
# The macro definition that is found in the sources will be used.
# Use the PREDEFINED tag if you want to use a different macro definition.
EXPAND_AS_DEFINED =
# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
# doxygen's preprocessor will remove all function-like macros that are alone
# on a line, have an all uppercase name, and do not end with a semicolon. Such
# function macros are typically used for boiler-plate code, and will confuse
# the parser if not removed.
SKIP_FUNCTION_MACROS = YES
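# Editor's note (not part of the original Doxyfile): with the settings above,
# the documentation can be generated by running
#   $ doxygen Doxyfile
# from the top-level source directory; the HTML, LaTeX and man-page output
# then lands beneath the doxygen/ directory named by OUTPUT_DIRECTORY.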

View file

@ -1,74 +0,0 @@
Libevent is available for use under the following license, commonly known
as the 3-clause (or "modified") BSD license:
==============================
Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================
Portions of Libevent are based on works by others, also made available by
them under the three-clause BSD license above. The copyright notices are
available in the corresponding source files; the license is as above. Here's
a list:
log.c:
Copyright (c) 2000 Dug Song <dugsong@monkey.org>
Copyright (c) 1993 The Regents of the University of California.
strlcpy.c:
Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
win32select.c:
Copyright (c) 2003 Michael A. Davis <mike@datanerds.net>
evport.c:
Copyright (c) 2007 Sun Microsystems
ht-internal.h:
Copyright (c) 2002 Christopher Clark
minheap-internal.h:
Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
==============================
The arc4module is available under the following, sometimes called the
"OpenBSD" license:
Copyright (c) 1996, David Mazieres <dm@uun.org>
Copyright (c) 2008, Damien Miller <djm@openbsd.org>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View file

@ -1,202 +0,0 @@
# 'foreign' means that we're not enforcing GNU package rules strictly.
# '1.7' means that we need automake 1.7 or later (and we do).
AUTOMAKE_OPTIONS = foreign 1.7
ACLOCAL_AMFLAGS = -I m4
# This is the "Release" of the Libevent ABI. It takes precedence over
# the VERSION_INFO, so that two versions of Libevent with the same
# "Release" are never binary-compatible.
#
# This number incremented once for the 2.0 release candidate, and
# shouldn't increment again until Libevent 3.0. Also, we shouldn't
# increment to Libevent 3.0 unless we know in advance we're breaking
# the ABI.
#
#RELEASE = -release 2.0
#RELEASE =
# This is the version info for the libevent binary API. It has three
# numbers:
# Current -- the number of the binary API that we're implementing
# Revision -- which iteration of the implementation of the binary
# API are we supplying?
# Age -- How many previous binary API versions do we also
# support?
#
# To increment a VERSION_INFO (current:revision:age):
# If the ABI didn't change:
# Return (current:revision+1:age)
# If the ABI changed, but it's backward-compatible:
# Return (current+1:0:age+1)
# If the ABI changed and it isn't backward-compatible:
# Return (current+1:0:0)
#
# Once an RC is out, DO NOT MAKE ANY ABI-BREAKING CHANGES IN THAT SERIES.
#VERSION_INFO = 3:0:1
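# Worked example (editor's sketch, not part of the original file): applying
# the backward-compatible rule above to 2.0.6-rc's 2:0:0 gives
# (current+1 : 0 : age+1) = 3:0:1, which is exactly the 2.0.7-rc entry in
# the history below.  In a stock libevent build this value would be passed
# to libtool with something like
#   libevent_la_LDFLAGS = $(RELEASE) -version-info $(VERSION_INFO)
# (Open MPI builds libevent as a noinst convenience library, so no such
# line appears here.)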
# History: RELEASE VERSION_INFO
# 2.0.1-alpha -- 2.0 1:0:0
# 2.0.2-alpha -- 2:0:0
# 2.0.3-alpha -- 2:0:0 (should have incremented; didn't.)
# 2.0.4-alpha -- 3:0:0
# 2.0.5-beta -- 4:0:0
# 2.0.6-rc -- 2.0 2:0:0
# 2.0.7-rc -- 2.0 3:0:1
# Planned:
# 2.0.8-stable -- 2.0 3:1:1 (Assuming no ABI change)
#
# If Libevent 2.1.1 makes 'struct event' etc opaque in the headers:
# 2.1.1-alpha -- 2.1 1:0:0
# If Libevent 2.1.1 does not make 'struct event' opaque in the headers:
# 2.1.1-alpha -- 2.1 1:0:0
# ABI version history for this package effectively restarts every time
# we change RELEASE. Version 1.4.x had RELEASE of 1.4.
#
# Ideally, we would not be using RELEASE at all; instead we could just
# use the VERSION_INFO field to label our backward-incompatible ABI
# changes, and those would be few and far between. Unfortunately,
# Libevent still exposes far too many volatile structures in its
# headers, so we pretty much have to assume that most development
# series will break ABI compatibility. For now, it's simplest just to
# keep incrementing the RELEASE between series and resetting VERSION_INFO.
#
# Eventually, when we get to the point where the structures in the
# headers are all non-changing (or not there at all!), we can shift to
# a more normal worldview where backward-incompatible ABI changes are
# nice and rare. For the next couple of years, though, 'struct event'
# is user-visible, and so we can pretty much guarantee that release
# series won't be binary-compatible.
if RPC
dist_bin_SCRIPTS = event_rpcgen.py
endif
# These sources are conditionally added by configure.in or conditionally
# included from other files.
PLATFORM_DEPENDENT_SRC = \
epoll_sub.c \
arc4random.c
EXTRA_DIST = \
LICENSE \
autogen.sh \
libevent.pc.in \
Doxyfile \
whatsnew-2.0.txt \
Makefile.nmake \
$(PLATFORM_DEPENDENT_SRC)
# OMPI: Changed to noinst and libevent.la
noinst_LTLIBRARIES = libevent.la
SUBDIRS = . include
if BUILD_WIN32
SYS_LIBS = -lws2_32
SYS_SRC = win32select.c evthread_win32.c buffer_iocp.c event_iocp.c \
bufferevent_async.c
SYS_INCLUDES = -IWIN32-Code
else
SYS_LIBS =
SYS_SRC =
SYS_INCLUDES =
endif
if SELECT_BACKEND
SYS_SRC += select.c
endif
if POLL_BACKEND
SYS_SRC += poll.c
endif
if DEVPOLL_BACKEND
SYS_SRC += devpoll.c
endif
if KQUEUE_BACKEND
SYS_SRC += kqueue.c
endif
if EPOLL_BACKEND
SYS_SRC += epoll.c
endif
if EVPORT_BACKEND
SYS_SRC += evport.c
endif
if SIGNAL_SUPPORT
SYS_SRC += signal.c
endif
# Open MPI: commented this out. We generate event-config.h in the
# component configure.m4.
#BUILT_SOURCES = ./include/event2/event-config.h
# Open MPI: commented this out. We generate event-config.h in the
# component configure.m4.
#./include/event2/event-config.h: config.h
# @MKDIR_P@ ./include/event2
# echo '/* event2/event-config.h' > $@
# echo ' *' >> $@
# echo ' * This file was generated by autoconf when libevent was built, and post-' >> $@
# echo ' * processed by Libevent so that its macros would have a uniform prefix.' >> $@
# echo ' *' >> $@
# echo ' * DO NOT EDIT THIS FILE.' >> $@
# echo ' *' >> $@
# echo ' * Do not rely on macros in this file existing in later versions.'>> $@
# echo ' */' >> $@
# echo '#ifndef _EVENT2_EVENT_CONFIG_H_' >> $@
# echo '#define _EVENT2_EVENT_CONFIG_H_' >> $@
# sed -e 's/#define /#define _EVENT_/' \
# -e 's/#undef /#undef _EVENT_/' \
# -e 's/#ifndef /#ifndef _EVENT_/' < config.h >> $@
# echo "#endif" >> $@
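# For illustration (editor's note, not part of the original rule): the sed
# expressions above would turn a config.h line such as
#   #define HAVE_EPOLL 1
# into
#   #define _EVENT_HAVE_EPOLL 1
# in include/event2/event-config.h, which is why every macro in the installed
# header carries the _EVENT_ prefix.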
CORE_SRC = event.c evthread.c buffer.c \
bufferevent.c bufferevent_sock.c bufferevent_filter.c \
bufferevent_pair.c listener.c bufferevent_ratelim.c \
evmap.c log.c evutil.c evutil_rand.c strlcpy.c event_tagging.c $(SYS_SRC)
CORE_LIBS =
if HTTP
CORE_SRC += http.c
endif
if DNS
CORE_SRC += evdns.c
endif
if RPC
CORE_SRC += evrpc.c
endif
if PTHREADS
CORE_SRC += evthread_pthread.c
endif
if OPENSSL
CORE_SRC += bufferevent_openssl.c
CORE_LIBS += -lcrypto -lssl
endif
libevent_la_SOURCES = $(CORE_SRC) $(headers)
libevent_la_LIBADD = $(CORE_LIBS) $(SYS_LIBS)
EXTRA_DIST += event.h evutil.h util-internal.h mm-internal.h ipv6-internal.h \
strlcpy-internal.h evbuffer-internal.h \
bufferevent-internal.h event-internal.h \
evthread-internal.h defer-internal.h \
minheap-internal.h log-internal.h evsignal-internal.h evmap-internal.h \
changelist-internal.h iocp-internal.h \
ratelim-internal.h \
WIN32-Code/event2/event-config.h \
WIN32-Code/tree.h \
compat/sys/queue.h $(SYS_INCLUDES) \
evhttp.h http-internal.h ht-internal.h \
evrpc.h evrpc-internal.h \
evdns.h
AM_CPPFLAGS = -I$(srcdir)/compat -I$(srcdir)/include -I$(builddir)/include $(SYS_INCLUDES)
DISTCLEANFILES = *~ libevent.pc $(builddir)/include/event2/event-config.h

View file

@ -1,47 +0,0 @@
# WATCH OUT! This makefile is a work in progress. It is probably missing
# tons of important things. DO NOT RELY ON IT TO BUILD A GOOD LIBEVENT.
# Needed for correctness
CFLAGS=/IWIN32-Code /Iinclude /Icompat /DWIN32 /DHAVE_CONFIG_H /I.
# For optimization and warnings
CFLAGS=$(CFLAGS) /Ox /W3 /wd4996 /nologo
# XXXX have a debug mode
LIBFLAGS=/nologo
CORE_OBJS=event.obj buffer.obj bufferevent.obj bufferevent_sock.obj \
bufferevent_pair.obj listener.obj evmap.obj log.obj evutil.obj \
strlcpy.obj signal.obj bufferevent_filter.obj evthread.obj \
bufferevent_ratelim.obj evutil_rand.obj
WIN_OBJS=win32select.obj evthread_win32.obj buffer_iocp.obj \
event_iocp.obj bufferevent_async.obj
EXTRA_OBJS=event_tagging.obj http.obj evdns.obj evrpc.obj
ALL_OBJS=$(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
STATIC_LIBS=libevent_core.lib libevent_extras.lib libevent.lib
all: static_libs tests
static_libs: $(STATIC_LIBS)
libevent_core.lib: $(CORE_OBJS) $(WIN_OBJS)
lib $(LIBFLAGS) $(CORE_OBJS) $(WIN_OBJS) /out:libevent_core.lib
libevent_extras.lib: $(EXTRA_OBJS)
lib $(LIBFLAGS) $(EXTRA_OBJS) /out:libevent_extras.lib
libevent.lib: $(CORE_OBJS) $(WIN_OBJS) $(EXTRA_OBJS)
lib $(LIBFLAGS) $(CORE_OBJS) $(EXTRA_OBJS) $(WIN_OBJS) /out:libevent.lib
clean:
del $(ALL_OBJS)
del $(STATIC_LIBS)
cd test
$(MAKE) /F Makefile.nmake clean
tests:
cd test
$(MAKE) /F Makefile.nmake
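# Typical invocation (editor's note, illustrative; assumes a Visual Studio
# command prompt where cl, lib and nmake are on the PATH):
#   nmake /F Makefile.nmake static_libs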

View file

@ -1,135 +0,0 @@
0. BUILDING AND INSTALLATION (Briefly)
$ ./configure
$ make
$ make verify # (optional)
$ sudo make install
1. BUILDING AND INSTALLATION (In Depth)
To build libevent, type
$ ./configure && make
(If you got libevent from the git repository, you will
first need to run the included "autogen.sh" script in order to
generate the configure script.)
Install as root via
# make install
You can run the regression tests by running
$ make verify
Before reporting any problems, please run the regression tests.
To enable low-level tracing, build the library as:
CFLAGS=-DUSE_DEBUG ./configure [...]
Standard configure flags should work. In particular, see:
--disable-shared Only build static libraries
--prefix Install all files relative to this directory.
The configure script also supports the following flags:
--enable-gcc-warnings Enable extra compiler checking with GCC.
--disable-malloc-replacement
Don't let applications replace our memory
management functions
--disable-openssl Disable support for OpenSSL encryption.
--disable-thread-support Don't support multithreaded environments.
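For example (editor's illustration; the prefix path is arbitrary), a static
build with low-level tracing enabled and installed under /opt/libevent would
be configured and built as:
 $ CFLAGS=-DUSE_DEBUG ./configure --disable-shared --prefix=/opt/libevent
 $ make
 $ make verify
 # make install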
2. USEFUL LINKS:
For the latest released version of Libevent, see the official website at
http://monkey.org/~provos/libevent/ .
There's a pretty good work-in-progress manual up at
http://www.wangafu.net/~nickm/libevent-book/ .
For the latest development versions of Libevent, access our Git repository
via
"git clone git://levent.git.sourceforge.net/gitroot/levent/libevent"
You can browse the git repository online at
http://levent.git.sourceforge.net/git/gitweb-index.cgi .
To report bugs, request features, or submit patches to Libevent,
use the Sourceforge trackers at
https://sourceforge.net/tracker/?group_id=50884 .
There's also a libevent-users mailing list for talking about Libevent
use and development: http://archives.seul.org/libevent/users/
3. ACKNOWLEDGMENTS
The following people have helped with suggestions, ideas, code or
fixing bugs:
Alejo
Weston Andros Adamson
William Ahern
Avi Bab
Gilad Benjamini
Stas Bekman
Joachim Bauch
Ralph Castain
Shuo Chen
Ka-Hing Cheung
Andrew Danforth
Christopher Davis
Mike Davis
Mihai Draghicioiu
Shie Erlich
Alexander von Gernler
Artur Grabowski
Sebastian Hahn
Aaron Hopkins
Tani Hosokawa
Claudio Jeker
Valery Kyholodov
Marko Kreen
Scott Lamb
Christopher Layne
Adam Langley
Philip Lewis
Zhou Li
David Libenzi
Moshe Litvin
Hagne Mahre
Lubomir Marinov
Nick Mathewson
James Mansion
Nicholas Marriott
Andrey Matveev
Caitlin Mercer
Felix Nawothnig
Trond Norbye
Richard Nyberg
Jon Oberheide
Phil Oleson
Dave Pacheco
Tassilo von Parseval
Pierre Phaneuf
Ryan Phillips
Jon Poland
Bert JW Regeer
Hanna Schroeter
Ralf Schmitt
Mike Smellie
Kevin Springborn
Ferenc Szalai
Dug Song
Brodie Thiesfield
Zack Weinberg
Taral
propanbutan
mmadia
If we have forgotten your name, please contact us.

View file

@ -1,355 +0,0 @@
/* event2/event-config.h
*
* This file was generated by autoconf when libevent was built, and post-
* processed by Libevent so that its macros would have a uniform prefix.
*
* DO NOT EDIT THIS FILE.
*
* Do not rely on macros in this file existing in later versions.
*/
#ifndef _EVENT_CONFIG_H_
#define _EVENT_CONFIG_H_
/* config.h. Generated by configure. */
/* config.h.in. Generated from configure.in by autoheader. */
#define _WIN32_WINNT 0x0502
/* Define if libevent should not allow replacing the mm functions */
/* #undef _EVENT_DISABLE_MM_REPLACEMENT */
/* Define if libevent should not be compiled with thread support */
/* #undef _EVENT_DISABLE_THREAD_SUPPORT */
/* Define if clock_gettime is available in libc */
/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
/* Define if no secure id variant is available */
/* #define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1 */
#define _EVENT_DNS_USE_FTIME_FOR_ID 1
/* Define to 1 if you have the <arpa/inet.h> header file. */
/* #undef _EVENT_HAVE_ARPA_INET_H */
/* Define to 1 if you have the `clock_gettime' function. */
/* #undef _EVENT_HAVE_CLOCK_GETTIME */
/* Define if /dev/poll is available */
/* #undef _EVENT_HAVE_DEVPOLL */
/* Define to 1 if you have the <dlfcn.h> header file. */
/* #undef _EVENT_HAVE_DLFCN_H */
/* Define if your system supports the epoll system calls */
/* #undef _EVENT_HAVE_EPOLL */
/* Define to 1 if you have the `epoll_ctl' function. */
/* #undef _EVENT_HAVE_EPOLL_CTL */
/* Define to 1 if you have the `eventfd' function. */
/* #undef _EVENT_HAVE_EVENTFD */
/* Define if your system supports event ports */
/* #undef _EVENT_HAVE_EVENT_PORTS */
/* Define to 1 if you have the `fcntl' function. */
/* #undef _EVENT_HAVE_FCNTL */
/* Define to 1 if you have the <fcntl.h> header file. */
#define _EVENT_HAVE_FCNTL_H 1
/* Define to 1 if you have the `getaddrinfo' function. */
#define _EVENT_HAVE_GETADDRINFO 1
/* Define to 1 if you have the `getnameinfo' function. */
#define _EVENT_HAVE_GETNAMEINFO 1
/* Define to 1 if you have the `getprotobynumber' function. */
#define _EVENT_HAVE_GETPROTOBYNUMBER 1
/* Define to 1 if you have the `getservbyname' function. */
#define _EVENT_HAVE_GETSERVBYNAME 1
/* Define to 1 if you have the `gettimeofday' function. */
/* #define _EVENT_HAVE_GETTIMEOFDAY 1 */
/* Define to 1 if you have the `inet_aton' function. */
/* #undef _EVENT_HAVE_INET_ATON */
/* Define to 1 if you have the `inet_ntop' function. */
/* #undef _EVENT_HAVE_INET_NTOP */
/* Define to 1 if you have the `inet_pton' function. */
/* #undef _EVENT_HAVE_INET_PTON */
/* Define to 1 if you have the <inttypes.h> header file. */
/* #define _EVENT_HAVE_INTTYPES_H 1 */
/* Define to 1 if you have the `kqueue' function. */
/* #undef _EVENT_HAVE_KQUEUE */
/* Define if the system has zlib */
/* #undef _EVENT_HAVE_LIBZ */
/* Define to 1 if you have the <memory.h> header file. */
#define _EVENT_HAVE_MEMORY_H 1
/* Define to 1 if you have the `mmap' function. */
/* #undef _EVENT_HAVE_MMAP */
/* Define to 1 if you have the <netinet/in6.h> header file. */
/* #undef _EVENT_HAVE_NETINET_IN6_H */
/* Define to 1 if you have the <netinet/in.h> header file. */
/* #undef _EVENT_HAVE_NETINET_IN_H */
/* Define to 1 if you have the `pipe' function. */
/* #undef _EVENT_HAVE_PIPE */
/* Define to 1 if you have the `poll' function. */
/* #undef _EVENT_HAVE_POLL */
/* Define to 1 if you have the <poll.h> header file. */
/* #undef _EVENT_HAVE_POLL_H */
/* Define to 1 if you have the `port_create' function. */
/* #undef _EVENT_HAVE_PORT_CREATE */
/* Define to 1 if you have the <port.h> header file. */
/* #undef _EVENT_HAVE_PORT_H */
/* Define if you have POSIX threads libraries and header files. */
/* #undef _EVENT_HAVE_PTHREAD */
/* Define if we have pthreads on this system */
/* #undef _EVENT_HAVE_PTHREADS */
/* Define to 1 if the system has the type `sa_family_t'. */
/* #undef _EVENT_HAVE_SA_FAMILY_T */
/* Define to 1 if you have the `select' function. */
/* #undef _EVENT_HAVE_SELECT */
/* Define to 1 if you have the `sendfile' function. */
/* #undef _EVENT_HAVE_SENDFILE */
/* Define if F_SETFD is defined in <fcntl.h> */
/* #undef _EVENT_HAVE_SETFD */
/* Define to 1 if you have the `sigaction' function. */
/* #undef _EVENT_HAVE_SIGACTION */
/* Define to 1 if you have the `signal' function. */
#define _EVENT_HAVE_SIGNAL 1
/* Define to 1 if you have the `splice' function. */
/* #undef _EVENT_HAVE_SPLICE */
/* Define to 1 if you have the <stdarg.h> header file. */
#define _EVENT_HAVE_STDARG_H 1
/* Define to 1 if you have the <stddef.h> header file. */
#define _EVENT_HAVE_STDDEF_H 1
/* Define to 1 if you have the <stdint.h> header file. */
/* #define _EVENT_HAVE_STDINT_H 1 */
/* Define to 1 if you have the <stdlib.h> header file. */
#define _EVENT_HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define _EVENT_HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define _EVENT_HAVE_STRING_H 1
/* Define to 1 if you have the `strlcpy' function. */
/* #undef _EVENT_HAVE_STRLCPY */
/* Define to 1 if you have the `strsep' function. */
/* #undef _EVENT_HAVE_STRSEP */
/* Define to 1 if you have the `strtok_r' function. */
/* #undef _EVENT_HAVE_STRTOK_R */
/* Define to 1 if you have the `strtoll' function. */
/* #define _EVENT_HAVE_STRTOLL 1 */
#define _EVENT_HAVE_STRUCT_ADDRINFO 1
/* Define to 1 if the system has the type `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
/* Define to 1 if `s6_addr16' is member of `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR_S6_ADDR16 1
/* Define to 1 if `s6_addr32' is member of `struct in6_addr'. */
#define _EVENT_HAVE_STRUCT_IN6_ADDR_S6_ADDR32 1
/* Define to 1 if the system has the type `struct sockaddr_in6'. */
#define _EVENT_HAVE_STRUCT_SOCKADDR_IN6 1
/* Define to 1 if `sin6_len' is member of `struct sockaddr_in6'. */
/* #undef _EVENT_HAVE_STRUCT_SOCKADDR_IN6_SIN6_LEN */
/* Define to 1 if `sin_len' is member of `struct sockaddr_in'. */
/* #undef _EVENT_HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */
/* Define to 1 if you have the <sys/devpoll.h> header file. */
/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
/* Define to 1 if you have the <sys/epoll.h> header file. */
/* #undef _EVENT_HAVE_SYS_EPOLL_H */
/* Define to 1 if you have the <sys/eventfd.h> header file. */
/* #undef _EVENT_HAVE_SYS_EVENTFD_H */
/* Define to 1 if you have the <sys/event.h> header file. */
/* #undef _EVENT_HAVE_SYS_EVENT_H */
/* Define to 1 if you have the <sys/ioctl.h> header file. */
/* #undef _EVENT_HAVE_SYS_IOCTL_H */
/* Define to 1 if you have the <sys/mman.h> header file. */
/* #undef _EVENT_HAVE_SYS_MMAN_H */
/* Define to 1 if you have the <sys/param.h> header file. */
/* #define _EVENT_HAVE_SYS_PARAM_H 1 */
/* Define to 1 if you have the <sys/queue.h> header file. */
/* #undef _EVENT_HAVE_SYS_QUEUE_H */
/* Define to 1 if you have the <sys/select.h> header file. */
/* #undef _EVENT_HAVE_SYS_SELECT_H */
/* Define to 1 if you have the <sys/sendfile.h> header file. */
/* #undef _EVENT_HAVE_SYS_SENDFILE_H */
/* Define to 1 if you have the <sys/socket.h> header file. */
/* #undef _EVENT_HAVE_SYS_SOCKET_H */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define _EVENT_HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
/* #define _EVENT_HAVE_SYS_TIME_H 1 */
/* Define to 1 if you have the <sys/types.h> header file. */
#define _EVENT_HAVE_SYS_TYPES_H 1
/* Define to 1 if you have the <sys/uio.h> header file. */
/* #undef _EVENT_HAVE_SYS_UIO_H */
/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
/* #undef _EVENT_HAVE_TAILQFOREACH */
/* Define if timeradd is defined in <sys/time.h> */
/* #undef _EVENT_HAVE_TIMERADD */
/* Define if timerclear is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCLEAR 1
/* Define if timercmp is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERCMP 1
/* Define if timerisset is defined in <sys/time.h> */
#define _EVENT_HAVE_TIMERISSET 1
/* Define to 1 if the system has the type `uint16_t'. */
/* #define _EVENT_HAVE_UINT16_T 1 */
/* Define to 1 if the system has the type `uint32_t'. */
/* #define _EVENT_HAVE_UINT32_T 1 */
/* Define to 1 if the system has the type `uint64_t'. */
/* #define _EVENT_HAVE_UINT64_T 1 */
/* Define to 1 if the system has the type `uint8_t'. */
/* #define _EVENT_HAVE_UINT8_T 1 */
/* Define to 1 if you have the <unistd.h> header file. */
/* #define _EVENT_HAVE_UNISTD_H 1 */
/* Define to 1 if you have the `vasprintf' function. */
/* #undef _EVENT_HAVE_VASPRINTF */
/* Define if kqueue works correctly with pipes */
/* #undef _EVENT_HAVE_WORKING_KQUEUE */
/* Numeric representation of the version */
#define _EVENT_NUMERIC_VERSION 0x02000700
/* Name of package */
#define _EVENT_PACKAGE "libevent"
/* Define to the address where bug reports for this package should be sent. */
#define _EVENT_PACKAGE_BUGREPORT ""
/* Define to the full name of this package. */
#define _EVENT_PACKAGE_NAME ""
/* Define to the full name and version of this package. */
#define _EVENT_PACKAGE_STRING ""
/* Define to the one symbol short name of this package. */
#define _EVENT_PACKAGE_TARNAME ""
/* Define to the version of this package. */
#define _EVENT_PACKAGE_VERSION ""
/* Define to necessary symbol if this constant uses a non-standard name on
your system. */
/* #undef _EVENT_PTHREAD_CREATE_JOINABLE */
/* The size of a `int', as computed by sizeof. */
#define _EVENT_SIZEOF_INT 4
/* The size of a `long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG 4
/* The size of a `long long', as computed by sizeof. */
#define _EVENT_SIZEOF_LONG_LONG 8
/* The size of a `short', as computed by sizeof. */
#define _EVENT_SIZEOF_SHORT 2
/* The size of `size_t', as computed by sizeof. */
#ifdef _WIN64
#define _EVENT_SIZEOF_SIZE_T 8
#else
#define _EVENT_SIZEOF_SIZE_T 4
#endif
/* Define to 1 if you have the ANSI C header files. */
#define _EVENT_STDC_HEADERS 1
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
#define _EVENT_TIME_WITH_SYS_TIME 1
/* Version number of package */
#define _EVENT_VERSION "2.0.7-rc"
/* Define to appropriate substitute if compiler doesn't have __func__ */
#define _EVENT___func__ __FUNCTION__
/* Define to empty if `const' does not conform to ANSI C. */
/* #undef _EVENT_const */
/* Define to `__inline__' or `__inline' if that's what the C compiler
calls it, or to nothing if 'inline' is not supported under any name. */
#ifndef _EVENT___cplusplus
#define _EVENT_inline __inline
#endif
/* Define to `int' if <sys/types.h> does not define. */
/* #undef _EVENT_pid_t */
/* Define to `unsigned' if <sys/types.h> does not define. */
/* #undef _EVENT_size_t */
/* Define to unsigned int if you don't have it */
#define _EVENT_socklen_t unsigned int
/* Define to `int' if <sys/types.h> does not define. */
#define _EVENT_ssize_t SSIZE_T
#endif

The diff for this file is not shown because of its size.

Просмотреть файл

@ -1,533 +0,0 @@
/* Portable arc4random.c based on arc4random.c from OpenBSD.
* Portable version by Chris Davis, adapted for Libevent by Nick Mathewson
* Copyright (c) 2010 Chris Davis, Niels Provos, and Nick Mathewson
*
* Note that in Libevent, this file isn't compiled directly. Instead,
* it's included from evutil_rand.c
*/
/*
* Copyright (c) 1996, David Mazieres <dm@uun.org>
* Copyright (c) 2008, Damien Miller <djm@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Arc4 random number generator for OpenBSD.
*
* This code is derived from section 17.1 of Applied Cryptography,
* second edition, which describes a stream cipher allegedly
* compatible with RSA Labs "RC4" cipher (the actual description of
* which is a trade secret). The same algorithm is used as a stream
* cipher called "arcfour" in Tatu Ylonen's ssh package.
*
* Here the stream cipher has been modified always to include the time
* when initializing the state. That makes it impossible to
* regenerate the same random sequence twice, so this can't be used
* for encryption, but will generate good random numbers.
*
* RC4 is a registered trademark of RSA Laboratories.
*/
#ifndef ARC4RANDOM_EXPORT
#define ARC4RANDOM_EXPORT
#endif
#ifndef ARC4RANDOM_UINT32
#define ARC4RANDOM_UINT32 uint32_t
#endif
#ifndef ARC4RANDOM_NO_INCLUDES
#ifdef WIN32
#include <wincrypt.h>
#include <process.h>
#else
#include <fcntl.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/time.h>
#ifdef _EVENT_HAVE_SYS_SYSCTL_H
#include <sys/sysctl.h>
#endif
#endif
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#endif
/* Add platform entropy 32 bytes (256 bits) at a time. */
#define ADD_ENTROPY 32
/* Re-seed from the platform RNG after generating this many bytes. */
#define BYTES_BEFORE_RESEED 1600000
struct arc4_stream {
unsigned char i;
unsigned char j;
unsigned char s[256];
};
#ifdef WIN32
#define getpid _getpid
#define pid_t int
#endif
static int rs_initialized;
static struct arc4_stream rs;
static pid_t arc4_stir_pid;
static int arc4_count;
static int arc4_seeded_ok;
static inline unsigned char arc4_getbyte(void);
static inline void
arc4_init(void)
{
int n;
for (n = 0; n < 256; n++)
rs.s[n] = n;
rs.i = 0;
rs.j = 0;
}
static inline void
arc4_addrandom(const unsigned char *dat, int datlen)
{
int n;
unsigned char si;
rs.i--;
for (n = 0; n < 256; n++) {
rs.i = (rs.i + 1);
si = rs.s[rs.i];
rs.j = (rs.j + si + dat[n % datlen]);
rs.s[rs.i] = rs.s[rs.j];
rs.s[rs.j] = si;
}
rs.j = rs.i;
}
#ifndef WIN32
static ssize_t
read_all(int fd, unsigned char *buf, size_t count)
{
size_t numread = 0;
ssize_t result;
while (numread < count) {
result = read(fd, buf+numread, count-numread);
if (result<0)
return -1;
else if (result == 0)
break;
numread += result;
}
return (ssize_t)numread;
}
#endif
#ifdef WIN32
#define TRY_SEED_WIN32
static int
arc4_seed_win32(void)
{
/* This is adapted from Tor's crypto_seed_rng() */
static int provider_set = 0;
static HCRYPTPROV provider;
unsigned char buf[ADD_ENTROPY];
if (!provider_set) {
if (!CryptAcquireContext(&provider, NULL, NULL, PROV_RSA_FULL,
CRYPT_VERIFYCONTEXT)) {
if (GetLastError() != (DWORD)NTE_BAD_KEYSET)
return -1;
}
provider_set = 1;
}
if (!CryptGenRandom(provider, sizeof(buf), buf))
return -1;
arc4_addrandom(buf, sizeof(buf));
memset(buf, 0, sizeof(buf));
arc4_seeded_ok = 1;
return 0;
}
#endif
#if defined(_EVENT_HAVE_SYS_SYSCTL_H)
#if _EVENT_HAVE_DECL_CTL_KERN && _EVENT_HAVE_DECL_KERN_RANDOM && _EVENT_HAVE_DECL_RANDOM_UUID
#define TRY_SEED_SYSCTL_LINUX
static int
arc4_seed_sysctl_linux(void)
{
/* Based on code by William Ahern, this function tries to use the
* RANDOM_UUID sysctl to get entropy from the kernel. This can work
* even if /dev/urandom is inaccessible for some reason (e.g., we're
* running in a chroot). */
int mib[] = { CTL_KERN, KERN_RANDOM, RANDOM_UUID };
unsigned char buf[ADD_ENTROPY];
size_t len, n;
int i, any_set;
memset(buf, 0, sizeof(buf));
for (len = 0; len < sizeof(buf); len += n) {
n = sizeof(buf) - len;
if (0 != sysctl(mib, 3, &buf[len], &n, NULL, 0))
return -1;
}
/* make sure that the buffer actually got set. */
for (i=any_set=0; i<sizeof(buf); ++i) {
any_set |= buf[i];
}
if (!any_set)
return -1;
arc4_addrandom(buf, sizeof(buf));
memset(buf, 0, sizeof(buf));
arc4_seeded_ok = 1;
return 0;
}
#endif
#if _EVENT_HAVE_DECL_CTL_KERN && _EVENT_HAVE_DECL_KERN_ARND
#define TRY_SEED_SYSCTL_BSD
static int
arc4_seed_sysctl_bsd(void)
{
/* Based on code from William Ahern and from OpenBSD, this function
* tries to use the KERN_ARND syscall to get entropy from the kernel.
* This can work even if /dev/urandom is inaccessible for some reason
* (e.g., we're running in a chroot). */
int mib[] = { CTL_KERN, KERN_ARND };
unsigned char buf[ADD_ENTROPY];
size_t len, n;
int i, any_set;
memset(buf, 0, sizeof(buf));
len = sizeof(buf);
if (sysctl(mib, 2, buf, &len, NULL, 0) == -1) {
for (len = 0; len < sizeof(buf); len += sizeof(unsigned)) {
n = sizeof(unsigned);
if (n + len > sizeof(buf))
n = len - sizeof(buf);
if (sysctl(mib, 2, &buf[len], &n, NULL, 0) == -1)
return -1;
}
}
/* make sure that the buffer actually got set. */
for (i=any_set=0; i<sizeof(buf); ++i) {
any_set |= buf[i];
}
if (!any_set)
return -1;
arc4_addrandom(buf, sizeof(buf));
memset(buf, 0, sizeof(buf));
arc4_seeded_ok = 1;
return 0;
}
#endif
#endif /* defined(_EVENT_HAVE_SYS_SYSCTL_H) */
#ifdef __linux__
#define TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
static int
arc4_seed_proc_sys_kernel_random_uuid(void)
{
/* Occasionally, somebody will make /proc/sys accessible in a chroot,
* but not /dev/urandom. Let's try /proc/sys/kernel/random/uuid.
* Its format is stupid, so we need to decode it from hex.
*/
int fd;
char buf[128];
unsigned char entropy[64];
int bytes, n, i, nybbles;
for (bytes = 0; bytes<ADD_ENTROPY; ) {
fd = open("/proc/sys/kernel/random/uuid", O_RDONLY, 0);
if (fd < 0)
return -1;
n = read(fd, buf, sizeof(buf));
close(fd);
if (n<=0)
return -1;
memset(entropy, 0, sizeof(entropy));
for (i=nybbles=0; i<n; ++i) {
if (EVUTIL_ISXDIGIT(buf[i])) {
int nyb = evutil_hex_char_to_int(buf[i]);
if (nybbles & 1) {
entropy[nybbles/2] |= nyb;
} else {
entropy[nybbles/2] |= nyb<<4;
}
++nybbles;
}
}
if (nybbles < 2)
return -1;
arc4_addrandom(entropy, nybbles/2);
bytes += nybbles/2;
}
memset(entropy, 0, sizeof(entropy));
memset(buf, 0, sizeof(buf));
return 0;
}
#endif
#ifndef WIN32
#define TRY_SEED_URANDOM
static int
arc4_seed_urandom(void)
{
/* This is adapted from Tor's crypto_seed_rng() */
static const char *filenames[] = {
"/dev/srandom", "/dev/urandom", "/dev/random", NULL
};
unsigned char buf[ADD_ENTROPY];
int fd, i;
size_t n;
for (i = 0; filenames[i]; ++i) {
fd = open(filenames[i], O_RDONLY, 0);
if (fd<0)
continue;
n = read_all(fd, buf, sizeof(buf));
close(fd);
if (n != sizeof(buf))
return -1;
arc4_addrandom(buf, sizeof(buf));
memset(buf, 0, sizeof(buf));
arc4_seeded_ok = 1;
return 0;
}
return -1;
}
#endif
static int
arc4_seed(void)
{
int ok = 0;
/* We try every method that might work, and don't give up even if one
* does seem to work. There's no real harm in over-seeding, and if
* one of these sources turns out to be broken, that would be bad. */
#ifdef TRY_SEED_WIN32
if (0 == arc4_seed_win32())
ok = 1;
#endif
#ifdef TRY_SEED_URANDOM
if (0 == arc4_seed_urandom())
ok = 1;
#endif
#ifdef TRY_SEED_PROC_SYS_KERNEL_RANDOM_UUID
if (0 == arc4_seed_proc_sys_kernel_random_uuid())
ok = 1;
#endif
#ifdef TRY_SEED_SYSCTL_LINUX
/* Apparently Linux is deprecating sysctl, and spewing warning
* messages when you try to use it. */
if (!ok && 0 == arc4_seed_sysctl_linux())
ok = 1;
#endif
#ifdef TRY_SEED_SYSCTL_BSD
if (0 == arc4_seed_sysctl_bsd())
ok = 1;
#endif
return ok ? 0 : -1;
}
static void
arc4_stir(void)
{
int i;
if (!rs_initialized) {
arc4_init();
rs_initialized = 1;
}
arc4_seed();
/*
* Discard early keystream, as per recommendations in
* "Weaknesses in the Key Scheduling Algorithm of RC4" by
* Scott Fluhrer, Itsik Mantin, and Adi Shamir.
* http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
*
* Ilya Mironov's "(Not So) Random Shuffles of RC4" suggests that
* we drop at least 2*256 bytes, with 12*256 as a conservative
* value.
*
* RFC4345 says to drop 6*256.
*
* At least some versions of this code drop 4*256, in a mistaken
* belief that "words" in the Fluhrer/Mantin/Shamir paper refers
* to processor words.
*
* We add another sect to the cargo cult, and choose 12*256.
*/
for (i = 0; i < 12*256; i++)
(void)arc4_getbyte();
arc4_count = BYTES_BEFORE_RESEED;
}
static void
arc4_stir_if_needed(void)
{
pid_t pid = getpid();
if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != pid)
{
arc4_stir_pid = pid;
arc4_stir();
}
}
static inline unsigned char
arc4_getbyte(void)
{
unsigned char si, sj;
rs.i = (rs.i + 1);
si = rs.s[rs.i];
rs.j = (rs.j + si);
sj = rs.s[rs.j];
rs.s[rs.i] = sj;
rs.s[rs.j] = si;
return (rs.s[(si + sj) & 0xff]);
}
static inline unsigned int
arc4_getword(void)
{
unsigned int val;
val = arc4_getbyte() << 24;
val |= arc4_getbyte() << 16;
val |= arc4_getbyte() << 8;
val |= arc4_getbyte();
return val;
}
#ifndef ARC4RANDOM_NOSTIR
ARC4RANDOM_EXPORT int
arc4random_stir(void)
{
_ARC4_LOCK();
/* arc4_stir() has no return value in this version, so simply report success. */
arc4_stir();
_ARC4_UNLOCK();
return 0;
}
#endif
#ifndef ARC4RANDOM_NOADDRANDOM
ARC4RANDOM_EXPORT void
arc4random_addrandom(const unsigned char *dat, int datlen)
{
int j;
_ARC4_LOCK();
if (!rs_initialized)
arc4_stir();
for (j = 0; j < datlen; j += 256) {
/* arc4_addrandom() ignores all but the first 256 bytes of
* its input. We want to make sure to look at ALL the
* data in 'dat', just in case the user is doing something
* crazy like passing us all the files in /var/log. */
arc4_addrandom(dat + j, datlen - j);
}
_ARC4_UNLOCK();
}
#endif
#ifndef ARC4RANDOM_NORANDOM
ARC4RANDOM_EXPORT ARC4RANDOM_UINT32
arc4random(void)
{
ARC4RANDOM_UINT32 val;
_ARC4_LOCK();
arc4_count -= 4;
arc4_stir_if_needed();
val = arc4_getword();
_ARC4_UNLOCK();
return val;
}
#endif
ARC4RANDOM_EXPORT void
arc4random_buf(void *_buf, size_t n)
{
unsigned char *buf = _buf;
_ARC4_LOCK();
arc4_stir_if_needed();
while (n--) {
if (--arc4_count <= 0)
arc4_stir();
buf[n] = arc4_getbyte();
}
_ARC4_UNLOCK();
}
#ifndef ARC4RANDOM_NOUNIFORM
/*
* Calculate a uniformly distributed random number less than upper_bound
* avoiding "modulo bias".
*
* Uniformity is achieved by generating new random numbers until the one
* returned is outside the range [0, 2**32 % upper_bound). This
* guarantees the selected random number will be inside
* [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
* after reduction modulo upper_bound.
*/
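/* Worked example (editor's note, not part of the original source): with
* upper_bound = 10 we get min = 2**32 % 10 = 6; the values 0..5 are
* rejected, and the remaining 2**32 - 6 = 4294967290 values form exactly
* 429496729 complete blocks of 10, so r % upper_bound is uniform. */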
ARC4RANDOM_EXPORT unsigned int
arc4random_uniform(unsigned int upper_bound)
{
ARC4RANDOM_UINT32 r, min;
if (upper_bound < 2)
return 0;
#if (UINT_MAX > 0xffffffffUL)
min = 0x100000000UL % upper_bound;
#else
/* Calculate (2**32 % upper_bound) avoiding 64-bit math */
if (upper_bound > 0x80000000)
min = 1 + ~upper_bound; /* 2**32 - upper_bound */
else {
/* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */
min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound;
}
#endif
/*
* This could theoretically loop forever but each retry has
* p > 0.5 (worst case, usually far better) of selecting a
* number inside the range we need, so it should rarely need
* to re-roll.
*/
for (;;) {
r = arc4random();
if (r >= min)
break;
}
return r % upper_bound;
}
#endif

View file

@ -1,15 +0,0 @@
#!/bin/sh
if [ -x "`which autoreconf 2>/dev/null`" ] ; then
exec autoreconf -ivf
fi
LIBTOOLIZE=libtoolize
SYSNAME=`uname`
if [ "x$SYSNAME" = "xDarwin" ] ; then
LIBTOOLIZE=glibtoolize
fi
aclocal -I m4 && \
autoheader && \
$LIBTOOLIZE && \
autoconf && \
automake --add-missing --force-missing --copy

The diff for this file is not shown because of its size.

View file

@ -1,316 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
@file buffer_iocp.c
This module implements overlapped read and write functions for evbuffer
objects on Windows.
*/
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/util.h"
#include "event2/thread.h"
#include "event2/event-config.h"
#include "util-internal.h"
#include "evthread-internal.h"
#include "evbuffer-internal.h"
#include "iocp-internal.h"
#include "mm-internal.h"
#include <winsock2.h>
#include <windows.h>
#include <stdio.h>
#define MAX_WSABUFS 16
/** An evbuffer that can handle overlapped IO. */
struct evbuffer_overlapped {
struct evbuffer buffer;
/** The socket that we're doing overlapped IO on. */
evutil_socket_t fd;
/** pending I/O type */
unsigned read_in_progress : 1;
unsigned write_in_progress : 1;
/** The first pinned chain in the buffer. */
struct evbuffer_chain *first_pinned;
/** How many chains are pinned; how many of the fields in buffers
* are we using. */
int n_buffers;
WSABUF buffers[MAX_WSABUFS];
};
/** Given an evbuffer, return the corresponding evbuffer_overlapped structure, or NULL if
* the evbuffer isn't overlapped. */
static inline struct evbuffer_overlapped *
upcast_evbuffer(struct evbuffer *buf)
{
if (!buf || !buf->is_overlapped)
return NULL;
return EVUTIL_UPCAST(buf, struct evbuffer_overlapped, buffer);
}
/** Unpin all the chains noted as pinned in 'eo'. */
static void
pin_release(struct evbuffer_overlapped *eo, unsigned flag)
{
int i;
struct evbuffer_chain *next, *chain = eo->first_pinned;
for (i = 0; i < eo->n_buffers; ++i) {
EVUTIL_ASSERT(chain);
next = chain->next;
_evbuffer_chain_unpin(chain, flag);
chain = next;
}
}
void
evbuffer_commit_read(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
struct evbuffer_chain **chainp;
size_t remaining, len;
unsigned i;
EVBUFFER_LOCK(evbuf);
EVUTIL_ASSERT(buf->read_in_progress && !buf->write_in_progress);
EVUTIL_ASSERT(nBytes >= 0); /* XXXX Can this be false? */
evbuffer_unfreeze(evbuf, 0);
chainp = evbuf->last_with_datap;
if (!((*chainp)->flags & EVBUFFER_MEM_PINNED_R))
chainp = &(*chainp)->next;
remaining = nBytes;
for (i = 0; remaining > 0 && i < buf->n_buffers; ++i) {
EVUTIL_ASSERT(*chainp);
len = buf->buffers[i].len;
if (remaining < len)
len = remaining;
(*chainp)->off += len;
evbuf->last_with_datap = chainp;
remaining -= len;
chainp = &(*chainp)->next;
}
pin_release(buf, EVBUFFER_MEM_PINNED_R);
buf->read_in_progress = 0;
evbuf->total_len += nBytes;
_evbuffer_decref_and_unlock(evbuf);
}
void
evbuffer_commit_write(struct evbuffer *evbuf, ev_ssize_t nBytes)
{
struct evbuffer_overlapped *buf = upcast_evbuffer(evbuf);
EVBUFFER_LOCK(evbuf);
EVUTIL_ASSERT(buf->write_in_progress && !buf->read_in_progress);
evbuffer_unfreeze(evbuf, 1);
evbuffer_drain(evbuf, nBytes);
pin_release(buf,EVBUFFER_MEM_PINNED_W);
buf->write_in_progress = 0;
_evbuffer_decref_and_unlock(evbuf);
}
struct evbuffer *
evbuffer_overlapped_new(evutil_socket_t fd)
{
struct evbuffer_overlapped *evo;
evo = mm_calloc(1, sizeof(struct evbuffer_overlapped));
TAILQ_INIT(&evo->buffer.callbacks);
evo->buffer.refcnt = 1;
evo->buffer.last_with_datap = &evo->buffer.first;
evo->buffer.is_overlapped = 1;
evo->fd = fd;
return &evo->buffer;
}
int
evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t at_most,
struct event_overlapped *ol)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
int r = -1;
int i;
struct evbuffer_chain *chain;
DWORD bytesSent;
if (!buf_o) {
/* No buffer, or it isn't overlapped */
return -1;
}
EVBUFFER_LOCK(buf);
EVUTIL_ASSERT(!buf_o->read_in_progress);
if (buf->freeze_start || buf_o->write_in_progress)
goto done;
if (!buf->total_len) {
/* Nothing to write */
r = 0;
goto done;
} else if (at_most < 0 || (size_t)at_most > buf->total_len) {
at_most = buf->total_len;
}
evbuffer_freeze(buf, 1);
buf_o->first_pinned = NULL;
buf_o->n_buffers = 0;
memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
chain = buf_o->first_pinned = buf->first;
for (i=0; i < MAX_WSABUFS && chain; ++i, chain=chain->next) {
WSABUF *b = &buf_o->buffers[i];
b->buf = chain->buffer + chain->misalign;
_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_W);
if ((size_t)at_most > chain->off) {
b->len = chain->off;
at_most -= chain->off;
} else {
b->len = at_most;
++i;
break;
}
}
buf_o->n_buffers = i;
_evbuffer_incref(buf);
if (WSASend(buf_o->fd, buf_o->buffers, i, &bytesSent, 0,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
if (error != WSA_IO_PENDING) {
/* An actual error. */
pin_release(buf_o, EVBUFFER_MEM_PINNED_W);
evbuffer_unfreeze(buf, 1);
evbuffer_free(buf); /* decref */
goto done;
}
}
buf_o->write_in_progress = 1;
r = 0;
done:
EVBUFFER_UNLOCK(buf);
return r;
}
int
evbuffer_launch_read(struct evbuffer *buf, size_t at_most,
struct event_overlapped *ol)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
int r = -1, i;
int nvecs;
int npin=0;
struct evbuffer_chain *chain=NULL, **chainp;
DWORD bytesRead;
DWORD flags = 0;
struct evbuffer_iovec vecs[MAX_WSABUFS];
if (!buf_o)
return -1;
EVBUFFER_LOCK(buf);
EVUTIL_ASSERT(!buf_o->write_in_progress);
if (buf->freeze_end || buf_o->read_in_progress)
goto done;
buf_o->first_pinned = NULL;
buf_o->n_buffers = 0;
memset(buf_o->buffers, 0, sizeof(buf_o->buffers));
if (_evbuffer_expand_fast(buf, at_most, MAX_WSABUFS) == -1)
goto done;
evbuffer_freeze(buf, 0);
nvecs = _evbuffer_read_setup_vecs(buf, at_most,
vecs, MAX_WSABUFS, &chainp, 1);
for (i=0;i<nvecs;++i) {
WSABUF_FROM_EVBUFFER_IOV(
&buf_o->buffers[i],
&vecs[i]);
}
buf_o->n_buffers = nvecs;
buf_o->first_pinned = chain = *chainp;
npin=0;
for ( ; chain; chain = chain->next) {
_evbuffer_chain_pin(chain, EVBUFFER_MEM_PINNED_R);
++npin;
}
EVUTIL_ASSERT(npin == nvecs);
_evbuffer_incref(buf);
if (WSARecv(buf_o->fd, buf_o->buffers, nvecs, &bytesRead, &flags,
&ol->overlapped, NULL)) {
int error = WSAGetLastError();
if (error != WSA_IO_PENDING) {
/* An actual error. */
pin_release(buf_o, EVBUFFER_MEM_PINNED_R);
evbuffer_unfreeze(buf, 0);
evbuffer_free(buf); /* decref */
goto done;
}
}
buf_o->read_in_progress = 1;
r = 0;
done:
EVBUFFER_UNLOCK(buf);
return r;
}
evutil_socket_t
_evbuffer_overlapped_get_fd(struct evbuffer *buf)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
return buf_o ? buf_o->fd : -1;
}
void
_evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd)
{
struct evbuffer_overlapped *buf_o = upcast_evbuffer(buf);
EVBUFFER_LOCK(buf);
/* XXX is this right?, should it cancel current I/O operations? */
if (buf_o)
buf_o->fd = fd;
EVBUFFER_UNLOCK(buf);
}

View file

@ -1,387 +0,0 @@
/*
* Copyright (c) 2008-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _BUFFEREVENT_INTERNAL_H_
#define _BUFFEREVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include "evutil.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "ratelim-internal.h"
/* These flags are reasons that we might be declining to actually enable
reading or writing on a bufferevent.
*/
/* On all bufferevents, for reading: used when we have read up to the
watermark value.
On a filtering bufferevent, for writing: used when the underlying
bufferevent's write buffer has been filled up to its watermark
value.
*/
#define BEV_SUSPEND_WM 0x01
/* On a base bufferevent: when we have emptied a bandwidth bucket */
#define BEV_SUSPEND_BW 0x02
/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
#define BEV_SUSPEND_BW_GROUP 0x04
/* On a socket bufferevent: can't do any operations while we're waiting for
* name lookup to finish. */
#define BEV_SUSPEND_LOOKUP 0x08
struct bufferevent_rate_limit_group {
/** List of all members in the group */
TAILQ_HEAD(rlim_group_member_list, bufferevent_private) members;
/** Current limits for the group. */
struct ev_token_bucket rate_limit;
struct ev_token_bucket_cfg rate_limit_cfg;
/** True iff we don't want to read from any member of the group until
* the token bucket refills. */
unsigned read_suspended : 1;
/** True iff we don't want to write from any member of the group until
* the token bucket refills. */
unsigned write_suspended : 1;
/** True iff we were unable to suspend one of the bufferevents in the
* group for reading the last time we tried, and we should try
* again. */
unsigned pending_unsuspend_read : 1;
/** True iff we were unable to suspend one of the bufferevents in the
* group for writing the last time we tried, and we should try
* again. */
unsigned pending_unsuspend_write : 1;
/*@{*/
/** Total number of bytes read or written in this group since last
* reset. */
ev_uint64_t total_read;
ev_uint64_t total_written;
/*@}*/
/** The number of bufferevents in the group. */
int n_members;
/** The smallest number of bytes that any member of the group should
* be limited to read or write at a time. */
ev_uint32_t min_share;
/** Timeout event that goes off once a tick, when the bucket is ready
* to refill. */
struct event master_refill_event;
/** Lock to protect the members of this group. This lock should nest
* within every bufferevent lock: if you are holding this lock, do
* not assume you can lock another bufferevent. */
void *lock;
};
/** Fields for rate-limiting a single bufferevent. */
struct bufferevent_rate_limit {
/* Linked-list elements for storing this bufferevent_private in a
* group.
*
* Note that this field is supposed to be protected by the group
* lock */
TAILQ_ENTRY(bufferevent_private) next_in_group;
/** The rate-limiting group for this bufferevent, or NULL if it is
* only rate-limited on its own. */
struct bufferevent_rate_limit_group *group;
/* This bufferevent's current limits. */
struct ev_token_bucket limit;
/* Pointer to the rate-limit configuration for this bufferevent.
* Can be shared. XXX reference-count this? */
struct ev_token_bucket_cfg *cfg;
/* Timeout event used when one of this bufferevent's buckets is
* empty. */
struct event refill_bucket_event;
};
/** Parts of the bufferevent structure that are shared among all bufferevent
* types, but not exposed in bufferevent_struct.h. */
struct bufferevent_private {
/** The underlying bufferevent structure. */
struct bufferevent bev;
/** Evbuffer callback to enforce watermarks on input. */
struct evbuffer_cb_entry *read_watermarks_cb;
/** If set, we should free the lock when we free the bufferevent. */
unsigned own_lock : 1;
/** Flag: set if we have deferred callbacks and a read callback is
* pending. */
unsigned readcb_pending : 1;
/** Flag: set if we have deferred callbacks and a write callback is
* pending. */
unsigned writecb_pending : 1;
/** Flag: set if we are currently busy connecting. */
unsigned connecting : 1;
/** Flag: set if a connect failed prematurely; this is a hack for
* getting around the bufferevent abstraction. */
unsigned connection_refused : 1;
/** Set to the events pending if we have deferred callbacks and
* an events callback is pending. */
short eventcb_pending;
/** If set, read is suspended until one or more conditions are over.
* The actual value here is a bitfield of those conditions; see the
* BEV_SUSPEND_* flags above. */
short read_suspended;
/** If set, writing is suspended until one or more conditions are over.
* The actual value here is a bitfield of those conditions; see the
* BEV_SUSPEND_* flags above. */
short write_suspended;
/** Set to the current socket errno if we have deferred callbacks and
* an events callback is pending. */
int errno_pending;
/** The DNS error code for bufferevent_socket_connect_hostname */
int dns_error;
/** Used to implement deferred callbacks */
struct deferred_cb deferred;
/** The options this bufferevent was constructed with */
enum bufferevent_options options;
/** Current reference count for this bufferevent. */
int refcnt;
/** Lock for this bufferevent. Shared by the inbuf and the outbuf.
* If NULL, locking is disabled. */
void *lock;
/** Rate-limiting information for this bufferevent */
struct bufferevent_rate_limit *rate_limiting;
};
/** Possible operations for a control callback. */
enum bufferevent_ctrl_op {
BEV_CTRL_SET_FD,
BEV_CTRL_GET_FD,
BEV_CTRL_GET_UNDERLYING
};
/** Possible data types for a control callback */
union bufferevent_ctrl_data {
void *ptr;
evutil_socket_t fd;
};
/**
Implementation table for a bufferevent: holds function pointers and other
information to make the various bufferevent types work.
*/
struct bufferevent_ops {
/** The name of the bufferevent's type. */
const char *type;
/** At what offset into the implementation type will we find a
bufferevent structure?
Example: if the type is implemented as
struct bufferevent_x {
int extra_data;
struct bufferevent bev;
}
then mem_offset should be offsetof(struct bufferevent_x, bev)
*/
off_t mem_offset;
/** Enables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
int (*enable)(struct bufferevent *, short);
/** Disables one or more of EV_READ|EV_WRITE on a bufferevent. Does
not need to adjust the 'enabled' field. Returns 0 on success, -1
on failure.
*/
int (*disable)(struct bufferevent *, short);
/** Free any storage and deallocate any extra data or structures used
in this implementation.
*/
void (*destruct)(struct bufferevent *);
/** Called when the timeouts on the bufferevent have changed.*/
int (*adj_timeouts)(struct bufferevent *);
/** Called to flush data. */
int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);
/** Called to access miscellaneous fields. */
int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
};
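/* Illustrative sketch (not part of the original header): what a minimal
 * implementation table for a hypothetical "null" backend could look like.
 * bufferevent_null, be_null_enable, be_null_disable, and be_null_destruct are
 * invented names for illustration only; the real tables are declared just
 * below and defined in bufferevent_sock.c, bufferevent_filter.c,
 * bufferevent_pair.c, and bufferevent_async.c. */
struct bufferevent_null {
    struct bufferevent_private bev;  /* shared state, found via mem_offset */
    int example_flag;                /* backend-specific data would go here */
};
static int be_null_enable(struct bufferevent *bev, short what) { (void)bev; (void)what; return 0; }
static int be_null_disable(struct bufferevent *bev, short what) { (void)bev; (void)what; return 0; }
static void be_null_destruct(struct bufferevent *bev) { (void)bev; }
static const struct bufferevent_ops bufferevent_ops_null = {
    "null",
    evutil_offsetof(struct bufferevent_null, bev.bev),
    be_null_enable,
    be_null_disable,
    be_null_destruct,
    NULL,    /* no timeout adjustment needed */
    NULL,    /* no flush support */
    NULL,    /* no ctrl support */
};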
extern const struct bufferevent_ops bufferevent_ops_socket;
extern const struct bufferevent_ops bufferevent_ops_filter;
extern const struct bufferevent_ops bufferevent_ops_pair;
#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)
#ifdef WIN32
extern const struct bufferevent_ops bufferevent_ops_async;
#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
#else
#define BEV_IS_ASYNC(bevp) 0
#endif
/** Initialize the shared parts of a bufferevent. */
int bufferevent_init_common(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options);
/** For internal use: temporarily stop all reads on bufev, until the conditions
* in 'what' are over. */
void bufferevent_suspend_read(struct bufferevent *bufev, short what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
* reading if there are no conditions left. */
void bufferevent_unsuspend_read(struct bufferevent *bufev, short what);
/** For internal use: temporarily stop all writes on bufev, until the conditions
* in 'what' are over. */
void bufferevent_suspend_write(struct bufferevent *bufev, short what);
/** For internal use: clear the conditions 'what' on bufev, and re-enable
* writing if there are no conditions left. */
void bufferevent_unsuspend_write(struct bufferevent *bufev, short what);
#define bufferevent_wm_suspend_read(b) \
bufferevent_suspend_read((b), BEV_SUSPEND_WM)
#define bufferevent_wm_unsuspend_read(b) \
bufferevent_unsuspend_read((b), BEV_SUSPEND_WM)
/** Internal: Set up locking on a bufferevent. If lock is set, use it.
* Otherwise, use a new lock. */
int bufferevent_enable_locking(struct bufferevent *bufev, void *lock);
/** Internal: Increment the reference count on bufev. */
void bufferevent_incref(struct bufferevent *bufev);
/** Internal: Lock bufev and increase its reference count. */
void _bufferevent_incref_and_lock(struct bufferevent *bufev);
/** Internal: Decrement the reference count on bufev. Returns 1 if it freed
* the bufferevent.*/
int bufferevent_decref(struct bufferevent *bufev);
/** Internal: Drop the reference count on bufev, freeing as necessary, and
* unlocking it otherwise. Returns 1 if it freed the bufferevent. */
int _bufferevent_decref_and_unlock(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a read callback, schedule
* a readcb. Otherwise just run the readcb. */
void _bufferevent_run_readcb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have a write callback, schedule
* a writecb. Otherwise just run the writecb. */
void _bufferevent_run_writecb(struct bufferevent *bufev);
/** Internal: If callbacks are deferred and we have an eventcb, schedule
* it to run with events "what". Otherwise just run the eventcb. */
void _bufferevent_run_eventcb(struct bufferevent *bufev, short what);
/** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in
* which case add ev with no timeout. */
int _bufferevent_add_event(struct event *ev, const struct timeval *tv);
/* =========
* These next functions implement timeouts for bufferevents that aren't doing
* anything else with ev_read and ev_write, to handle timeouts.
* ========= */
/** Internal use: Set up the ev_read and ev_write callbacks so that
* the other "generic_timeout" functions will work on it. Call this from
* the constructor function. */
void _bufferevent_init_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Delete the ev_read and ev_write callbacks if they're pending.
* Call this from the destructor function. */
int _bufferevent_del_generic_timeout_cbs(struct bufferevent *bev);
/** Internal use: Add or delete the generic timeout events as appropriate.
* (If an event is enabled and a timeout is set, we add the event. Otherwise
* we delete it.) Call this from anything that changes the timeout values,
* that enables EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */
int _bufferevent_generic_adj_timeouts(struct bufferevent *bev);
/** Internal use: We have just successfully read data into an inbuf, so
* reset the read timeout (if any). */
#define BEV_RESET_GENERIC_READ_TIMEOUT(bev) \
do { \
if (evutil_timerisset(&(bev)->timeout_read)) \
event_add(&(bev)->ev_read, &(bev)->timeout_read); \
} while (0)
/** Internal use: We have just successfully written data from an outbuf, so
* reset the write timeout (if any). */
#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev) \
do { \
if (evutil_timerisset(&(bev)->timeout_write)) \
event_add(&(bev)->ev_write, &(bev)->timeout_write); \
} while (0)
#define BEV_DEL_GENERIC_READ_TIMEOUT(bev) \
event_del(&(bev)->ev_read)
#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev) \
event_del(&(bev)->ev_write)
/** Internal: Given a bufferevent, return its corresponding
* bufferevent_private. */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
#define BEV_LOCK(b) _EVUTIL_NIL_STMT
#define BEV_UNLOCK(b) _EVUTIL_NIL_STMT
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do { \
struct bufferevent_private *locking = BEV_UPCAST(b); \
EVLOCK_LOCK(locking->lock, 0); \
} while (0)
/** Internal: Release the lock (if any) on a bufferevent */
#define BEV_UNLOCK(b) do { \
struct bufferevent_private *locking = BEV_UPCAST(b); \
EVLOCK_UNLOCK(locking->lock, 0); \
} while (0)
#endif
/* ==== For rate-limiting. */
int _bufferevent_decrement_write_buckets(struct bufferevent_private *bev,
int bytes);
int _bufferevent_decrement_read_buckets(struct bufferevent_private *bev,
int bytes);
int _bufferevent_get_read_max(struct bufferevent_private *bev);
int _bufferevent_get_write_max(struct bufferevent_private *bev);
#ifdef __cplusplus
}
#endif
#endif /* _BUFFEREVENT_INTERNAL_H_ */

View file

@ -1,836 +0,0 @@
/*
* Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include <errno.h>
#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "evbuffer-internal.h"
#include "util-internal.h"
void
bufferevent_suspend_read(struct bufferevent *bufev, short what)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
if (!bufev_private->read_suspended)
bufev->be_ops->disable(bufev, EV_READ);
bufev_private->read_suspended |= what;
BEV_UNLOCK(bufev);
}
void
bufferevent_unsuspend_read(struct bufferevent *bufev, short what)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
bufev_private->read_suspended &= ~what;
if (!bufev_private->read_suspended)
bufev->be_ops->enable(bufev, EV_READ);
BEV_UNLOCK(bufev);
}
void
bufferevent_suspend_write(struct bufferevent *bufev, short what)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
if (!bufev_private->write_suspended)
bufev->be_ops->disable(bufev, EV_WRITE);
bufev_private->write_suspended |= what;
BEV_UNLOCK(bufev);
}
void
bufferevent_unsuspend_write(struct bufferevent *bufev, short what)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
bufev_private->write_suspended &= ~what;
if (!bufev_private->write_suspended)
bufev->be_ops->enable(bufev, EV_WRITE);
BEV_UNLOCK(bufev);
}
/* Callback to implement watermarks on the input buffer. Only enabled
* if the watermark is set. */
static void
bufferevent_inbuf_wm_cb(struct evbuffer *buf,
const struct evbuffer_cb_info *cbinfo,
void *arg)
{
struct bufferevent *bufev = arg;
size_t size;
size = evbuffer_get_length(buf);
if (size >= bufev->wm_read.high)
bufferevent_wm_suspend_read(bufev);
else
bufferevent_wm_unsuspend_read(bufev);
}
static void
bufferevent_run_deferred_callbacks_locked(struct deferred_cb *_, void *arg)
{
struct bufferevent_private *bufev_private = arg;
struct bufferevent *bufev = &bufev_private->bev;
BEV_LOCK(bufev);
if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
bufev->errorcb) {
/* The "connected" happened before any reads or writes, so
send it first. */
bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
bufev->errorcb(bufev, BEV_EVENT_CONNECTED, bufev->cbarg);
}
if (bufev_private->readcb_pending && bufev->readcb) {
bufev_private->readcb_pending = 0;
bufev->readcb(bufev, bufev->cbarg);
}
if (bufev_private->writecb_pending && bufev->writecb) {
bufev_private->writecb_pending = 0;
bufev->writecb(bufev, bufev->cbarg);
}
if (bufev_private->eventcb_pending && bufev->errorcb) {
short what = bufev_private->eventcb_pending;
int err = bufev_private->errno_pending;
bufev_private->eventcb_pending = 0;
bufev_private->errno_pending = 0;
EVUTIL_SET_SOCKET_ERROR(err);
bufev->errorcb(bufev, what, bufev->cbarg);
}
_bufferevent_decref_and_unlock(bufev);
}
static void
bufferevent_run_deferred_callbacks_unlocked(struct deferred_cb *_, void *arg)
{
struct bufferevent_private *bufev_private = arg;
struct bufferevent *bufev = &bufev_private->bev;
BEV_LOCK(bufev);
#define UNLOCKED(stmt) \
do { BEV_UNLOCK(bufev); stmt; BEV_LOCK(bufev); } while(0)
if ((bufev_private->eventcb_pending & BEV_EVENT_CONNECTED) &&
bufev->errorcb) {
/* The "connected" happened before any reads or writes, so
send it first. */
bufferevent_event_cb errorcb = bufev->errorcb;
void *cbarg = bufev->cbarg;
bufev_private->eventcb_pending &= ~BEV_EVENT_CONNECTED;
UNLOCKED(errorcb(bufev, BEV_EVENT_CONNECTED, cbarg));
}
if (bufev_private->readcb_pending && bufev->readcb) {
bufferevent_data_cb readcb = bufev->readcb;
void *cbarg = bufev->cbarg;
bufev_private->readcb_pending = 0;
UNLOCKED(readcb(bufev, cbarg));
}
if (bufev_private->writecb_pending && bufev->writecb) {
bufferevent_data_cb writecb = bufev->writecb;
void *cbarg = bufev->cbarg;
bufev_private->writecb_pending = 0;
UNLOCKED(writecb(bufev, cbarg));
}
if (bufev_private->eventcb_pending && bufev->errorcb) {
bufferevent_event_cb errorcb = bufev->errorcb;
void *cbarg = bufev->cbarg;
short what = bufev_private->eventcb_pending;
int err = bufev_private->errno_pending;
bufev_private->eventcb_pending = 0;
bufev_private->errno_pending = 0;
EVUTIL_SET_SOCKET_ERROR(err);
UNLOCKED(errorcb(bufev,what,cbarg));
}
_bufferevent_decref_and_unlock(bufev);
#undef UNLOCKED
}
#define SCHEDULE_DEFERRED(bevp) \
do { \
bufferevent_incref(&(bevp)->bev); \
event_deferred_cb_schedule( \
event_base_get_deferred_cb_queue((bevp)->bev.ev_base), \
&(bevp)->deferred); \
} while (0)
void
_bufferevent_run_readcb(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
if (bufev->readcb == NULL)
return;
if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->readcb_pending = 1;
if (!p->deferred.queued)
SCHEDULE_DEFERRED(p);
} else {
bufev->readcb(bufev, bufev->cbarg);
}
}
void
_bufferevent_run_writecb(struct bufferevent *bufev)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
if (bufev->writecb == NULL)
return;
if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->writecb_pending = 1;
if (!p->deferred.queued)
SCHEDULE_DEFERRED(p);
} else {
bufev->writecb(bufev, bufev->cbarg);
}
}
void
_bufferevent_run_eventcb(struct bufferevent *bufev, short what)
{
/* Requires that we hold the lock and a reference */
struct bufferevent_private *p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
if (bufev->errorcb == NULL)
return;
if (p->options & BEV_OPT_DEFER_CALLBACKS) {
p->eventcb_pending |= what;
p->errno_pending = EVUTIL_SOCKET_ERROR();
if (!p->deferred.queued)
SCHEDULE_DEFERRED(p);
} else {
bufev->errorcb(bufev, what, bufev->cbarg);
}
}
int
bufferevent_init_common(struct bufferevent_private *bufev_private,
struct event_base *base,
const struct bufferevent_ops *ops,
enum bufferevent_options options)
{
struct bufferevent *bufev = &bufev_private->bev;
if (!bufev->input) {
if ((bufev->input = evbuffer_new()) == NULL)
return -1;
}
if (!bufev->output) {
if ((bufev->output = evbuffer_new()) == NULL) {
evbuffer_free(bufev->input);
return -1;
}
}
bufev_private->refcnt = 1;
bufev->ev_base = base;
/* Disable timeouts. */
evutil_timerclear(&bufev->timeout_read);
evutil_timerclear(&bufev->timeout_write);
bufev->be_ops = ops;
/*
* Set to EV_WRITE so that using bufferevent_write is going to
* trigger a callback. Reading needs to be explicitly enabled
* because otherwise no data will be available.
*/
bufev->enabled = EV_WRITE;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
if (options & BEV_OPT_THREADSAFE) {
if (bufferevent_enable_locking(bufev, NULL) < 0) {
/* cleanup */
evbuffer_free(bufev->input);
evbuffer_free(bufev->output);
bufev->input = NULL;
bufev->output = NULL;
return -1;
}
}
#endif
if ((options & (BEV_OPT_DEFER_CALLBACKS|BEV_OPT_UNLOCK_CALLBACKS))
== BEV_OPT_UNLOCK_CALLBACKS) {
event_warnx("UNLOCK_CALLBACKS requires DEFER_CALLBACKS");
return -1;
}
if (options & BEV_OPT_DEFER_CALLBACKS) {
if (options & BEV_OPT_UNLOCK_CALLBACKS)
event_deferred_cb_init(&bufev_private->deferred,
bufferevent_run_deferred_callbacks_unlocked,
bufev_private);
else
event_deferred_cb_init(&bufev_private->deferred,
bufferevent_run_deferred_callbacks_locked,
bufev_private);
}
bufev_private->options = options;
evbuffer_set_parent(bufev->input, bufev);
evbuffer_set_parent(bufev->output, bufev);
return 0;
}
void
bufferevent_setcb(struct bufferevent *bufev,
bufferevent_data_cb readcb, bufferevent_data_cb writecb,
bufferevent_event_cb eventcb, void *cbarg)
{
BEV_LOCK(bufev);
bufev->readcb = readcb;
bufev->writecb = writecb;
bufev->errorcb = eventcb;
bufev->cbarg = cbarg;
BEV_UNLOCK(bufev);
}
struct evbuffer *
bufferevent_get_input(struct bufferevent *bufev)
{
return bufev->input;
}
struct evbuffer *
bufferevent_get_output(struct bufferevent *bufev)
{
return bufev->output;
}
int
bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
{
if (evbuffer_add(bufev->output, data, size) == -1)
return (-1);
return 0;
}
int
bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
if (evbuffer_add_buffer(bufev->output, buf) == -1)
return (-1);
return 0;
}
size_t
bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
{
return (evbuffer_remove(bufev->input, data, size));
}
int
bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf)
{
return (evbuffer_add_buffer(buf, bufev->input));
}
int
bufferevent_enable(struct bufferevent *bufev, short event)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
short impl_events = event;
int r = 0;
_bufferevent_incref_and_lock(bufev);
if (bufev_private->read_suspended)
impl_events &= ~EV_READ;
if (bufev_private->write_suspended)
impl_events &= ~EV_WRITE;
bufev->enabled |= event;
if (impl_events && bufev->be_ops->enable(bufev, impl_events) < 0)
r = -1;
_bufferevent_decref_and_unlock(bufev);
return r;
}
int
bufferevent_set_timeouts(struct bufferevent *bufev,
const struct timeval *tv_read,
const struct timeval *tv_write)
{
int r = 0;
BEV_LOCK(bufev);
if (tv_read) {
bufev->timeout_read = *tv_read;
} else {
evutil_timerclear(&bufev->timeout_read);
}
if (tv_write) {
bufev->timeout_write = *tv_write;
} else {
evutil_timerclear(&bufev->timeout_write);
}
if (bufev->be_ops->adj_timeouts)
r = bufev->be_ops->adj_timeouts(bufev);
BEV_UNLOCK(bufev);
return r;
}
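/* Illustrative sketch (not part of the original file): a typical caller of
 * bufferevent_set_timeouts(). Passing NULL for a timeval clears that
 * timeout, exactly as the branches above implement. */
static void example_set_read_timeout(struct bufferevent *bev)
{
    struct timeval read_tv = { 5, 0 };
    /* Five-second read timeout, no write timeout. */
    bufferevent_set_timeouts(bev, &read_tv, NULL);
}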
/* Obsolete; use bufferevent_set_timeouts */
void
bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write)
{
struct timeval tv_read, tv_write;
struct timeval *ptv_read = NULL, *ptv_write = NULL;
memset(&tv_read, 0, sizeof(tv_read));
memset(&tv_write, 0, sizeof(tv_write));
if (timeout_read) {
tv_read.tv_sec = timeout_read;
ptv_read = &tv_read;
}
if (timeout_write) {
tv_write.tv_sec = timeout_write;
ptv_write = &tv_write;
}
bufferevent_set_timeouts(bufev, ptv_read, ptv_write);
}
int
bufferevent_disable(struct bufferevent *bufev, short event)
{
int r = 0;
BEV_LOCK(bufev);
bufev->enabled &= ~event;
if (bufev->be_ops->disable(bufev, event) < 0)
r = -1;
BEV_UNLOCK(bufev);
return r;
}
/*
* Sets the water marks
*/
void
bufferevent_setwatermark(struct bufferevent *bufev, short events,
size_t lowmark, size_t highmark)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
if (events & EV_WRITE) {
bufev->wm_write.low = lowmark;
bufev->wm_write.high = highmark;
}
if (events & EV_READ) {
bufev->wm_read.low = lowmark;
bufev->wm_read.high = highmark;
if (highmark) {
/* There is now a new high-water mark for read.
Enable the callback if needed, and see if we should
suspend or unsuspend reading. */
if (bufev_private->read_watermarks_cb == NULL) {
bufev_private->read_watermarks_cb =
evbuffer_add_cb(bufev->input,
bufferevent_inbuf_wm_cb,
bufev);
}
evbuffer_cb_set_flags(bufev->input,
bufev_private->read_watermarks_cb,
EVBUFFER_CB_ENABLED|EVBUFFER_CB_NODEFER);
if (evbuffer_get_length(bufev->input) > highmark)
bufferevent_wm_suspend_read(bufev);
else if (evbuffer_get_length(bufev->input) < highmark)
bufferevent_wm_unsuspend_read(bufev);
} else {
/* There is now no high-water mark for read. */
if (bufev_private->read_watermarks_cb)
evbuffer_cb_clear_flags(bufev->input,
bufev_private->read_watermarks_cb,
EVBUFFER_CB_ENABLED);
bufferevent_wm_unsuspend_read(bufev);
}
}
BEV_UNLOCK(bufev);
}
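/* Illustrative sketch (not part of the original file): what the read
 * watermarks configured above mean for a caller. The 128/4096 values are
 * arbitrary examples. */
static void example_set_read_watermarks(struct bufferevent *bev)
{
    /* Deliver the read callback only once at least 128 bytes are buffered,
     * and suspend reading (via bufferevent_wm_suspend_read) once the input
     * buffer reaches 4096 bytes. */
    bufferevent_setwatermark(bev, EV_READ, 128, 4096);
    bufferevent_enable(bev, EV_READ);
}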
int
bufferevent_flush(struct bufferevent *bufev,
short iotype,
enum bufferevent_flush_mode mode)
{
int r = -1;
BEV_LOCK(bufev);
if (bufev->be_ops->flush)
r = bufev->be_ops->flush(bufev, iotype, mode);
BEV_UNLOCK(bufev);
return r;
}
void
_bufferevent_incref_and_lock(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
BEV_UPCAST(bufev);
BEV_LOCK(bufev);
++bufev_private->refcnt;
}
#if 0
static void
_bufferevent_transfer_lock_ownership(struct bufferevent *donor,
struct bufferevent *recipient)
{
struct bufferevent_private *d = BEV_UPCAST(donor);
struct bufferevent_private *r = BEV_UPCAST(recipient);
if (d->lock != r->lock)
return;
if (r->own_lock)
return;
if (d->own_lock) {
d->own_lock = 0;
r->own_lock = 1;
}
}
#endif
int
_bufferevent_decref_and_unlock(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
struct bufferevent *underlying;
EVUTIL_ASSERT(bufev_private->refcnt > 0);
if (--bufev_private->refcnt) {
BEV_UNLOCK(bufev);
return 0;
}
underlying = bufferevent_get_underlying(bufev);
/* Clean up the shared info */
if (bufev->be_ops->destruct)
bufev->be_ops->destruct(bufev);
/* XXX what happens if refcnt for these buffers is > 1?
* The buffers can share a lock with this bufferevent object,
* but the lock might be destroyed below. */
/* evbuffer will free the callbacks */
evbuffer_free(bufev->input);
evbuffer_free(bufev->output);
if (bufev_private->rate_limiting) {
if (bufev_private->rate_limiting->group)
bufferevent_remove_from_rate_limit_group_internal(bufev,0);
if (event_initialized(&bufev_private->rate_limiting->refill_bucket_event))
event_del(&bufev_private->rate_limiting->refill_bucket_event);
event_debug_unassign(&bufev_private->rate_limiting->refill_bucket_event);
mm_free(bufev_private->rate_limiting);
bufev_private->rate_limiting = NULL;
}
event_debug_unassign(&bufev->ev_read);
event_debug_unassign(&bufev->ev_write);
BEV_UNLOCK(bufev);
if (bufev_private->own_lock)
EVTHREAD_FREE_LOCK(bufev_private->lock,
EVTHREAD_LOCKTYPE_RECURSIVE);
/* Free the actual allocated memory. */
mm_free(((char*)bufev) - bufev->be_ops->mem_offset);
/* Release the reference to underlying now that we no longer need the
* reference to it. We wait this long mainly in case our lock is
* shared with underlying.
*
* The 'destruct' function will also drop a reference to underlying
* if BEV_OPT_CLOSE_ON_FREE is set.
*
* XXX Should we/can we just refcount evbuffer/bufferevent locks?
* It would probably save us some headaches.
*/
if (underlying)
bufferevent_decref(underlying);
return 1;
}
int
bufferevent_decref(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
return _bufferevent_decref_and_unlock(bufev);
}
void
bufferevent_free(struct bufferevent *bufev)
{
BEV_LOCK(bufev);
bufferevent_setcb(bufev, NULL, NULL, NULL, NULL);
_bufferevent_decref_and_unlock(bufev);
}
void
bufferevent_incref(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_private =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
BEV_LOCK(bufev);
++bufev_private->refcnt;
BEV_UNLOCK(bufev);
}
int
bufferevent_enable_locking(struct bufferevent *bufev, void *lock)
{
#ifdef _EVENT_DISABLE_THREAD_SUPPORT
return -1;
#else
struct bufferevent *underlying;
if (BEV_UPCAST(bufev)->lock)
return -1;
underlying = bufferevent_get_underlying(bufev);
if (!lock && underlying && BEV_UPCAST(underlying)->lock) {
lock = BEV_UPCAST(underlying)->lock;
BEV_UPCAST(bufev)->lock = lock;
BEV_UPCAST(bufev)->own_lock = 0;
} else if (!lock) {
EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
if (!lock)
return -1;
BEV_UPCAST(bufev)->lock = lock;
BEV_UPCAST(bufev)->own_lock = 1;
} else {
BEV_UPCAST(bufev)->lock = lock;
BEV_UPCAST(bufev)->own_lock = 0;
}
evbuffer_enable_locking(bufev->input, lock);
evbuffer_enable_locking(bufev->output, lock);
if (underlying && !BEV_UPCAST(underlying)->lock)
bufferevent_enable_locking(underlying, lock);
return 0;
#endif
}
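/* Illustrative sketch (not part of the original file): callers usually reach
 * bufferevent_enable_locking() indirectly through the BEV_OPT_THREADSAFE
 * option. Assumes the program linked against event_pthreads and called
 * evthread_use_pthreads() (or the Windows equivalent) before creating the
 * event_base, so that lock callbacks are available. */
static struct bufferevent *
example_threadsafe_bufferevent(struct event_base *base, evutil_socket_t fd)
{
    /* bufferevent_init_common() sees BEV_OPT_THREADSAFE and calls
     * bufferevent_enable_locking(bufev, NULL), allocating a recursive lock
     * shared by the bufferevent and its two evbuffers. */
    return bufferevent_socket_new(base, fd,
        BEV_OPT_THREADSAFE | BEV_OPT_CLOSE_ON_FREE);
}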
int
bufferevent_setfd(struct bufferevent *bev, evutil_socket_t fd)
{
union bufferevent_ctrl_data d;
int res = -1;
d.fd = fd;
BEV_LOCK(bev);
if (bev->be_ops->ctrl)
res = bev->be_ops->ctrl(bev, BEV_CTRL_SET_FD, &d);
BEV_UNLOCK(bev);
return res;
}
evutil_socket_t
bufferevent_getfd(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
int res = -1;
d.fd = -1;
BEV_LOCK(bev);
if (bev->be_ops->ctrl)
res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_FD, &d);
BEV_UNLOCK(bev);
return (res<0) ? -1 : d.fd;
}
short
bufferevent_get_enabled(struct bufferevent *bufev)
{
short r;
BEV_LOCK(bufev);
r = bufev->enabled;
BEV_UNLOCK(bufev);
return r;
}
struct bufferevent *
bufferevent_get_underlying(struct bufferevent *bev)
{
union bufferevent_ctrl_data d;
int res = -1;
d.ptr = NULL;
BEV_LOCK(bev);
if (bev->be_ops->ctrl)
res = bev->be_ops->ctrl(bev, BEV_CTRL_GET_UNDERLYING, &d);
BEV_UNLOCK(bev);
return (res<0) ? NULL : d.ptr;
}
static void
bufferevent_generic_read_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
_bufferevent_incref_and_lock(bev);
bufferevent_disable(bev, EV_READ);
_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_READING);
_bufferevent_decref_and_unlock(bev);
}
static void
bufferevent_generic_write_timeout_cb(evutil_socket_t fd, short event, void *ctx)
{
struct bufferevent *bev = ctx;
_bufferevent_incref_and_lock(bev);
bufferevent_disable(bev, EV_WRITE);
_bufferevent_run_eventcb(bev, BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING);
_bufferevent_decref_and_unlock(bev);
}
void
_bufferevent_init_generic_timeout_cbs(struct bufferevent *bev)
{
evtimer_assign(&bev->ev_read, bev->ev_base,
bufferevent_generic_read_timeout_cb, bev);
evtimer_assign(&bev->ev_write, bev->ev_base,
bufferevent_generic_write_timeout_cb, bev);
}
int
_bufferevent_del_generic_timeout_cbs(struct bufferevent *bev)
{
int r1,r2;
r1 = event_del(&bev->ev_read);
r2 = event_del(&bev->ev_write);
if (r1<0 || r2<0)
return -1;
return 0;
}
int
_bufferevent_generic_adj_timeouts(struct bufferevent *bev)
{
const short enabled = bev->enabled;
struct bufferevent_private *bev_p =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
int r1=0, r2=0;
if ((enabled & EV_READ) && !bev_p->read_suspended &&
evutil_timerisset(&bev->timeout_read))
r1 = event_add(&bev->ev_read, &bev->timeout_read);
else
r1 = event_del(&bev->ev_read);
if ((enabled & EV_WRITE) && !bev_p->write_suspended &&
evutil_timerisset(&bev->timeout_write) &&
evbuffer_get_length(bev->output))
r2 = event_add(&bev->ev_write, &bev->timeout_write);
else
r2 = event_del(&bev->ev_write);
if (r1 < 0 || r2 < 0)
return -1;
return 0;
}
int
_bufferevent_add_event(struct event *ev, const struct timeval *tv)
{
if (tv->tv_sec == 0 && tv->tv_usec == 0)
return event_add(ev, NULL);
else
return event_add(ev, tv);
}
/* For use by user programs only; internally, we should be calling
either _bufferevent_incref_and_lock(), or BEV_LOCK. */
void
bufferevent_lock(struct bufferevent *bev)
{
_bufferevent_incref_and_lock(bev);
}
void
bufferevent_unlock(struct bufferevent *bev)
{
_bufferevent_decref_and_unlock(bev);
}

View file

@ -1,652 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#include <sys/queue.h>
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "event2/util.h"
#include "event-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
#include "iocp-internal.h"
/* prototypes */
static int be_async_enable(struct bufferevent *, short);
static int be_async_disable(struct bufferevent *, short);
static void be_async_destruct(struct bufferevent *);
static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
struct bufferevent_async {
struct bufferevent_private bev;
struct event_overlapped connect_overlapped;
struct event_overlapped read_overlapped;
struct event_overlapped write_overlapped;
unsigned read_in_progress : 1;
unsigned write_in_progress : 1;
unsigned ok : 1;
unsigned read_added : 1;
unsigned write_added : 1;
};
const struct bufferevent_ops bufferevent_ops_async = {
"socket_async",
evutil_offsetof(struct bufferevent_async, bev.bev),
be_async_enable,
be_async_disable,
be_async_destruct,
_bufferevent_generic_adj_timeouts,
be_async_flush,
be_async_ctrl,
};
static inline struct bufferevent_async *
upcast(struct bufferevent *bev)
{
struct bufferevent_async *bev_a;
if (bev->be_ops != &bufferevent_ops_async)
return NULL;
bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
return bev_a;
}
static inline struct bufferevent_async *
upcast_connect(struct event_overlapped *eo)
{
struct bufferevent_async *bev_a;
bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
return bev_a;
}
static inline struct bufferevent_async *
upcast_read(struct event_overlapped *eo)
{
struct bufferevent_async *bev_a;
bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
return bev_a;
}
static inline struct bufferevent_async *
upcast_write(struct event_overlapped *eo)
{
struct bufferevent_async *bev_a;
bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
return bev_a;
}
static void
bev_async_del_write(struct bufferevent_async *beva)
{
struct bufferevent *bev = &beva->bev.bev;
if (beva->write_added) {
beva->write_added = 0;
event_base_del_virtual(bev->ev_base);
}
}
static void
bev_async_del_read(struct bufferevent_async *beva)
{
struct bufferevent *bev = &beva->bev.bev;
if (beva->read_added) {
beva->read_added = 0;
event_base_del_virtual(bev->ev_base);
}
}
static void
bev_async_add_write(struct bufferevent_async *beva)
{
struct bufferevent *bev = &beva->bev.bev;
if (!beva->write_added) {
beva->write_added = 1;
event_base_add_virtual(bev->ev_base);
}
}
static void
bev_async_add_read(struct bufferevent_async *beva)
{
struct bufferevent *bev = &beva->bev.bev;
if (!beva->read_added) {
beva->read_added = 1;
event_base_add_virtual(bev->ev_base);
}
}
static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
size_t at_most;
int limit;
struct bufferevent *bev = &beva->bev.bev;
/* Don't write if there's a write in progress, or we do not
* want to write, or when there's nothing left to write. */
if (beva->write_in_progress)
return;
if (!beva->ok || !(bev->enabled&EV_WRITE) ||
!evbuffer_get_length(bev->output)) {
bev_async_del_write(beva);
return;
}
at_most = evbuffer_get_length(bev->output);
/* XXXX This over-commits. */
limit = _bufferevent_get_write_max(&beva->bev);
if (at_most >= limit)
at_most = limit;
if (beva->bev.write_suspended) {
bev_async_del_write(beva);
return;
}
/* XXXX doesn't respect low-water mark very well. */
bufferevent_incref(bev);
if (evbuffer_launch_write(bev->output, at_most,
&beva->write_overlapped)) {
bufferevent_decref(bev);
beva->ok = 0;
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
} else {
beva->write_in_progress = 1;
bev_async_add_write(beva);
}
}
static void
bev_async_consider_reading(struct bufferevent_async *beva)
{
size_t cur_size;
size_t read_high;
size_t at_most;
int limit;
struct bufferevent *bev = &beva->bev.bev;
/* Don't read if there is a read in progress, or we do not
* want to read. */
if (beva->read_in_progress)
return;
if (!beva->ok || !(bev->enabled&EV_READ)) {
bev_async_del_read(beva);
return;
}
/* Don't read if we're full */
cur_size = evbuffer_get_length(bev->input);
read_high = bev->wm_read.high;
if (read_high) {
if (cur_size >= read_high) {
bev_async_del_read(beva);
return;
}
at_most = read_high - cur_size;
} else {
at_most = 16384; /* FIXME totally magic. */
}
/* XXXX This over-commits. */
limit = _bufferevent_get_read_max(&beva->bev);
if (at_most >= limit)
at_most = limit;
if (beva->bev.read_suspended) {
bev_async_del_read(beva);
return;
}
bufferevent_incref(bev);
if (evbuffer_launch_read(bev->input, at_most, &beva->read_overlapped)) {
beva->ok = 0;
bufferevent_decref(bev);
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
} else {
beva->read_in_progress = 1;
bev_async_add_read(beva);
}
return;
}
static void
be_async_outbuf_callback(struct evbuffer *buf,
const struct evbuffer_cb_info *cbinfo,
void *arg)
{
struct bufferevent *bev = arg;
struct bufferevent_async *bev_async = upcast(bev);
/* If we added data to the outbuf and were not writing before,
* we may want to write now. */
_bufferevent_incref_and_lock(bev);
if (cbinfo->n_added)
bev_async_consider_writing(bev_async);
_bufferevent_decref_and_unlock(bev);
}
static void
be_async_inbuf_callback(struct evbuffer *buf,
const struct evbuffer_cb_info *cbinfo,
void *arg)
{
struct bufferevent *bev = arg;
struct bufferevent_async *bev_async = upcast(bev);
/* If we drained data from the inbuf and were not reading before,
* we may want to read now */
_bufferevent_incref_and_lock(bev);
if (cbinfo->n_deleted)
bev_async_consider_reading(bev_async);
_bufferevent_decref_and_unlock(bev);
}
static int
be_async_enable(struct bufferevent *buf, short what)
{
struct bufferevent_async *bev_async = upcast(buf);
if (!bev_async->ok)
return -1;
/* NOTE: This interferes with non-blocking connect */
if (what & EV_READ)
BEV_RESET_GENERIC_READ_TIMEOUT(buf);
if (what & EV_WRITE)
BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
/* If we newly enable reading or writing, and we aren't reading or
writing already, consider launching a new read or write. */
if (what & EV_READ)
bev_async_consider_reading(bev_async);
if (what & EV_WRITE)
bev_async_consider_writing(bev_async);
return 0;
}
static int
be_async_disable(struct bufferevent *bev, short what)
{
struct bufferevent_async *bev_async = upcast(bev);
/* XXXX If we disable reading or writing, we may want to consider
* canceling any in-progress read or write operation, though it might
* not work. */
if (what & EV_READ) {
BEV_DEL_GENERIC_READ_TIMEOUT(bev);
bev_async_del_read(bev_async);
}
if (what & EV_WRITE) {
BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
bev_async_del_write(bev_async);
}
return 0;
}
static void
be_async_destruct(struct bufferevent *bev)
{
struct bufferevent_async *bev_async = upcast(bev);
struct bufferevent_private *bev_p = BEV_UPCAST(bev);
evutil_socket_t fd;
EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
!upcast(bev)->read_in_progress);
bev_async_del_read(bev_async);
bev_async_del_write(bev_async);
fd = _evbuffer_overlapped_get_fd(bev->input);
if (bev_p->options & BEV_OPT_CLOSE_ON_FREE)
evutil_closesocket(fd);
/* delete this in case non-blocking connect was used */
if (event_initialized(&bev->ev_write)) {
event_del(&bev->ev_write);
_bufferevent_del_generic_timeout_cbs(bev);
}
}
/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
* we use WSAGetOverlappedResult to translate. */
static void
bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
{
DWORD bytes, flags;
evutil_socket_t fd;
fd = _evbuffer_overlapped_get_fd(bev->input);
WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
static int
be_async_flush(struct bufferevent *bev, short what,
enum bufferevent_flush_mode mode)
{
return 0;
}
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
ev_ssize_t nbytes, int ok)
{
struct bufferevent_async *bev_a = upcast_connect(eo);
struct bufferevent *bev = &bev_a->bev.bev;
BEV_LOCK(bev);
EVUTIL_ASSERT(bev_a->bev.connecting);
bev_a->bev.connecting = 0;
if (ok)
bufferevent_async_set_connected(bev);
else
bev_async_set_wsa_error(bev, eo);
_bufferevent_run_eventcb(bev,
ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR);
event_base_del_virtual(bev->ev_base);
_bufferevent_decref_and_unlock(bev);
}
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
ev_ssize_t nbytes, int ok)
{
struct bufferevent_async *bev_a = upcast_read(eo);
struct bufferevent *bev = &bev_a->bev.bev;
short what = BEV_EVENT_READING;
BEV_LOCK(bev);
EVUTIL_ASSERT(bev_a->read_in_progress);
evbuffer_commit_read(bev->input, nbytes);
bev_a->read_in_progress = 0;
if (!ok)
bev_async_set_wsa_error(bev, eo);
if (bev_a->ok) {
if (ok && nbytes) {
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
_bufferevent_decrement_read_buckets(&bev_a->bev,
nbytes);
if (evbuffer_get_length(bev->input) >= bev->wm_read.low)
_bufferevent_run_readcb(bev);
bev_async_consider_reading(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
}
}
_bufferevent_decref_and_unlock(bev);
}
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
ev_ssize_t nbytes, int ok)
{
struct bufferevent_async *bev_a = upcast_write(eo);
struct bufferevent *bev = &bev_a->bev.bev;
short what = BEV_EVENT_WRITING;
BEV_LOCK(bev);
EVUTIL_ASSERT(bev_a->write_in_progress);
evbuffer_commit_write(bev->output, nbytes);
bev_a->write_in_progress = 0;
if (!ok)
bev_async_set_wsa_error(bev, eo);
if (bev_a->ok) {
if (ok && nbytes) {
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
_bufferevent_decrement_write_buckets(&bev_a->bev,
nbytes);
if (evbuffer_get_length(bev->output) <=
bev->wm_write.low)
_bufferevent_run_writecb(bev);
bev_async_consider_writing(bev_a);
} else if (!ok) {
what |= BEV_EVENT_ERROR;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
} else if (!nbytes) {
what |= BEV_EVENT_EOF;
bev_a->ok = 0;
_bufferevent_run_eventcb(bev, what);
}
}
_bufferevent_decref_and_unlock(bev);
}
struct bufferevent *
bufferevent_async_new(struct event_base *base,
evutil_socket_t fd, int options)
{
struct bufferevent_async *bev_a;
struct bufferevent *bev;
struct event_iocp_port *iocp;
options |= BEV_OPT_THREADSAFE;
if (!(iocp = event_base_get_iocp(base)))
return NULL;
if (fd >= 0 && event_iocp_port_associate(iocp, fd, 1)<0) {
int err = GetLastError();
/* We may have already associated this fd with a port.
* Let's hope it's this port, and that the error code
* for doing this never changes. */
if (err != ERROR_INVALID_PARAMETER)
return NULL;
}
if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
return NULL;
bev = &bev_a->bev.bev;
if (!(bev->input = evbuffer_overlapped_new(fd))) {
mm_free(bev_a);
return NULL;
}
if (!(bev->output = evbuffer_overlapped_new(fd))) {
evbuffer_free(bev->input);
mm_free(bev_a);
return NULL;
}
if (bufferevent_init_common(&bev_a->bev, base, &bufferevent_ops_async,
options)<0)
goto err;
evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);
event_overlapped_init(&bev_a->connect_overlapped, connect_complete);
event_overlapped_init(&bev_a->read_overlapped, read_complete);
event_overlapped_init(&bev_a->write_overlapped, write_complete);
bev_a->ok = fd >= 0;
if (bev_a->ok)
_bufferevent_init_generic_timeout_cbs(bev);
return bev;
err:
bufferevent_free(&bev_a->bev.bev);
return NULL;
}
void
bufferevent_async_set_connected(struct bufferevent *bev)
{
struct bufferevent_async *bev_async = upcast(bev);
bev_async->ok = 1;
_bufferevent_init_generic_timeout_cbs(bev);
/* Now's a good time to consider reading/writing */
be_async_enable(bev, bev->enabled);
}
int
bufferevent_async_can_connect(struct bufferevent *bev)
{
const struct win32_extension_fns *ext =
event_get_win32_extension_fns();
if (BEV_IS_ASYNC(bev) &&
event_base_get_iocp(bev->ev_base) &&
ext && ext->ConnectEx)
return 1;
return 0;
}
int
bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
const struct sockaddr *sa, int socklen)
{
BOOL rc;
struct bufferevent_async *bev_async = upcast(bev);
struct sockaddr_storage ss;
const struct win32_extension_fns *ext =
event_get_win32_extension_fns();
EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);
/* ConnectEx() requires that the socket be bound to an address
* with bind() before using, otherwise it will fail. We attempt
* to issue a bind() here, taking into account that the error
* code is set to WSAEINVAL when the socket is already bound. */
memset(&ss, 0, sizeof(ss));
if (sa->sa_family == AF_INET) {
struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = INADDR_ANY;
} else if (sa->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
sin6->sin6_family = AF_INET6;
sin6->sin6_addr = in6addr_any;
} else {
/* Well, the user will have to bind() */
return -1;
}
if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
WSAGetLastError() != WSAEINVAL)
return -1;
event_base_add_virtual(bev->ev_base);
bufferevent_incref(bev);
rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
&bev_async->connect_overlapped.overlapped);
if (rc || WSAGetLastError() == ERROR_IO_PENDING)
return 0;
event_base_del_virtual(bev->ev_base);
bufferevent_decref(bev);
return -1;
}
static int
be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
union bufferevent_ctrl_data *data)
{
switch (op) {
case BEV_CTRL_GET_FD:
data->fd = _evbuffer_overlapped_get_fd(bev->input);
return 0;
case BEV_CTRL_SET_FD: {
struct event_iocp_port *iocp;
if (data->fd == _evbuffer_overlapped_get_fd(bev->input))
return 0;
if (!(iocp = event_base_get_iocp(bev->ev_base)))
return -1;
if (event_iocp_port_associate(iocp, data->fd, 1) < 0)
return -1;
_evbuffer_overlapped_set_fd(bev->input, data->fd);
_evbuffer_overlapped_set_fd(bev->output, data->fd);
return 0;
}
case BEV_CTRL_GET_UNDERLYING:
default:
return -1;
}
}

View file

@ -1,487 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
* Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
/* prototypes */
static int be_filter_enable(struct bufferevent *, short);
static int be_filter_disable(struct bufferevent *, short);
static void be_filter_destruct(struct bufferevent *);
static void be_filter_readcb(struct bufferevent *, void *);
static void be_filter_writecb(struct bufferevent *, void *);
static void be_filter_eventcb(struct bufferevent *, short, void *);
static int be_filter_flush(struct bufferevent *bufev,
short iotype, enum bufferevent_flush_mode mode);
static int be_filter_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
static void bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
const struct evbuffer_cb_info *info, void *arg);
struct bufferevent_filtered {
struct bufferevent_private bev;
/** The bufferevent that we read/write filtered data from/to. */
struct bufferevent *underlying;
/** A callback on our outbuf to notice when somebody adds data */
struct evbuffer_cb_entry *outbuf_cb;
/** True iff we have received an EOF callback from the underlying
* bufferevent. */
unsigned got_eof;
/** Function to free context when we're done. */
void (*free_context)(void *);
/** Input filter */
bufferevent_filter_cb process_in;
/** Output filter */
bufferevent_filter_cb process_out;
/** User-supplied argument to the filters. */
void *context;
};
const struct bufferevent_ops bufferevent_ops_filter = {
"filter",
evutil_offsetof(struct bufferevent_filtered, bev.bev),
be_filter_enable,
be_filter_disable,
be_filter_destruct,
_bufferevent_generic_adj_timeouts,
be_filter_flush,
be_filter_ctrl,
};
/* Given a bufferevent that's really the bev filter of a bufferevent_filtered,
* return that bufferevent_filtered. Returns NULL otherwise.*/
static inline struct bufferevent_filtered *
upcast(struct bufferevent *bev)
{
struct bufferevent_filtered *bev_f;
if (bev->be_ops != &bufferevent_ops_filter)
return NULL;
bev_f = (void*)( ((char*)bev) -
evutil_offsetof(struct bufferevent_filtered, bev.bev));
EVUTIL_ASSERT(bev_f->bev.bev.be_ops == &bufferevent_ops_filter);
return bev_f;
}
#define downcast(bev_f) (&(bev_f)->bev.bev)
/** Return 1 iff bevf's underlying bufferevent's output buffer is at or
* over its high watermark such that we should not write to it in a given
* flush mode. */
static int
be_underlying_writebuf_full(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state)
{
struct bufferevent *u = bevf->underlying;
return state == BEV_NORMAL &&
u->wm_write.high &&
evbuffer_get_length(u->output) >= u->wm_write.high;
}
/** Return 1 if our input buffer is at or over its high watermark such that we
* should not write to it in a given flush mode. */
static int
be_readbuf_full(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state)
{
struct bufferevent *bufev = downcast(bevf);
return state == BEV_NORMAL &&
bufev->wm_read.high &&
evbuffer_get_length(bufev->input) >= bufev->wm_read.high;
}
/* Filter to use when we're created with a NULL filter. */
static enum bufferevent_filter_result
be_null_filter(struct evbuffer *src, struct evbuffer *dst, ev_ssize_t lim,
enum bufferevent_flush_mode state, void *ctx)
{
(void)state;
if (evbuffer_remove_buffer(src, dst, lim) == 0)
return BEV_OK;
else
return BEV_ERROR;
}
struct bufferevent *
bufferevent_filter_new(struct bufferevent *underlying,
bufferevent_filter_cb input_filter,
bufferevent_filter_cb output_filter,
int options,
void (*free_context)(void *),
void *ctx)
{
struct bufferevent_filtered *bufev_f;
int tmp_options = options & ~BEV_OPT_THREADSAFE;
if (!input_filter)
input_filter = be_null_filter;
if (!output_filter)
output_filter = be_null_filter;
bufev_f = mm_calloc(1, sizeof(struct bufferevent_filtered));
if (!bufev_f)
return NULL;
if (bufferevent_init_common(&bufev_f->bev, underlying->ev_base,
&bufferevent_ops_filter, tmp_options) < 0) {
mm_free(bufev_f);
return NULL;
}
if (options & BEV_OPT_THREADSAFE) {
bufferevent_enable_locking(downcast(bufev_f), NULL);
}
bufev_f->underlying = underlying;
bufev_f->process_in = input_filter;
bufev_f->process_out = output_filter;
bufev_f->free_context = free_context;
bufev_f->context = ctx;
bufferevent_setcb(bufev_f->underlying,
be_filter_readcb, be_filter_writecb, be_filter_eventcb, bufev_f);
bufev_f->outbuf_cb = evbuffer_add_cb(downcast(bufev_f)->output,
bufferevent_filtered_outbuf_cb, bufev_f);
_bufferevent_init_generic_timeout_cbs(downcast(bufev_f));
bufferevent_incref(underlying);
return downcast(bufev_f);
}
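/* Illustrative sketch (not part of the original file): wrapping an existing
 * bufferevent with a pass-through filter using the constructor above.
 * Passing NULL for both callbacks selects be_null_filter, so data is copied
 * through unchanged; "underlying" is assumed to have been created earlier. */
static struct bufferevent *
example_passthrough_filter(struct bufferevent *underlying)
{
    /* BEV_OPT_CLOSE_ON_FREE ties the underlying bufferevent's lifetime to
     * the filter, matching the refcount handling in be_filter_destruct(). */
    return bufferevent_filter_new(underlying, NULL, NULL,
        BEV_OPT_CLOSE_ON_FREE, NULL, NULL);
}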
static void
be_filter_destruct(struct bufferevent *bev)
{
struct bufferevent_filtered *bevf = upcast(bev);
EVUTIL_ASSERT(bevf);
if (bevf->free_context)
bevf->free_context(bevf->context);
if (bevf->bev.options & BEV_OPT_CLOSE_ON_FREE) {
/* Yes, there is also a decref in bufferevent_decref.
* That decref corresponds to the incref when we set
* underlying for the first time. This decref is an
* extra one to remove the last reference.
*/
if (BEV_UPCAST(bevf->underlying)->refcnt < 2) {
event_warnx("BEV_OPT_CLOSE_ON_FREE set on an "
"bufferevent with too few references");
} else {
bufferevent_free(bevf->underlying);
}
}
_bufferevent_del_generic_timeout_cbs(bev);
}
static int
be_filter_enable(struct bufferevent *bev, short event)
{
struct bufferevent_filtered *bevf = upcast(bev);
if (event & EV_READ)
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
if (event & EV_WRITE)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
return bufferevent_enable(bevf->underlying, event);
}
static int
be_filter_disable(struct bufferevent *bev, short event)
{
struct bufferevent_filtered *bevf = upcast(bev);
if (event & EV_READ)
BEV_DEL_GENERIC_READ_TIMEOUT(bev);
if (event & EV_WRITE)
BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
return bufferevent_disable(bevf->underlying, event);
}
static enum bufferevent_filter_result
be_filter_process_input(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state,
int *processed_out)
{
enum bufferevent_filter_result res;
struct bufferevent *bev = downcast(bevf);
if (state == BEV_NORMAL) {
/* If we're in 'normal' mode, don't urge data on the filter
* unless we're reading data and under our high-water mark.*/
if (!(bev->enabled & EV_READ) ||
be_readbuf_full(bevf, state))
return BEV_OK;
}
do {
ev_ssize_t limit = -1;
if (state == BEV_NORMAL && bev->wm_read.high)
limit = bev->wm_read.high -
evbuffer_get_length(bev->input);
res = bevf->process_in(bevf->underlying->input,
bev->input, limit, state, bevf->context);
if (res == BEV_OK)
*processed_out = 1;
} while (res == BEV_OK &&
(bev->enabled & EV_READ) &&
evbuffer_get_length(bevf->underlying->input) &&
!be_readbuf_full(bevf, state));
if (*processed_out)
BEV_RESET_GENERIC_READ_TIMEOUT(bev);
return res;
}
static enum bufferevent_filter_result
be_filter_process_output(struct bufferevent_filtered *bevf,
enum bufferevent_flush_mode state,
int *processed_out)
{
/* Requires references and lock: might call writecb */
enum bufferevent_filter_result res = BEV_OK;
struct bufferevent *bufev = downcast(bevf);
int again = 0;
if (state == BEV_NORMAL) {
/* If we're in 'normal' mode, don't urge data on the
* filter unless we're writing data, and the underlying
* bufferevent is accepting data, and we have data to
* give the filter. If we're in 'flush' or 'finish',
* call the filter no matter what. */
if (!(bufev->enabled & EV_WRITE) ||
be_underlying_writebuf_full(bevf, state) ||
!evbuffer_get_length(bufev->output))
return BEV_OK;
}
/* disable the callback that calls this function
when the user adds to the output buffer. */
evbuffer_cb_set_flags(bufev->output, bevf->outbuf_cb, 0);
do {
int processed = 0;
again = 0;
do {
ev_ssize_t limit = -1;
if (state == BEV_NORMAL &&
bevf->underlying->wm_write.high)
limit = bevf->underlying->wm_write.high -
evbuffer_get_length(bevf->underlying->output);
res = bevf->process_out(downcast(bevf)->output,
bevf->underlying->output,
limit,
state,
bevf->context);
if (res == BEV_OK)
processed = *processed_out = 1;
} while (/* Stop if the filter wasn't successful...*/
res == BEV_OK &&
/* Or if we aren't writing any more. */
(bufev->enabled & EV_WRITE) &&
* Or if we have nothing more to write and we are
* not flushing. */
evbuffer_get_length(bufev->output) &&
/* Or if we have filled the underlying output buffer. */
!be_underlying_writebuf_full(bevf,state));
if (processed &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
/* call the write callback.*/
_bufferevent_run_writecb(bufev);
if (res == BEV_OK &&
(bufev->enabled & EV_WRITE) &&
evbuffer_get_length(bufev->output) &&
!be_underlying_writebuf_full(bevf, state)) {
again = 1;
}
}
} while (again);
/* reenable the outbuf_cb */
evbuffer_cb_set_flags(bufev->output,bevf->outbuf_cb,
EVBUFFER_CB_ENABLED);
if (*processed_out)
BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
return res;
}
/* Called when the size of our outbuf changes. */
static void
bufferevent_filtered_outbuf_cb(struct evbuffer *buf,
const struct evbuffer_cb_info *cbinfo, void *arg)
{
struct bufferevent_filtered *bevf = arg;
struct bufferevent *bev = downcast(bevf);
if (cbinfo->n_added) {
int processed_any = 0;
/* Somebody added more data to the output buffer. Try to
* process it, if we should. */
_bufferevent_incref_and_lock(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
}
}
/* Called when the underlying socket has read. */
static void
be_filter_readcb(struct bufferevent *underlying, void *_me)
{
struct bufferevent_filtered *bevf = _me;
enum bufferevent_filter_result res;
enum bufferevent_flush_mode state;
struct bufferevent *bufev = downcast(bevf);
int processed_any = 0;
_bufferevent_incref_and_lock(bufev);
if (bevf->got_eof)
state = BEV_FINISHED;
else
state = BEV_NORMAL;
res = be_filter_process_input(bevf, state, &processed_any);
/* XXX This should be in process_input, not here. There are
* other places that can call process-input, and they should
* force readcb calls as needed. */
if (processed_any &&
evbuffer_get_length(bufev->input) >= bufev->wm_read.low)
_bufferevent_run_readcb(bufev);
_bufferevent_decref_and_unlock(bufev);
}
/* Called when the underlying socket has drained enough that we can write to
it. */
static void
be_filter_writecb(struct bufferevent *underlying, void *_me)
{
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
int processed_any = 0;
_bufferevent_incref_and_lock(bev);
be_filter_process_output(bevf, BEV_NORMAL, &processed_any);
_bufferevent_decref_and_unlock(bev);
}
/* Called when the underlying socket has given us an error */
static void
be_filter_eventcb(struct bufferevent *underlying, short what, void *_me)
{
struct bufferevent_filtered *bevf = _me;
struct bufferevent *bev = downcast(bevf);
_bufferevent_incref_and_lock(bev);
/* All we can really do is tell our own eventcb. */
_bufferevent_run_eventcb(bev, what);
_bufferevent_decref_and_unlock(bev);
}
static int
be_filter_flush(struct bufferevent *bufev,
short iotype, enum bufferevent_flush_mode mode)
{
struct bufferevent_filtered *bevf = upcast(bufev);
int processed_any = 0;
EVUTIL_ASSERT(bevf);
_bufferevent_incref_and_lock(bufev);
if (iotype & EV_READ) {
be_filter_process_input(bevf, mode, &processed_any);
}
if (iotype & EV_WRITE) {
be_filter_process_output(bevf, mode, &processed_any);
}
/* XXX check the return value? */
/* XXX does this want to recursively call lower-level flushes? */
bufferevent_flush(bevf->underlying, iotype, mode);
_bufferevent_decref_and_unlock(bufev);
return processed_any;
}
static int
be_filter_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
union bufferevent_ctrl_data *data)
{
struct bufferevent_filtered *bevf;
switch (op) {
case BEV_CTRL_GET_UNDERLYING:
bevf = upcast(bev);
data->ptr = bevf->underlying;
return 0;
case BEV_CTRL_GET_FD:
case BEV_CTRL_SET_FD:
default:
return -1;
}
}
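
For orientation, here is a minimal sketch of how the filtering bufferevent implemented above is typically used. It is not part of the removed file; the helper names (passthrough_filter, wrap_with_filter) are made up for illustration, while bufferevent_filter_new() and the filter callback signature are the public libevent 2.0 API.

#include <event2/buffer.h>
#include <event2/bufferevent.h>

/* Illustrative pass-through filter: move data from src to dst unchanged,
 * honoring the destination limit when one is given. A real filter would
 * transform the bytes here (compress, encrypt, reframe, ...). */
static enum bufferevent_filter_result
passthrough_filter(struct evbuffer *src, struct evbuffer *dst,
    ev_ssize_t dst_limit, enum bufferevent_flush_mode mode, void *ctx)
{
	(void)mode; (void)ctx;
	if (dst_limit < 0)
		evbuffer_add_buffer(dst, src);
	else
		evbuffer_remove_buffer(src, dst, (size_t)dst_limit);
	return BEV_OK;
}

/* Wrap an existing bufferevent in a filtering bufferevent; freeing the
 * wrapper also frees 'underlying' because of BEV_OPT_CLOSE_ON_FREE. */
static struct bufferevent *
wrap_with_filter(struct bufferevent *underlying)
{
	return bufferevent_filter_new(underlying,
	    passthrough_filter, passthrough_filter,
	    BEV_OPT_CLOSE_ON_FREE, NULL /* free_context */, NULL /* ctx */);
}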

Diff not shown because of its large size. Load diff

View file

@ -1,333 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#ifdef WIN32
#include <winsock2.h>
#endif
#include "event2/event-config.h"
#include "event2/util.h"
#include "event2/buffer.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/event.h"
#include "defer-internal.h"
#include "bufferevent-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
struct bufferevent_pair {
struct bufferevent_private bev;
struct bufferevent_pair *partner;
};
/* Given a bufferevent that's really a bev part of a bufferevent_pair,
* return that bufferevent_pair. Returns NULL otherwise.*/
static inline struct bufferevent_pair *
upcast(struct bufferevent *bev)
{
struct bufferevent_pair *bev_p;
if (bev->be_ops != &bufferevent_ops_pair)
return NULL;
bev_p = EVUTIL_UPCAST(bev, struct bufferevent_pair, bev.bev);
EVUTIL_ASSERT(bev_p->bev.bev.be_ops == &bufferevent_ops_pair);
return bev_p;
}
#define downcast(bev_pair) (&(bev_pair)->bev.bev)
static inline void
incref_and_lock(struct bufferevent *b)
{
struct bufferevent_pair *bevp;
_bufferevent_incref_and_lock(b);
bevp = upcast(b);
if (bevp->partner)
_bufferevent_incref_and_lock(downcast(bevp->partner));
}
static inline void
decref_and_unlock(struct bufferevent *b)
{
struct bufferevent_pair *bevp = upcast(b);
if (bevp->partner)
_bufferevent_decref_and_unlock(downcast(bevp->partner));
_bufferevent_decref_and_unlock(b);
}
/* XXX Handle close */
static void be_pair_outbuf_cb(struct evbuffer *,
const struct evbuffer_cb_info *, void *);
static struct bufferevent_pair *
bufferevent_pair_elt_new(struct event_base *base,
int options)
{
struct bufferevent_pair *bufev;
if (! (bufev = mm_calloc(1, sizeof(struct bufferevent_pair))))
return NULL;
if (bufferevent_init_common(&bufev->bev, base, &bufferevent_ops_pair,
options)) {
mm_free(bufev);
return NULL;
}
if (!evbuffer_add_cb(bufev->bev.bev.output, be_pair_outbuf_cb, bufev)) {
bufferevent_free(downcast(bufev));
return NULL;
}
_bufferevent_init_generic_timeout_cbs(&bufev->bev.bev);
return bufev;
}
int
bufferevent_pair_new(struct event_base *base, int options,
struct bufferevent *pair[2])
{
struct bufferevent_pair *bufev1 = NULL, *bufev2 = NULL;
int tmp_options;
options |= BEV_OPT_DEFER_CALLBACKS;
tmp_options = options & ~BEV_OPT_THREADSAFE;
bufev1 = bufferevent_pair_elt_new(base, options);
if (!bufev1)
return -1;
bufev2 = bufferevent_pair_elt_new(base, tmp_options);
if (!bufev2) {
bufferevent_free(downcast(bufev1));
return -1;
}
if (options & BEV_OPT_THREADSAFE) {
/*XXXX check return */
bufferevent_enable_locking(downcast(bufev2), bufev1->bev.lock);
}
bufev1->partner = bufev2;
bufev2->partner = bufev1;
evbuffer_freeze(downcast(bufev1)->input, 0);
evbuffer_freeze(downcast(bufev1)->output, 1);
evbuffer_freeze(downcast(bufev2)->input, 0);
evbuffer_freeze(downcast(bufev2)->output, 1);
pair[0] = downcast(bufev1);
pair[1] = downcast(bufev2);
return 0;
}
static void
be_pair_transfer(struct bufferevent *src, struct bufferevent *dst,
int ignore_wm)
{
size_t src_size, dst_size;
size_t n;
evbuffer_unfreeze(src->output, 1);
evbuffer_unfreeze(dst->input, 0);
if (dst->wm_read.high) {
size_t dst_size = evbuffer_get_length(dst->input);
if (dst_size < dst->wm_read.high) {
n = dst->wm_read.high - dst_size;
evbuffer_remove_buffer(src->output, dst->input, n);
} else {
if (!ignore_wm)
goto done;
n = evbuffer_get_length(src->output);
evbuffer_add_buffer(dst->input, src->output);
}
} else {
n = evbuffer_get_length(src->output);
evbuffer_add_buffer(dst->input, src->output);
}
if (n) {
BEV_RESET_GENERIC_READ_TIMEOUT(dst);
if (evbuffer_get_length(dst->output))
BEV_RESET_GENERIC_WRITE_TIMEOUT(dst);
else
BEV_DEL_GENERIC_WRITE_TIMEOUT(dst);
}
src_size = evbuffer_get_length(src->output);
dst_size = evbuffer_get_length(dst->input);
if (dst_size >= dst->wm_read.low) {
_bufferevent_run_readcb(dst);
}
if (src_size <= src->wm_write.low) {
_bufferevent_run_writecb(src);
}
done:
evbuffer_freeze(src->output, 1);
evbuffer_freeze(dst->input, 0);
}
static inline int
be_pair_wants_to_talk(struct bufferevent_pair *src,
struct bufferevent_pair *dst)
{
return (downcast(src)->enabled & EV_WRITE) &&
(downcast(dst)->enabled & EV_READ) &&
!dst->bev.read_suspended &&
evbuffer_get_length(downcast(src)->output);
}
static void
be_pair_outbuf_cb(struct evbuffer *outbuf,
const struct evbuffer_cb_info *info, void *arg)
{
struct bufferevent_pair *bev_pair = arg;
struct bufferevent_pair *partner = bev_pair->partner;
incref_and_lock(downcast(bev_pair));
if (info->n_added > info->n_deleted && partner) {
/* We got more data. If the other side's reading, then
hand it over. */
if (be_pair_wants_to_talk(bev_pair, partner)) {
be_pair_transfer(downcast(bev_pair), downcast(partner), 0);
}
}
decref_and_unlock(downcast(bev_pair));
}
static int
be_pair_enable(struct bufferevent *bufev, short events)
{
struct bufferevent_pair *bev_p = upcast(bufev);
struct bufferevent_pair *partner = bev_p->partner;
incref_and_lock(bufev);
if (events & EV_READ) {
BEV_RESET_GENERIC_READ_TIMEOUT(bufev);
}
if ((events & EV_WRITE) && evbuffer_get_length(bufev->output))
BEV_RESET_GENERIC_WRITE_TIMEOUT(bufev);
/* We're starting to read! Does the other side have anything to write?*/
if ((events & EV_READ) && partner &&
be_pair_wants_to_talk(partner, bev_p)) {
be_pair_transfer(downcast(partner), bufev, 0);
}
/* We're starting to write! Does the other side want to read? */
if ((events & EV_WRITE) && partner &&
be_pair_wants_to_talk(bev_p, partner)) {
be_pair_transfer(bufev, downcast(partner), 0);
}
decref_and_unlock(bufev);
return 0;
}
static int
be_pair_disable(struct bufferevent *bev, short events)
{
if (events & EV_READ) {
BEV_DEL_GENERIC_READ_TIMEOUT(bev);
}
if (events & EV_WRITE)
BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
return 0;
}
static void
be_pair_destruct(struct bufferevent *bev)
{
struct bufferevent_pair *bev_p = upcast(bev);
if (bev_p->partner) {
bev_p->partner->partner = NULL;
bev_p->partner = NULL;
}
_bufferevent_del_generic_timeout_cbs(bev);
}
static int
be_pair_flush(struct bufferevent *bev, short iotype,
enum bufferevent_flush_mode mode)
{
struct bufferevent_pair *bev_p = upcast(bev);
struct bufferevent *partner;
incref_and_lock(bev);
if (!bev_p->partner) {
decref_and_unlock(bev);
return -1;
}
partner = downcast(bev_p->partner);
if (mode == BEV_NORMAL)
return 0;
if ((iotype & EV_READ) != 0)
be_pair_transfer(partner, bev, 1);
if ((iotype & EV_WRITE) != 0)
be_pair_transfer(bev, partner, 1);
if (mode == BEV_FINISHED) {
_bufferevent_run_eventcb(partner, iotype|BEV_EVENT_EOF);
}
decref_and_unlock(bev);
return 0;
}
struct bufferevent *
bufferevent_pair_get_partner(struct bufferevent *bev)
{
struct bufferevent_pair *bev_p;
struct bufferevent *partner;
bev_p = upcast(bev);
if (! bev_p)
return NULL;
incref_and_lock(bev);
partner = bev_p->partner ? downcast(bev_p->partner) : NULL;
decref_and_unlock(bev);
return partner;
}
const struct bufferevent_ops bufferevent_ops_pair = {
"pair_elt",
evutil_offsetof(struct bufferevent_pair, bev.bev),
be_pair_enable,
be_pair_disable,
be_pair_destruct,
_bufferevent_generic_adj_timeouts,
be_pair_flush,
NULL, /* ctrl */
};
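
A brief usage sketch of the pair implementation above (not part of the removed file; make_echo_pair is a hypothetical helper): bufferevent_pair_new() produces two connected bufferevents, and bytes written to one become readable on the other.

#include <event2/event.h>
#include <event2/bufferevent.h>

/* Illustrative only: create a connected pair on 'base' and push a few
 * bytes from one side to the other. */
static int
make_echo_pair(struct event_base *base)
{
	struct bufferevent *pair[2];
	if (bufferevent_pair_new(base, BEV_OPT_CLOSE_ON_FREE, pair) < 0)
		return -1;
	bufferevent_enable(pair[0], EV_READ|EV_WRITE);
	bufferevent_enable(pair[1], EV_READ|EV_WRITE);
	/* Whatever is written to pair[0] shows up in pair[1]'s input buffer
	 * (delivered through deferred callbacks, as set up above). */
	bufferevent_write(pair[0], "ping", 4);
	return 0;
}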

View file

@ -1,947 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
* Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/bufferevent_struct.h"
#include "event2/buffer.h"
#include "ratelim-internal.h"
#include "bufferevent-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
int
ev_token_bucket_init(struct ev_token_bucket *bucket,
const struct ev_token_bucket_cfg *cfg,
ev_uint32_t current_tick,
int reinitialize)
{
if (reinitialize) {
/* on reinitialization, we only clip downwards, since we've
already used who-knows-how-much bandwidth this tick. We
leave "last_updated" as it is; the next update will add the
appropriate amount of bandwidth to the bucket.
*/
if (bucket->read_limit > cfg->read_maximum)
bucket->read_limit = cfg->read_maximum;
if (bucket->write_limit > cfg->write_maximum)
bucket->write_limit = cfg->write_maximum;
} else {
bucket->read_limit = cfg->read_rate;
bucket->write_limit = cfg->write_rate;
bucket->last_updated = current_tick;
}
return 0;
}
int
ev_token_bucket_update(struct ev_token_bucket *bucket,
const struct ev_token_bucket_cfg *cfg,
ev_uint32_t current_tick)
{
/* It's okay if the tick number overflows, since we'll just
* wrap around when we do the unsigned subtraction. */
unsigned n_ticks = current_tick - bucket->last_updated;
/* Make sure some ticks actually happened, and that time didn't
* roll back. */
if (n_ticks == 0 || n_ticks > INT_MAX)
return 0;
/* Naively, we would say
bucket->limit += n_ticks * cfg->rate;
if (bucket->limit > cfg->maximum)
bucket->limit = cfg->maximum;
But we're worried about overflow, so we do it like this:
*/
if ((cfg->read_maximum - bucket->read_limit) / n_ticks < cfg->read_rate)
bucket->read_limit = cfg->read_maximum;
else
bucket->read_limit += n_ticks * cfg->read_rate;
if ((cfg->write_maximum - bucket->write_limit) / n_ticks < cfg->write_rate)
bucket->write_limit = cfg->write_maximum;
else
bucket->write_limit += n_ticks * cfg->write_rate;
bucket->last_updated = current_tick;
return 1;
}
static inline void
bufferevent_update_buckets(struct bufferevent_private *bev)
{
/* Must hold lock on bev. */
struct timeval now;
unsigned tick;
event_base_gettimeofday_cached(bev->bev.ev_base, &now);
tick = ev_token_bucket_get_tick(&now, bev->rate_limiting->cfg);
if (tick != bev->rate_limiting->limit.last_updated)
ev_token_bucket_update(&bev->rate_limiting->limit,
bev->rate_limiting->cfg, tick);
}
ev_uint32_t
ev_token_bucket_get_tick(const struct timeval *tv,
const struct ev_token_bucket_cfg *cfg)
{
/* This computation uses two multiplies and a divide. We could do
* fewer if we knew that the tick length was an integer number of
* seconds, or if we knew it divided evenly into a second. We should
* investigate that more.
*/
/* We cast to an ev_uint64_t first, since we don't want to overflow
* before we do the final divide. */
ev_uint64_t msec = (ev_uint64_t)tv->tv_sec * 1000 + tv->tv_usec / 1000;
return (unsigned)(msec / cfg->msec_per_tick);
}
struct ev_token_bucket_cfg *
ev_token_bucket_cfg_new(ev_uint32_t read_rate, ev_uint32_t read_burst,
ev_uint32_t write_rate, ev_uint32_t write_burst,
const struct timeval *tick_len)
{
struct ev_token_bucket_cfg *r;
struct timeval g;
if (! tick_len) {
g.tv_sec = 1;
g.tv_usec = 0;
tick_len = &g;
}
if (read_rate > read_burst || write_rate > write_burst ||
read_rate < 1 || write_rate < 1)
return NULL;
r = mm_calloc(1, sizeof(struct ev_token_bucket_cfg));
if (!r)
return NULL;
r->read_rate = read_rate;
r->write_rate = write_rate;
r->read_maximum = read_burst;
r->write_maximum = write_burst;
memcpy(&r->tick_timeout, tick_len, sizeof(struct timeval));
r->msec_per_tick = (tick_len->tv_sec * 1000) + tick_len->tv_usec/1000;
return r;
}
void
ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg)
{
mm_free(cfg);
}
/* No matter how big our bucket gets, don't try to read more than this
* much in a single read operation. */
#define MAX_TO_READ_EVER 16384
/* No matter how big our bucket gets, don't try to write more than this
* much in a single write operation. */
#define MAX_TO_WRITE_EVER 16384
#define LOCK_GROUP(g) EVLOCK_LOCK((g)->lock, 0)
#define UNLOCK_GROUP(g) EVLOCK_UNLOCK((g)->lock, 0)
static int _bev_group_suspend_reading(struct bufferevent_rate_limit_group *g);
static int _bev_group_suspend_writing(struct bufferevent_rate_limit_group *g);
/** Helper: figure out the maximum amount we should write if is_write, or
the maximum amount we should read if is_read. Return that maximum, or
0 if our bucket is wholly exhausted.
*/
static inline int
_bufferevent_get_rlim_max(struct bufferevent_private *bev, int is_write)
{
/* needs lock on bev. */
int max_so_far = is_write?MAX_TO_WRITE_EVER:MAX_TO_READ_EVER;
#define LIM(x) \
(is_write ? (x).write_limit : (x).read_limit)
#define GROUP_SUSPENDED(g) \
(is_write ? (g)->write_suspended : (g)->read_suspended)
/* Sets max_so_far to MIN(x, max_so_far) */
#define CLAMPTO(x) \
do { \
if (max_so_far > (x)) \
max_so_far = (x); \
} while (0);
if (!bev->rate_limiting)
return max_so_far;
/* If rate-limiting is enabled at all, update the appropriate
bucket, and take the smaller of our rate limit and the group
rate limit.
*/
if (bev->rate_limiting->cfg) {
bufferevent_update_buckets(bev);
max_so_far = LIM(bev->rate_limiting->limit);
}
if (bev->rate_limiting->group) {
struct bufferevent_rate_limit_group *g =
bev->rate_limiting->group;
ev_uint32_t share;
LOCK_GROUP(g);
if (GROUP_SUSPENDED(g)) {
/* We can get here if we failed to lock this
* particular bufferevent while suspending the whole
* group. */
if (is_write)
bufferevent_suspend_write(&bev->bev,
BEV_SUSPEND_BW_GROUP);
else
bufferevent_suspend_read(&bev->bev,
BEV_SUSPEND_BW_GROUP);
share = 0;
} else {
/* XXXX probably we should divide among the active
* members, not the total members. */
share = LIM(g->rate_limit) / g->n_members;
if (share < g->min_share)
share = g->min_share;
}
UNLOCK_GROUP(g);
CLAMPTO(share);
}
if (max_so_far < 0)
max_so_far = 0;
return max_so_far;
}
int
_bufferevent_get_read_max(struct bufferevent_private *bev)
{
return _bufferevent_get_rlim_max(bev, 0);
}
int
_bufferevent_get_write_max(struct bufferevent_private *bev)
{
return _bufferevent_get_rlim_max(bev, 1);
}
int
_bufferevent_decrement_read_buckets(struct bufferevent_private *bev, int bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
/* need to hold lock on bev */
if (!bev->rate_limiting)
return 0;
if (bev->rate_limiting->cfg) {
bev->rate_limiting->limit.read_limit -= bytes;
if (bev->rate_limiting->limit.read_limit <= 0) {
bufferevent_suspend_read(&bev->bev, BEV_SUSPEND_BW);
if (event_add(&bev->rate_limiting->refill_bucket_event,
&bev->rate_limiting->cfg->tick_timeout) < 0)
r = -1;
}
}
if (bev->rate_limiting->group) {
LOCK_GROUP(bev->rate_limiting->group);
bev->rate_limiting->group->rate_limit.read_limit -= bytes;
bev->rate_limiting->group->total_read += bytes;
if (bev->rate_limiting->group->rate_limit.read_limit <= 0) {
_bev_group_suspend_reading(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
return r;
}
int
_bufferevent_decrement_write_buckets(struct bufferevent_private *bev, int bytes)
{
/* XXXXX Make sure all users of this function check its return value */
int r = 0;
/* need to hold lock */
if (!bev->rate_limiting)
return 0;
if (bev->rate_limiting->cfg) {
bev->rate_limiting->limit.write_limit -= bytes;
if (bev->rate_limiting->limit.write_limit <= 0) {
bufferevent_suspend_write(&bev->bev, BEV_SUSPEND_BW);
if (event_add(&bev->rate_limiting->refill_bucket_event,
&bev->rate_limiting->cfg->tick_timeout) < 0)
r = -1;
}
}
if (bev->rate_limiting->group) {
LOCK_GROUP(bev->rate_limiting->group);
bev->rate_limiting->group->rate_limit.write_limit -= bytes;
bev->rate_limiting->group->total_written += bytes;
if (bev->rate_limiting->group->rate_limit.write_limit <= 0) {
_bev_group_suspend_writing(bev->rate_limiting->group);
}
UNLOCK_GROUP(bev->rate_limiting->group);
}
return r;
}
/** Stop reading on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_reading(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
g->read_suspended = 1;
g->pending_unsuspend_read = 0;
/* Note that in this loop we call EVLOCK_TRY_LOCK instead of BEV_LOCK,
to prevent a deadlock. (Ordinarily, the group lock nests inside
the bufferevent locks.) If we are unable to lock any individual
bufferevent, it will find out later when it looks at its limit
and sees that its group is suspended.
*/
TAILQ_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
if (EVLOCK_TRY_LOCK(bev->lock)) {
bufferevent_suspend_read(&bev->bev,
BEV_SUSPEND_BW_GROUP);
EVLOCK_UNLOCK(bev->lock, 0);
}
}
return 0;
}
/** Stop writing on every bufferevent in <b>g</b> */
static int
_bev_group_suspend_writing(struct bufferevent_rate_limit_group *g)
{
/* Needs group lock */
struct bufferevent_private *bev;
g->write_suspended = 1;
g->pending_unsuspend_write = 0;
TAILQ_FOREACH(bev, &g->members, rate_limiting->next_in_group) {
if (EVLOCK_TRY_LOCK(bev->lock)) {
bufferevent_suspend_write(&bev->bev,
BEV_SUSPEND_BW_GROUP);
EVLOCK_UNLOCK(bev->lock, 0);
}
}
return 0;
}
/** Timer callback invoked on a single bufferevent with one or more exhausted
buckets when they are ready to refill. */
static void
_bev_refill_callback(evutil_socket_t fd, short what, void *arg)
{
unsigned tick;
struct timeval now;
struct bufferevent_private *bev = arg;
int again = 0;
BEV_LOCK(&bev->bev);
if (!bev->rate_limiting || !bev->rate_limiting->cfg) {
BEV_UNLOCK(&bev->bev);
return;
}
/* First, update the bucket */
event_base_gettimeofday_cached(bev->bev.ev_base, &now);
tick = ev_token_bucket_get_tick(&now,
bev->rate_limiting->cfg);
ev_token_bucket_update(&bev->rate_limiting->limit,
bev->rate_limiting->cfg,
tick);
/* Now unsuspend any read/write operations as appropriate. */
if ((bev->read_suspended & BEV_SUSPEND_BW)) {
if (bev->rate_limiting->limit.read_limit > 0)
bufferevent_unsuspend_read(&bev->bev, BEV_SUSPEND_BW);
else
again = 1;
}
if ((bev->write_suspended & BEV_SUSPEND_BW)) {
if (bev->rate_limiting->limit.write_limit > 0)
bufferevent_unsuspend_write(&bev->bev, BEV_SUSPEND_BW);
else
again = 1;
}
if (again) {
/* One or more of the buckets may need another refill if they
started negative.
XXXX if we need to be quiet for more ticks, we should
maybe figure out what timeout we really want.
*/
/* XXXX Handle event_add failure somehow */
event_add(&bev->rate_limiting->refill_bucket_event,
&bev->rate_limiting->cfg->tick_timeout);
}
BEV_UNLOCK(&bev->bev);
}
/** Helper: grab a random element from a bufferevent group. */
static struct bufferevent_private *
_bev_group_random_element(struct bufferevent_rate_limit_group *group)
{
int which;
struct bufferevent_private *bev;
/* requires group lock */
if (!group->n_members)
return NULL;
EVUTIL_ASSERT(! TAILQ_EMPTY(&group->members));
which = _evutil_weakrand() % group->n_members;
bev = TAILQ_FIRST(&group->members);
while (which--)
bev = TAILQ_NEXT(bev, rate_limiting->next_in_group);
return bev;
}
/** Iterate over the elements of a rate-limiting group 'g' with a random
starting point, assigning each to the variable 'bev', and executing the
block 'block'.
We do this in a half-baked effort to get fairness among group members.
XXX Round-robin or some kind of priority queue would be even more fair.
*/
#define FOREACH_RANDOM_ORDER(block) \
do { \
first = _bev_group_random_element(g); \
for (bev = first; bev != TAILQ_END(&g->members); \
bev = TAILQ_NEXT(bev, rate_limiting->next_in_group)) { \
block ; \
} \
for (bev = TAILQ_FIRST(&g->members); bev && bev != first; \
bev = TAILQ_NEXT(bev, rate_limiting->next_in_group)) { \
block ; \
} \
} while (0)
static void
_bev_group_unsuspend_reading(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
g->read_suspended = 0;
FOREACH_RANDOM_ORDER({
if (EVLOCK_TRY_LOCK(bev->lock)) {
bufferevent_unsuspend_read(&bev->bev,
BEV_SUSPEND_BW_GROUP);
EVLOCK_UNLOCK(bev->lock, 0);
} else {
again = 1;
}
});
g->pending_unsuspend_read = again;
}
static void
_bev_group_unsuspend_writing(struct bufferevent_rate_limit_group *g)
{
int again = 0;
struct bufferevent_private *bev, *first;
g->write_suspended = 0;
FOREACH_RANDOM_ORDER({
if (EVLOCK_TRY_LOCK(bev->lock)) {
bufferevent_unsuspend_write(&bev->bev,
BEV_SUSPEND_BW_GROUP);
EVLOCK_UNLOCK(bev->lock, 0);
} else {
again = 1;
}
});
g->pending_unsuspend_write = again;
}
/** Callback invoked every tick to add more elements to the group bucket
and unsuspend group members as needed.
*/
static void
_bev_group_refill_callback(evutil_socket_t fd, short what, void *arg)
{
struct bufferevent_rate_limit_group *g = arg;
unsigned tick;
struct timeval now;
event_base_gettimeofday_cached(event_get_base(&g->master_refill_event), &now);
LOCK_GROUP(g);
tick = ev_token_bucket_get_tick(&now, &g->rate_limit_cfg);
ev_token_bucket_update(&g->rate_limit, &g->rate_limit_cfg, tick);
if (g->pending_unsuspend_read ||
(g->read_suspended && (g->rate_limit.read_limit >= g->min_share))) {
_bev_group_unsuspend_reading(g);
}
if (g->pending_unsuspend_write ||
(g->write_suspended && (g->rate_limit.write_limit >= g->min_share))){
_bev_group_unsuspend_writing(g);
}
/* XXXX Rather than waiting to the next tick to unsuspend stuff
* with pending_unsuspend_write/read, we should do it on the
* next iteration of the mainloop.
*/
UNLOCK_GROUP(g);
}
int
bufferevent_set_rate_limit(struct bufferevent *bev,
struct ev_token_bucket_cfg *cfg)
{
struct bufferevent_private *bevp =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
int r = -1;
struct bufferevent_rate_limit *rlim;
struct timeval now;
ev_uint32_t tick;
/* XXX reference-count cfg */
BEV_LOCK(bev);
if (cfg == NULL) {
if (bevp->rate_limiting) {
bevp->rate_limiting->cfg = NULL;
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
}
r = 0;
goto done;
}
event_base_gettimeofday_cached(bev->ev_base, &now);
tick = ev_token_bucket_get_tick(&now, cfg);
if (bevp->rate_limiting && bevp->rate_limiting->cfg == cfg) {
;
} else if (bevp->rate_limiting) {
bevp->rate_limiting->cfg = cfg;
ev_token_bucket_init(&bevp->rate_limiting->limit, cfg, tick, 1);
if (bevp->rate_limiting->limit.read_limit > 0)
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
else
bufferevent_suspend_read(bev, BEV_SUSPEND_BW);
if (bevp->rate_limiting->limit.write_limit > 0)
bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
else
bufferevent_suspend_write(bev, BEV_SUSPEND_BW);
} else {
rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
if (!rlim)
goto done;
rlim->cfg = cfg;
ev_token_bucket_init(&rlim->limit, cfg, tick, 0);
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
_bev_refill_callback, bevp);
bevp->rate_limiting = rlim;
}
r = 0;
done:
BEV_UNLOCK(bev);
return r;
}
struct bufferevent_rate_limit_group *
bufferevent_rate_limit_group_new(struct event_base *base,
const struct ev_token_bucket_cfg *cfg)
{
struct bufferevent_rate_limit_group *g;
struct timeval now;
ev_uint32_t tick;
event_base_gettimeofday_cached(base, &now);
tick = ev_token_bucket_get_tick(&now, cfg);
g = mm_calloc(1, sizeof(struct bufferevent_rate_limit_group));
if (!g)
return NULL;
memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
TAILQ_INIT(&g->members);
ev_token_bucket_init(&g->rate_limit, cfg, tick, 0);
g->min_share = 64;
event_assign(&g->master_refill_event, base, -1, EV_PERSIST,
_bev_group_refill_callback, g);
/*XXXX handle event_add failure */
event_add(&g->master_refill_event, &cfg->tick_timeout);
EVTHREAD_ALLOC_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
return g;
}
int
bufferevent_rate_limit_group_set_cfg(
struct bufferevent_rate_limit_group *g,
const struct ev_token_bucket_cfg *cfg)
{
int same_tick;
if (!g || !cfg)
return -1;
LOCK_GROUP(g);
same_tick = evutil_timercmp(
&g->rate_limit_cfg.tick_timeout, &cfg->tick_timeout, ==);
memcpy(&g->rate_limit_cfg, cfg, sizeof(g->rate_limit_cfg));
if (g->rate_limit.read_limit > cfg->read_maximum)
g->rate_limit.read_limit = cfg->read_maximum;
if (g->rate_limit.write_limit > cfg->write_maximum)
g->rate_limit.write_limit = cfg->write_maximum;
if (!same_tick) {
/* This can cause a hiccup in the schedule */
event_add(&g->master_refill_event, &cfg->tick_timeout);
}
UNLOCK_GROUP(g);
return 0;
}
int
bufferevent_rate_limit_group_set_min_share(
struct bufferevent_rate_limit_group *g,
size_t share)
{
g->min_share = share;
return 0;
}
void
bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *g)
{
LOCK_GROUP(g);
EVUTIL_ASSERT(0 == g->n_members);
event_del(&g->master_refill_event);
UNLOCK_GROUP(g);
EVTHREAD_FREE_LOCK(g->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
mm_free(g);
}
int
bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
struct bufferevent_rate_limit_group *g)
{
int wsuspend, rsuspend;
struct bufferevent_private *bevp =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
BEV_LOCK(bev);
if (!bevp->rate_limiting) {
struct bufferevent_rate_limit *rlim;
rlim = mm_calloc(1, sizeof(struct bufferevent_rate_limit));
if (!rlim) {
BEV_UNLOCK(bev);
return -1;
}
evtimer_assign(&rlim->refill_bucket_event, bev->ev_base,
_bev_refill_callback, bevp);
bevp->rate_limiting = rlim;
}
if (bevp->rate_limiting->group == g) {
BEV_UNLOCK(bev);
return 0;
}
if (bevp->rate_limiting->group)
bufferevent_remove_from_rate_limit_group(bev);
LOCK_GROUP(g);
bevp->rate_limiting->group = g;
++g->n_members;
TAILQ_INSERT_TAIL(&g->members, bevp, rate_limiting->next_in_group);
rsuspend = g->read_suspended;
wsuspend = g->write_suspended;
UNLOCK_GROUP(g);
if (rsuspend)
bufferevent_suspend_read(bev, BEV_SUSPEND_BW_GROUP);
if (wsuspend)
bufferevent_suspend_write(bev, BEV_SUSPEND_BW_GROUP);
BEV_UNLOCK(bev);
return 0;
}
int
bufferevent_remove_from_rate_limit_group(struct bufferevent *bev)
{
return bufferevent_remove_from_rate_limit_group_internal(bev, 1);
}
int
bufferevent_remove_from_rate_limit_group_internal(struct bufferevent *bev,
int unsuspend)
{
struct bufferevent_private *bevp =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
BEV_LOCK(bev);
if (bevp->rate_limiting && bevp->rate_limiting->group) {
struct bufferevent_rate_limit_group *g =
bevp->rate_limiting->group;
LOCK_GROUP(g);
bevp->rate_limiting->group = NULL;
--g->n_members;
TAILQ_REMOVE(&g->members, bevp, rate_limiting->next_in_group);
UNLOCK_GROUP(g);
}
if (unsuspend) {
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW_GROUP);
bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW_GROUP);
}
BEV_UNLOCK(bev);
return 0;
}
/* ===
* API functions to expose rate limits.
*
* Don't use these from inside Libevent; they're meant to be for use by
* the program.
* === */
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_read_max() is more likely what you want*/
ev_ssize_t
bufferevent_get_read_limit(struct bufferevent *bev)
{
ev_ssize_t r;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
bufferevent_update_buckets(bevp);
r = bevp->rate_limiting->limit.read_limit;
} else {
r = EV_SSIZE_MAX;
}
BEV_UNLOCK(bev);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_write_max() is more likely what you want*/
ev_ssize_t
bufferevent_get_write_limit(struct bufferevent *bev)
{
ev_ssize_t r;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
if (bevp->rate_limiting && bevp->rate_limiting->cfg) {
bufferevent_update_buckets(bevp);
r = bevp->rate_limiting->limit.write_limit;
} else {
r = EV_SSIZE_MAX;
}
BEV_UNLOCK(bev);
return r;
}
ev_ssize_t
bufferevent_get_max_to_read(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
r = _bufferevent_get_read_max(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
ev_ssize_t
bufferevent_get_max_to_write(struct bufferevent *bev)
{
ev_ssize_t r;
BEV_LOCK(bev);
r = _bufferevent_get_write_max(BEV_UPCAST(bev));
BEV_UNLOCK(bev);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_read_max() is more likely what you want*/
ev_ssize_t
bufferevent_rate_limit_group_get_read_limit(
struct bufferevent_rate_limit_group *grp)
{
ev_ssize_t r;
LOCK_GROUP(grp);
r = grp->rate_limit.read_limit;
UNLOCK_GROUP(grp);
return r;
}
/* Mostly you don't want to use this function from inside libevent;
* _bufferevent_get_write_max() is more likely what you want. */
ev_ssize_t
bufferevent_rate_limit_group_get_write_limit(
struct bufferevent_rate_limit_group *grp)
{
ev_ssize_t r;
LOCK_GROUP(grp);
r = grp->rate_limit.write_limit;
UNLOCK_GROUP(grp);
return r;
}
int
bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr)
{
int r = 0;
ev_int32_t old_limit, new_limit;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
old_limit = bevp->rate_limiting->limit.read_limit;
new_limit = (bevp->rate_limiting->limit.read_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
bufferevent_suspend_read(bev, BEV_SUSPEND_BW);
if (event_add(&bevp->rate_limiting->refill_bucket_event,
&bevp->rate_limiting->cfg->tick_timeout) < 0)
r = -1;
} else if (old_limit <= 0 && new_limit > 0) {
event_del(&bevp->rate_limiting->refill_bucket_event);
bufferevent_unsuspend_read(bev, BEV_SUSPEND_BW);
}
BEV_UNLOCK(bev);
return r;
}
int
bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr)
{
/* XXXX this is mostly copy-and-paste from
* bufferevent_decrement_read_limit */
int r = 0;
ev_int32_t old_limit, new_limit;
struct bufferevent_private *bevp;
BEV_LOCK(bev);
bevp = BEV_UPCAST(bev);
EVUTIL_ASSERT(bevp->rate_limiting && bevp->rate_limiting->cfg);
old_limit = bevp->rate_limiting->limit.write_limit;
new_limit = (bevp->rate_limiting->limit.write_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
bufferevent_suspend_write(bev, BEV_SUSPEND_BW);
if (event_add(&bevp->rate_limiting->refill_bucket_event,
&bevp->rate_limiting->cfg->tick_timeout) < 0)
r = -1;
} else if (old_limit <= 0 && new_limit > 0) {
event_del(&bevp->rate_limiting->refill_bucket_event);
bufferevent_unsuspend_write(bev, BEV_SUSPEND_BW);
}
BEV_UNLOCK(bev);
return r;
}
int
bufferevent_rate_limit_group_decrement_read(
struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
int r = 0;
ev_int32_t old_limit, new_limit;
LOCK_GROUP(grp);
old_limit = grp->rate_limit.read_limit;
new_limit = (grp->rate_limit.read_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
_bev_group_suspend_reading(grp);
} else if (old_limit <= 0 && new_limit > 0) {
_bev_group_unsuspend_reading(grp);
}
UNLOCK_GROUP(grp);
return r;
}
int
bufferevent_rate_limit_group_decrement_write(
struct bufferevent_rate_limit_group *grp, ev_ssize_t decr)
{
int r = 0;
ev_int32_t old_limit, new_limit;
LOCK_GROUP(grp);
old_limit = grp->rate_limit.write_limit;
new_limit = (grp->rate_limit.write_limit -= decr);
if (old_limit > 0 && new_limit <= 0) {
_bev_group_suspend_writing(grp);
} else if (old_limit <= 0 && new_limit > 0) {
_bev_group_unsuspend_writing(grp);
}
UNLOCK_GROUP(grp);
return r;
}
void
bufferevent_rate_limit_group_get_totals(struct bufferevent_rate_limit_group *grp,
ev_uint64_t *total_read_out, ev_uint64_t *total_written_out)
{
EVUTIL_ASSERT(grp != NULL);
if (total_read_out)
*total_read_out = grp->total_read;
if (total_written_out)
*total_written_out = grp->total_written;
}
void
bufferevent_rate_limit_group_reset_totals(struct bufferevent_rate_limit_group *grp)
{
grp->total_read = grp->total_written = 0;
}
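
As a usage sketch of the rate-limiting code above (not part of the removed file; apply_rate_limits is a hypothetical helper): a token-bucket config can be attached to a single bufferevent or shared by a group. Note that bufferevent_set_rate_limit() stores the cfg pointer rather than copying it, so that cfg must outlive the bufferevent, while bufferevent_rate_limit_group_new() copies its cfg.

#include <event2/event.h>
#include <event2/bufferevent.h>

/* Illustrative only: limit one bufferevent to ~64 KB/s with a 256 KB
 * burst, and put another into a group sharing a 1 MB/s bucket. The
 * NULL tick length selects the default one-second tick. */
static int
apply_rate_limits(struct event_base *base,
    struct bufferevent *bev, struct bufferevent *grouped_bev)
{
	struct ev_token_bucket_cfg *per_bev_cfg, *group_cfg;
	struct bufferevent_rate_limit_group *group;

	per_bev_cfg = ev_token_bucket_cfg_new(65536, 262144, 65536, 262144, NULL);
	group_cfg = ev_token_bucket_cfg_new(1048576, 1048576, 1048576, 1048576, NULL);
	if (!per_bev_cfg || !group_cfg)
		return -1;

	if (bufferevent_set_rate_limit(bev, per_bev_cfg) < 0)
		return -1;

	group = bufferevent_rate_limit_group_new(base, group_cfg);
	if (!group)
		return -1;
	return bufferevent_add_to_rate_limit_group(grouped_bev, group);
}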

View file

@ -1,688 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
* Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef _EVENT_HAVE_STDARG_H
#include <stdarg.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef _EVENT_HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef _EVENT_HAVE_NETINET_IN6_H
#include <netinet/in6.h>
#endif
#include "event2/util.h"
#include "event2/bufferevent.h"
#include "event2/buffer.h"
#include "event2/bufferevent_struct.h"
#include "event2/bufferevent_compat.h"
#include "event2/event.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "bufferevent-internal.h"
#include "util-internal.h"
#ifdef WIN32
#include "iocp-internal.h"
#endif
/* prototypes */
static int be_socket_enable(struct bufferevent *, short);
static int be_socket_disable(struct bufferevent *, short);
static void be_socket_destruct(struct bufferevent *);
static int be_socket_adj_timeouts(struct bufferevent *);
static int be_socket_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
static int be_socket_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
static void be_socket_setfd(struct bufferevent *, evutil_socket_t);
const struct bufferevent_ops bufferevent_ops_socket = {
"socket",
evutil_offsetof(struct bufferevent_private, bev),
be_socket_enable,
be_socket_disable,
be_socket_destruct,
be_socket_adj_timeouts,
be_socket_flush,
be_socket_ctrl,
};
#define be_socket_add(ev, t) \
_bufferevent_add_event((ev), (t))
static void
bufferevent_socket_outbuf_cb(struct evbuffer *buf,
const struct evbuffer_cb_info *cbinfo,
void *arg)
{
struct bufferevent *bufev = arg;
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
if (cbinfo->n_added &&
(bufev->enabled & EV_WRITE) &&
!event_pending(&bufev->ev_write, EV_WRITE, NULL) &&
!bufev_p->write_suspended) {
/* Somebody added data to the buffer, and we would like to
* write, and we were not writing. So, start writing. */
be_socket_add(&bufev->ev_write, &bufev->timeout_write);
/* XXXX handle failure from be_socket_add */
}
}
static void
bufferevent_readcb(evutil_socket_t fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
struct evbuffer *input;
int res = 0;
short what = BEV_EVENT_READING;
int howmuch = -1, readmax=-1;
_bufferevent_incref_and_lock(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
goto error;
}
input = bufev->input;
/*
* If we have a high watermark configured then we don't want to
* read more data than would make us reach the watermark.
*/
if (bufev->wm_read.high != 0) {
howmuch = bufev->wm_read.high - evbuffer_get_length(input);
/* we somehow lowered the watermark, stop reading */
if (howmuch <= 0) {
bufferevent_wm_suspend_read(bufev);
goto done;
}
}
readmax = _bufferevent_get_read_max(bufev_p);
if (howmuch < 0 || howmuch > readmax) /* The use of -1 for "unlimited"
* uglifies this code. */
howmuch = readmax;
if (bufev_p->read_suspended)
goto done;
evbuffer_unfreeze(input, 0);
res = evbuffer_read(input, fd, howmuch);
evbuffer_freeze(input, 0);
if (res == -1) {
int err = evutil_socket_geterror(fd);
if (EVUTIL_ERR_RW_RETRIABLE(err))
goto reschedule;
/* error case */
what |= BEV_EVENT_ERROR;
} else if (res == 0) {
/* eof case */
what |= BEV_EVENT_EOF;
}
if (res <= 0)
goto error;
_bufferevent_decrement_read_buckets(bufev_p, res);
/* Invoke the user callback - must always be called last */
if (evbuffer_get_length(input) >= bufev->wm_read.low)
_bufferevent_run_readcb(bufev);
goto done;
reschedule:
goto done;
error:
bufferevent_disable(bufev, EV_READ);
_bufferevent_run_eventcb(bufev, what);
done:
_bufferevent_decref_and_unlock(bufev);
}
static void
bufferevent_writecb(evutil_socket_t fd, short event, void *arg)
{
struct bufferevent *bufev = arg;
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
int res = 0;
short what = BEV_EVENT_WRITING;
int connected = 0;
int atmost = -1;
_bufferevent_incref_and_lock(bufev);
if (event == EV_TIMEOUT) {
what |= BEV_EVENT_TIMEOUT;
goto error;
}
if (bufev_p->connecting) {
int c = evutil_socket_finished_connecting(fd);
/* we need to fake the error if the connection was refused
* immediately - usually connection to localhost on BSD */
if (bufev_p->connection_refused) {
bufev_p->connection_refused = 0;
c = -1;
}
if (c == 0)
goto done;
bufev_p->connecting = 0;
if (c < 0) {
event_del(&bufev->ev_write);
event_del(&bufev->ev_read);
_bufferevent_run_eventcb(bufev, BEV_EVENT_ERROR);
goto done;
} else {
connected = 1;
#ifdef WIN32
if (BEV_IS_ASYNC(bufev)) {
event_del(&bufev->ev_write);
bufferevent_async_set_connected(bufev);
_bufferevent_run_eventcb(bufev,
BEV_EVENT_CONNECTED);
goto done;
}
#endif
_bufferevent_run_eventcb(bufev,
BEV_EVENT_CONNECTED);
if (!(bufev->enabled & EV_WRITE) ||
bufev_p->write_suspended) {
event_del(&bufev->ev_write);
goto done;
}
}
}
atmost = _bufferevent_get_write_max(bufev_p);
if (bufev_p->write_suspended)
goto done;
if (evbuffer_get_length(bufev->output)) {
evbuffer_unfreeze(bufev->output, 1);
res = evbuffer_write_atmost(bufev->output, fd, atmost);
evbuffer_freeze(bufev->output, 1);
if (res == -1) {
int err = evutil_socket_geterror(fd);
if (EVUTIL_ERR_RW_RETRIABLE(err))
goto reschedule;
what |= BEV_EVENT_ERROR;
} else if (res == 0) {
/* eof case
XXXX Actually, a 0 on write doesn't indicate
an EOF. An ECONNRESET might be more typical.
*/
what |= BEV_EVENT_EOF;
}
if (res <= 0)
goto error;
_bufferevent_decrement_write_buckets(bufev_p, res);
}
if (evbuffer_get_length(bufev->output) == 0) {
event_del(&bufev->ev_write);
}
/*
* Invoke the user callback if our buffer is drained or below the
* low watermark.
*/
if ((res || !connected) &&
evbuffer_get_length(bufev->output) <= bufev->wm_write.low) {
_bufferevent_run_writecb(bufev);
}
goto done;
reschedule:
if (evbuffer_get_length(bufev->output) == 0) {
event_del(&bufev->ev_write);
}
goto done;
error:
bufferevent_disable(bufev, EV_WRITE);
_bufferevent_run_eventcb(bufev, what);
done:
_bufferevent_decref_and_unlock(bufev);
}
struct bufferevent *
bufferevent_socket_new(struct event_base *base, evutil_socket_t fd,
int options)
{
struct bufferevent_private *bufev_p;
struct bufferevent *bufev;
#ifdef WIN32
if (base && event_base_get_iocp(base))
return bufferevent_async_new(base, fd, options);
#endif
if ((bufev_p = mm_calloc(1, sizeof(struct bufferevent_private)))== NULL)
return NULL;
if (bufferevent_init_common(bufev_p, base, &bufferevent_ops_socket,
options) < 0) {
mm_free(bufev_p);
return NULL;
}
bufev = &bufev_p->bev;
event_assign(&bufev->ev_read, bufev->ev_base, fd,
EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
event_assign(&bufev->ev_write, bufev->ev_base, fd,
EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);
evbuffer_add_cb(bufev->output, bufferevent_socket_outbuf_cb, bufev);
evbuffer_freeze(bufev->input, 0);
evbuffer_freeze(bufev->output, 1);
return bufev;
}
int
bufferevent_socket_connect(struct bufferevent *bev,
struct sockaddr *sa, int socklen)
{
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
evutil_socket_t fd;
int r = 0;
int result=-1;
int ownfd = 0;
_bufferevent_incref_and_lock(bev);
if (!bufev_p)
goto done;
fd = bufferevent_getfd(bev);
if (fd < 0) {
if (!sa)
goto done;
fd = socket(sa->sa_family, SOCK_STREAM, 0);
if (fd < 0)
goto done;
if (evutil_make_socket_nonblocking(fd)<0)
goto done;
ownfd = 1;
}
if (sa) {
#ifdef WIN32
if (bufferevent_async_can_connect(bev)) {
bufferevent_setfd(bev, fd);
r = bufferevent_async_connect(bev, fd, sa, socklen);
if (r < 0)
goto freesock;
bufev_p->connecting = 1;
result = 0;
goto done;
} else
#endif
r = evutil_socket_connect(&fd, sa, socklen);
if (r < 0)
goto freesock;
}
#ifdef WIN32
/* ConnectEx() isn't always around, even when IOCP is enabled.
* Here, we borrow the socket object's write handler to fall back
* on a non-blocking connect() when ConnectEx() is unavailable. */
if (BEV_IS_ASYNC(bev)) {
event_assign(&bev->ev_write, bev->ev_base, fd,
EV_WRITE|EV_PERSIST, bufferevent_writecb, bev);
}
#endif
bufferevent_setfd(bev, fd);
if (r == 0) {
if (! be_socket_enable(bev, EV_WRITE)) {
bufev_p->connecting = 1;
result = 0;
goto done;
}
} else if (r == 1) {
/* The connect succeeded already. How very BSD of it. */
result = 0;
bufev_p->connecting = 1;
event_active(&bev->ev_write, EV_WRITE, 1);
} else {
/* The connect failed already. How very BSD of it. */
bufev_p->connection_refused = 1;
bufev_p->connecting = 1;
result = 0;
event_active(&bev->ev_write, EV_WRITE, 1);
}
goto done;
freesock:
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
if (ownfd)
evutil_closesocket(fd);
/* do something about the error? */
done:
_bufferevent_decref_and_unlock(bev);
return result;
}
static void
bufferevent_connect_getaddrinfo_cb(int result, struct evutil_addrinfo *ai,
void *arg)
{
struct bufferevent *bev = arg;
struct bufferevent_private *bev_p =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
int r;
BEV_LOCK(bev);
bufferevent_unsuspend_write(bev, BEV_SUSPEND_LOOKUP);
bufferevent_unsuspend_read(bev, BEV_SUSPEND_LOOKUP);
if (result != 0) {
bev_p->dns_error = result;
_bufferevent_run_eventcb(bev, BEV_EVENT_ERROR);
_bufferevent_decref_and_unlock(bev);
if (ai)
evutil_freeaddrinfo(ai);
return;
}
/* XXX use the other addrinfos? */
r = bufferevent_socket_connect(bev, ai->ai_addr, ai->ai_addrlen);
_bufferevent_decref_and_unlock(bev);
evutil_freeaddrinfo(ai);
}
int
bufferevent_socket_connect_hostname(struct bufferevent *bev,
struct evdns_base *evdns_base, int family, const char *hostname, int port)
{
char portbuf[10];
struct evutil_addrinfo hint;
int err;
struct bufferevent_private *bev_p =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
if (family != AF_INET && family != AF_INET6 && family != AF_UNSPEC)
return -1;
if (port < 1 || port > 65535)
return -1;
BEV_LOCK(bev);
bev_p->dns_error = 0;
BEV_UNLOCK(bev);
evutil_snprintf(portbuf, sizeof(portbuf), "%d", port);
memset(&hint, 0, sizeof(hint));
hint.ai_family = family;
hint.ai_protocol = IPPROTO_TCP;
hint.ai_socktype = SOCK_STREAM;
bufferevent_suspend_write(bev, BEV_SUSPEND_LOOKUP);
bufferevent_suspend_read(bev, BEV_SUSPEND_LOOKUP);
bufferevent_incref(bev);
err = evutil_getaddrinfo_async(evdns_base, hostname, portbuf,
&hint, bufferevent_connect_getaddrinfo_cb, bev);
if (err == 0) {
return 0;
} else {
bufferevent_unsuspend_write(bev, BEV_SUSPEND_LOOKUP);
bufferevent_unsuspend_read(bev, BEV_SUSPEND_LOOKUP);
return -1;
}
}
int
bufferevent_socket_get_dns_error(struct bufferevent *bev)
{
int rv;
struct bufferevent_private *bev_p =
EVUTIL_UPCAST(bev, struct bufferevent_private, bev);
BEV_LOCK(bev);
rv = bev_p->dns_error;
BEV_UNLOCK(bev);
return rv;
}
/*
* Create a new buffered event object.
*
* The read callback is invoked whenever we read new data.
* The write callback is invoked whenever the output buffer is drained.
* The error callback is invoked on a write/read error or on EOF.
*
* Both the read and write callbacks may be NULL. The error callback is not
* allowed to be NULL and must always be provided.
*/
struct bufferevent *
bufferevent_new(evutil_socket_t fd,
bufferevent_data_cb readcb, bufferevent_data_cb writecb,
bufferevent_event_cb eventcb, void *cbarg)
{
struct bufferevent *bufev;
if (!(bufev = bufferevent_socket_new(NULL, fd, 0)))
return NULL;
bufferevent_setcb(bufev, readcb, writecb, eventcb, cbarg);
return bufev;
}
static int
be_socket_enable(struct bufferevent *bufev, short event)
{
if (event & EV_READ) {
if (be_socket_add(&bufev->ev_read,&bufev->timeout_read) == -1)
return -1;
}
if (event & EV_WRITE) {
if (be_socket_add(&bufev->ev_write,&bufev->timeout_write) == -1)
return -1;
}
return 0;
}
static int
be_socket_disable(struct bufferevent *bufev, short event)
{
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
if (event & EV_READ) {
if (event_del(&bufev->ev_read) == -1)
return -1;
}
/* Don't actually disable the write if we are trying to connect. */
if ((event & EV_WRITE) && ! bufev_p->connecting) {
if (event_del(&bufev->ev_write) == -1)
return -1;
}
return 0;
}
static void
be_socket_destruct(struct bufferevent *bufev)
{
struct bufferevent_private *bufev_p =
EVUTIL_UPCAST(bufev, struct bufferevent_private, bev);
evutil_socket_t fd;
EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
fd = event_get_fd(&bufev->ev_read);
event_del(&bufev->ev_read);
event_del(&bufev->ev_write);
if ((bufev_p->options & BEV_OPT_CLOSE_ON_FREE) && fd >= 0)
EVUTIL_CLOSESOCKET(fd);
}
static int
be_socket_adj_timeouts(struct bufferevent *bufev)
{
int r = 0;
if (event_pending(&bufev->ev_read, EV_READ, NULL))
if (be_socket_add(&bufev->ev_read, &bufev->timeout_read) < 0)
r = -1;
if (event_pending(&bufev->ev_write, EV_WRITE, NULL)) {
if (be_socket_add(&bufev->ev_write, &bufev->timeout_write) < 0)
r = -1;
}
return r;
}
static int
be_socket_flush(struct bufferevent *bev, short iotype,
enum bufferevent_flush_mode mode)
{
return 0;
}
static void
be_socket_setfd(struct bufferevent *bufev, evutil_socket_t fd)
{
BEV_LOCK(bufev);
EVUTIL_ASSERT(bufev->be_ops == &bufferevent_ops_socket);
event_del(&bufev->ev_read);
event_del(&bufev->ev_write);
event_assign(&bufev->ev_read, bufev->ev_base, fd,
EV_READ|EV_PERSIST, bufferevent_readcb, bufev);
event_assign(&bufev->ev_write, bufev->ev_base, fd,
EV_WRITE|EV_PERSIST, bufferevent_writecb, bufev);
if (fd >= 0)
bufferevent_enable(bufev, bufev->enabled);
BEV_UNLOCK(bufev);
}
/* XXXX Should non-socket bufferevents support this? */
int
bufferevent_priority_set(struct bufferevent *bufev, int priority)
{
int r = -1;
BEV_LOCK(bufev);
if (bufev->be_ops != &bufferevent_ops_socket)
goto done;
if (event_priority_set(&bufev->ev_read, priority) == -1)
goto done;
if (event_priority_set(&bufev->ev_write, priority) == -1)
goto done;
r = 0;
done:
BEV_UNLOCK(bufev);
return r;
}
/* XXXX Should non-socket bufferevents support this? */
int
bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
{
int res = -1;
BEV_LOCK(bufev);
if (bufev->be_ops != &bufferevent_ops_socket)
goto done;
bufev->ev_base = base;
res = event_base_set(base, &bufev->ev_read);
if (res == -1)
goto done;
res = event_base_set(base, &bufev->ev_write);
done:
BEV_UNLOCK(bufev);
return res;
}
static int
be_socket_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
union bufferevent_ctrl_data *data)
{
switch (op) {
case BEV_CTRL_SET_FD:
be_socket_setfd(bev, data->fd);
return 0;
case BEV_CTRL_GET_FD:
data->fd = event_get_fd(&bev->ev_read);
return 0;
case BEV_CTRL_GET_UNDERLYING:
default:
return -1;
}
}
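
A hedged sketch of the connect path implemented above (not part of the removed file; start_connect and on_event are hypothetical helpers): bufferevent_socket_new() plus bufferevent_socket_connect_hostname() resolves the name asynchronously and reports the outcome through the event callback.

#include <sys/socket.h>
#include <event2/event.h>
#include <event2/bufferevent.h>
#include <event2/dns.h>

static void
on_event(struct bufferevent *bev, short what, void *ctx)
{
	(void)ctx;
	if (what & BEV_EVENT_CONNECTED) {
		/* Connected; start talking to the peer. */
		bufferevent_write(bev, "GET / HTTP/1.0\r\n\r\n", 18);
	} else if (what & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
		bufferevent_free(bev);
	}
}

/* Illustrative only: resolve 'host' via 'dns' and connect to it. */
static int
start_connect(struct event_base *base, struct evdns_base *dns,
    const char *host, int port)
{
	struct bufferevent *bev;
	bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
	if (!bev)
		return -1;
	bufferevent_setcb(bev, NULL, NULL, on_event, NULL);
	bufferevent_enable(bev, EV_READ|EV_WRITE);
	return bufferevent_socket_connect_hostname(bev, dns, AF_UNSPEC,
	    host, port);
}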

View file

@ -1,101 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CHANGELIST_H_
#define _CHANGELIST_H_
/*
A "changelist" is a list of all the fd status changes that should be made
between calls to the backend's dispatch function. There are a few reasons
that a backend would want to queue changes like this rather than processing
them immediately.
1) Sometimes applications will add and delete the same event more than
once between calls to dispatch. Processing these changes immediately
is needless, and potentially expensive (especially if we're on a system
that makes one syscall per changed event).
2) Sometimes we can coalesce multiple changes on the same fd into a single
syscall if we know about them in advance. For example, epoll can do an
add and a delete at the same time, but only if we have found out about
both of them before we tell epoll.
3) Sometimes adding an event that we immediately delete can cause
unintended consequences: in kqueue, this makes pending events get
reported spuriously.
*/
#include <event2/util.h>
/** Represents a pending change to the events being watched on an fd or signal. */
struct event_change {
/** The fd or signal whose events are to be changed */
evutil_socket_t fd;
/* The events that were enabled on the fd before any of these changes
were made. May include EV_READ or EV_WRITE. */
short old_events;
/* The changes that we want to make in reading and writing on this fd.
* If this is a signal, then read_change has EV_CHANGE_SIGNAL set,
* and write_change is unused. */
ev_uint8_t read_change;
ev_uint8_t write_change;
};
/* Flags for read_change and write_change. */
/* If set, add the event. */
#define EV_CHANGE_ADD 0x01
/* If set, delete the event. Exclusive with EV_CHANGE_ADD */
#define EV_CHANGE_DEL 0x02
/* If set, this event refers to a signal, not an fd. */
#define EV_CHANGE_SIGNAL EV_SIGNAL
/* Set for persistent events. Currently not used. */
#define EV_CHANGE_PERSIST EV_PERSIST
/* Set for adding edge-triggered events. */
#define EV_CHANGE_ET EV_ET
/* The value of fdinfo_size that a backend should use if it is letting
* changelist handle its add and delete functions. */
#define EVENT_CHANGELIST_FDINFO_SIZE sizeof(int)
/** Set up the data fields in a changelist. */
void event_changelist_init(struct event_changelist *changelist);
/** Remove every change in the changelist, and make corresponding changes
* in the event maps in the base. This function is generally used right
* after making all the changes in the changelist. */
void event_changelist_remove_all(struct event_changelist *changelist,
struct event_base *base);
/** Free all memory held in a changelist. */
void event_changelist_freemem(struct event_changelist *changelist);
/** Implementation of eventop_add that queues the event in a changelist. */
int event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
void *p);
/** Implementation of eventop_del that queues the event in a changelist. */
int event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
void *p);
#endif
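To make the coalescing rationale above concrete, here is a small self-contained sketch; the names are invented for illustration and the logic is simplified relative to the real changelist code.
#include <stdio.h>
#define MY_CHANGE_ADD 0x01
#define MY_CHANGE_DEL 0x02
/* Merge a newly requested read-side change into one already queued for the
 * same fd.  An event queued for ADD and then deleted before dispatch never
 * needs to reach the kernel at all. */
static unsigned merge_read_change(unsigned queued, unsigned incoming)
{
    if ((queued & MY_CHANGE_ADD) && (incoming & MY_CHANGE_DEL))
        return 0;
    return queued | incoming;
}
int main(void)
{
    unsigned q = MY_CHANGE_ADD;              /* event added ...                */
    q = merge_read_change(q, MY_CHANGE_DEL); /* ... then deleted before dispatch */
    printf("resulting change: %u (0 means no syscall needed)\n", q);
    return 0;
}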


@@ -1,488 +0,0 @@
/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)queue.h 8.5 (Berkeley) 8/20/94
*/
#ifndef _SYS_QUEUE_H_
#define _SYS_QUEUE_H_
/*
* This file defines five types of data structures: singly-linked lists,
* lists, simple queues, tail queues, and circular queues.
*
*
* A singly-linked list is headed by a single forward pointer. The elements
* are singly linked for minimum space and pointer manipulation overhead at
* the expense of O(n) removal for arbitrary elements. New elements can be
* added to the list after an existing element or at the head of the list.
* Elements being removed from the head of the list should use the explicit
* macro for this purpose for optimum efficiency. A singly-linked list may
* only be traversed in the forward direction. Singly-linked lists are ideal
* for applications with large datasets and few or no removals or for
* implementing a LIFO queue.
*
* A list is headed by a single forward pointer (or an array of forward
* pointers for a hash table header). The elements are doubly linked
* so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before
* or after an existing element or at the head of the list. A list
* may only be traversed in the forward direction.
*
* A simple queue is headed by a pair of pointers, one the head of the
* list and the other to the tail of the list. The elements are singly
* linked to save space, so elements can only be removed from the
* head of the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the
* list. A simple queue may only be traversed in the forward direction.
*
* A tail queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or
* after an existing element, at the head of the list, or at the end of
* the list. A tail queue may be traversed in either direction.
*
* A circle queue is headed by a pair of pointers, one to the head of the
* list and the other to the tail of the list. The elements are doubly
* linked so that an arbitrary element can be removed without a need to
* traverse the list. New elements can be added to the list before or after
* an existing element, at the head of the list, or at the end of the list.
* A circle queue may be traversed in either direction, but has a more
* complex end of list detection.
*
* For details on the use of these macros, see the queue(3) manual page.
*/
/*
* Singly-linked List definitions.
*/
#define SLIST_HEAD(name, type) \
struct name { \
struct type *slh_first; /* first element */ \
}
#define SLIST_HEAD_INITIALIZER(head) \
{ NULL }
#ifndef WIN32
#define SLIST_ENTRY(type) \
struct { \
struct type *sle_next; /* next element */ \
}
#endif
/*
* Singly-linked List access methods.
*/
#define SLIST_FIRST(head) ((head)->slh_first)
#define SLIST_END(head) NULL
#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
#define SLIST_FOREACH(var, head, field) \
for((var) = SLIST_FIRST(head); \
(var) != SLIST_END(head); \
(var) = SLIST_NEXT(var, field))
/*
* Singly-linked List functions.
*/
#define SLIST_INIT(head) { \
SLIST_FIRST(head) = SLIST_END(head); \
}
#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
(elm)->field.sle_next = (slistelm)->field.sle_next; \
(slistelm)->field.sle_next = (elm); \
} while (0)
#define SLIST_INSERT_HEAD(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
(head)->slh_first = (elm); \
} while (0)
#define SLIST_REMOVE_HEAD(head, field) do { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (0)
/*
* List definitions.
*/
#define LIST_HEAD(name, type) \
struct name { \
struct type *lh_first; /* first element */ \
}
#define LIST_HEAD_INITIALIZER(head) \
{ NULL }
#define LIST_ENTRY(type) \
struct { \
struct type *le_next; /* next element */ \
struct type **le_prev; /* address of previous next element */ \
}
/*
* List access methods
*/
#define LIST_FIRST(head) ((head)->lh_first)
#define LIST_END(head) NULL
#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
#define LIST_NEXT(elm, field) ((elm)->field.le_next)
#define LIST_FOREACH(var, head, field) \
for((var) = LIST_FIRST(head); \
(var)!= LIST_END(head); \
(var) = LIST_NEXT(var, field))
/*
* List functions.
*/
#define LIST_INIT(head) do { \
LIST_FIRST(head) = LIST_END(head); \
} while (0)
#define LIST_INSERT_AFTER(listelm, elm, field) do { \
if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
(listelm)->field.le_next->field.le_prev = \
&(elm)->field.le_next; \
(listelm)->field.le_next = (elm); \
(elm)->field.le_prev = &(listelm)->field.le_next; \
} while (0)
#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.le_prev = (listelm)->field.le_prev; \
(elm)->field.le_next = (listelm); \
*(listelm)->field.le_prev = (elm); \
(listelm)->field.le_prev = &(elm)->field.le_next; \
} while (0)
#define LIST_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.le_next = (head)->lh_first) != NULL) \
(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
(head)->lh_first = (elm); \
(elm)->field.le_prev = &(head)->lh_first; \
} while (0)
#define LIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (0)
#define LIST_REPLACE(elm, elm2, field) do { \
if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
(elm2)->field.le_next->field.le_prev = \
&(elm2)->field.le_next; \
(elm2)->field.le_prev = (elm)->field.le_prev; \
*(elm2)->field.le_prev = (elm2); \
} while (0)
/*
* Simple queue definitions.
*/
#define SIMPLEQ_HEAD(name, type) \
struct name { \
struct type *sqh_first; /* first element */ \
struct type **sqh_last; /* addr of last next element */ \
}
#define SIMPLEQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).sqh_first }
#define SIMPLEQ_ENTRY(type) \
struct { \
struct type *sqe_next; /* next element */ \
}
/*
* Simple queue access methods.
*/
#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
#define SIMPLEQ_END(head) NULL
#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
#define SIMPLEQ_FOREACH(var, head, field) \
for((var) = SIMPLEQ_FIRST(head); \
(var) != SIMPLEQ_END(head); \
(var) = SIMPLEQ_NEXT(var, field))
/*
* Simple queue functions.
*/
#define SIMPLEQ_INIT(head) do { \
(head)->sqh_first = NULL; \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
(head)->sqh_last = &(elm)->field.sqe_next; \
(head)->sqh_first = (elm); \
} while (0)
#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.sqe_next = NULL; \
*(head)->sqh_last = (elm); \
(head)->sqh_last = &(elm)->field.sqe_next; \
} while (0)
#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
(head)->sqh_last = &(elm)->field.sqe_next; \
(listelm)->field.sqe_next = (elm); \
} while (0)
#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
(head)->sqh_last = &(head)->sqh_first; \
} while (0)
/*
* Tail queue definitions.
*/
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; /* first element */ \
struct type **tqh_last; /* addr of last next element */ \
}
#define TAILQ_HEAD_INITIALIZER(head) \
{ NULL, &(head).tqh_first }
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
/*
* tail queue access methods
*/
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_LAST(head, headname) \
(*(((struct headname *)((head)->tqh_last))->tqh_last))
/* XXX */
#define TAILQ_PREV(elm, headname, field) \
(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
#define TAILQ_EMPTY(head) \
(TAILQ_FIRST(head) == TAILQ_END(head))
#define TAILQ_FOREACH(var, head, field) \
for((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
for((var) = TAILQ_LAST(head, headname); \
(var) != TAILQ_END(head); \
(var) = TAILQ_PREV(var, headname, field))
/*
* Tail queue functions.
*/
#define TAILQ_INIT(head) do { \
(head)->tqh_first = NULL; \
(head)->tqh_last = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_HEAD(head, elm, field) do { \
if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
(head)->tqh_first->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(head)->tqh_first = (elm); \
(elm)->field.tqe_prev = &(head)->tqh_first; \
} while (0)
#define TAILQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.tqe_next = NULL; \
(elm)->field.tqe_prev = (head)->tqh_last; \
*(head)->tqh_last = (elm); \
(head)->tqh_last = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
(elm)->field.tqe_next->field.tqe_prev = \
&(elm)->field.tqe_next; \
else \
(head)->tqh_last = &(elm)->field.tqe_next; \
(listelm)->field.tqe_next = (elm); \
(elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
} while (0)
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#define TAILQ_REMOVE(head, elm, field) do { \
if (((elm)->field.tqe_next) != NULL) \
(elm)->field.tqe_next->field.tqe_prev = \
(elm)->field.tqe_prev; \
else \
(head)->tqh_last = (elm)->field.tqe_prev; \
*(elm)->field.tqe_prev = (elm)->field.tqe_next; \
} while (0)
#define TAILQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
(elm2)->field.tqe_next->field.tqe_prev = \
&(elm2)->field.tqe_next; \
else \
(head)->tqh_last = &(elm2)->field.tqe_next; \
(elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
*(elm2)->field.tqe_prev = (elm2); \
} while (0)
/*
* Circular queue definitions.
*/
#define CIRCLEQ_HEAD(name, type) \
struct name { \
struct type *cqh_first; /* first element */ \
struct type *cqh_last; /* last element */ \
}
#define CIRCLEQ_HEAD_INITIALIZER(head) \
{ CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
#define CIRCLEQ_ENTRY(type) \
struct { \
struct type *cqe_next; /* next element */ \
struct type *cqe_prev; /* previous element */ \
}
/*
* Circular queue access methods
*/
#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
#define CIRCLEQ_LAST(head) ((head)->cqh_last)
#define CIRCLEQ_END(head) ((void *)(head))
#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
#define CIRCLEQ_EMPTY(head) \
(CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
#define CIRCLEQ_FOREACH(var, head, field) \
for((var) = CIRCLEQ_FIRST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_NEXT(var, field))
#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
for((var) = CIRCLEQ_LAST(head); \
(var) != CIRCLEQ_END(head); \
(var) = CIRCLEQ_PREV(var, field))
/*
* Circular queue functions.
*/
#define CIRCLEQ_INIT(head) do { \
(head)->cqh_first = CIRCLEQ_END(head); \
(head)->cqh_last = CIRCLEQ_END(head); \
} while (0)
#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm)->field.cqe_next; \
(elm)->field.cqe_prev = (listelm); \
if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(listelm)->field.cqe_next->field.cqe_prev = (elm); \
(listelm)->field.cqe_next = (elm); \
} while (0)
#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
(elm)->field.cqe_next = (listelm); \
(elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(listelm)->field.cqe_prev->field.cqe_next = (elm); \
(listelm)->field.cqe_prev = (elm); \
} while (0)
#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
(elm)->field.cqe_next = (head)->cqh_first; \
(elm)->field.cqe_prev = CIRCLEQ_END(head); \
if ((head)->cqh_last == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm); \
else \
(head)->cqh_first->field.cqe_prev = (elm); \
(head)->cqh_first = (elm); \
} while (0)
#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
(elm)->field.cqe_next = CIRCLEQ_END(head); \
(elm)->field.cqe_prev = (head)->cqh_last; \
if ((head)->cqh_first == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm); \
else \
(head)->cqh_last->field.cqe_next = (elm); \
(head)->cqh_last = (elm); \
} while (0)
#define CIRCLEQ_REMOVE(head, elm, field) do { \
if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
(head)->cqh_last = (elm)->field.cqe_prev; \
else \
(elm)->field.cqe_next->field.cqe_prev = \
(elm)->field.cqe_prev; \
if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
(head)->cqh_first = (elm)->field.cqe_next; \
else \
(elm)->field.cqe_prev->field.cqe_next = \
(elm)->field.cqe_next; \
} while (0)
#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
CIRCLEQ_END(head)) \
(head).cqh_last = (elm2); \
else \
(elm2)->field.cqe_next->field.cqe_prev = (elm2); \
if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
CIRCLEQ_END(head)) \
(head).cqh_first = (elm2); \
else \
(elm2)->field.cqe_prev->field.cqe_next = (elm2); \
} while (0)
#endif /* !_SYS_QUEUE_H_ */
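A minimal usage sketch of the tail-queue macros defined above (illustrative only; most platforms also ship equivalent macros in their own <sys/queue.h>):
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>
struct item {
    int value;
    TAILQ_ENTRY(item) entries;      /* embeds the next/prev links in each element */
};
TAILQ_HEAD(item_list, item);
int main(void)
{
    struct item_list list;
    struct item *it;
    int i;
    TAILQ_INIT(&list);
    for (i = 0; i < 3; i++) {
        it = malloc(sizeof(*it));
        it->value = i;
        TAILQ_INSERT_TAIL(&list, it, entries);
    }
    TAILQ_FOREACH(it, &list, entries)
        printf("%d\n", it->value);  /* prints 0, 1, 2 in insertion order */
    return 0;
}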


@@ -1,99 +0,0 @@
#! /bin/sh
# Wrapper for compilers which do not understand `-c -o'.
# Copyright 1999, 2000 Free Software Foundation, Inc.
# Written by Tom Tromey <tromey@cygnus.com>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Usage:
# compile PROGRAM [ARGS]...
# `-o FOO.o' is removed from the args passed to the actual compile.
prog=$1
shift
ofile=
cfile=
args=
while test $# -gt 0; do
case "$1" in
-o)
# configure might choose to run compile as `compile cc -o foo foo.c'.
# So we do something ugly here.
ofile=$2
shift
case "$ofile" in
*.o | *.obj)
;;
*)
args="$args -o $ofile"
ofile=
;;
esac
;;
*.c)
cfile=$1
args="$args $1"
;;
*)
args="$args $1"
;;
esac
shift
done
if test -z "$ofile" || test -z "$cfile"; then
# If no `-o' option was seen then we might have been invoked from a
# pattern rule where we don't need one. That is ok -- this is a
# normal compilation that the losing compiler can handle. If no
# `.c' file was seen then we are probably linking. That is also
# ok.
exec "$prog" $args
fi
# Name of file we expect compiler to create.
cofile=`echo $cfile | sed -e 's|^.*/||' -e 's/\.c$/.o/'`
# Create the lock directory.
# Note: use `[/.-]' here to ensure that we don't use the same name
# that we are using for the .o file. Also, base the name on the expected
# object file name, since that is what matters with a parallel build.
lockdir=`echo $cofile | sed -e 's|[/.-]|_|g'`.d
while true; do
if mkdir $lockdir > /dev/null 2>&1; then
break
fi
sleep 1
done
# FIXME: race condition here if user kills between mkdir and trap.
trap "rmdir $lockdir; exit 1" 1 2 15
# Run the compile.
"$prog" $args
status=$?
if test -f "$cofile"; then
mv "$cofile" "$ofile"
fi
rmdir $lockdir
exit $status


@@ -1,859 +0,0 @@
dnl configure.in for libevent
dnl Dug Song <dugsong@monkey.org>
dnl
dnl Updates for Autoconf 2.68 by Ralph Castain <rhc@open-mpi.org> and
dnl Jeff Squyres <jsquyres@cisco.com>. Also added an addition of
dnl --enable-* options for Open MPI-specific stuff (clearly marked in
dnl comments).
dnl
AC_INIT([libevent], [2.0.7-rc-openmpi])
AC_PREREQ(2.64)
AC_CONFIG_MACRO_DIR([m4])
AM_INIT_AUTOMAKE(libevent,2.0.7-rc-openmpi)
# If Automake supports silent rules, enable them.
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
AM_CONFIG_HEADER(config.h)
AC_DEFINE(NUMERIC_VERSION, 0x02000700, [Numeric representation of the version])
AC_LANG([C])
dnl Initialize prefix.
if test "$prefix" = "NONE"; then
prefix="/usr/local"
fi
dnl Checks for programs.
AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_LN_S
AC_PROG_MKDIR_P
AC_PROG_GCC_TRADITIONAL
if test "$GCC" = "yes" ; then
# Enable many gcc warnings by default...
CFLAGS="$CFLAGS -Wall"
# And disable the strict-aliasing optimization, since it breaks
# our sockaddr-handling code in strange ways.
CFLAGS="$CFLAGS -fno-strict-aliasing"
fi
AC_ARG_ENABLE(gcc-warnings,
AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC))
AC_ARG_ENABLE(thread-support,
AS_HELP_STRING(--disable-thread-support, disable support for threading),
[], [enable_thread_support=yes])
AC_ARG_ENABLE(malloc-replacement,
AS_HELP_STRING(--disable-malloc-replacement, disable support for replacing the memory mgt functions),
[], [enable_malloc_replacement=yes])
AC_ARG_ENABLE(openssl,
AS_HELP_STRING(--disable-openssl, disable support for openssl encryption),
[], [enable_openssl=yes])
AC_ARG_ENABLE(debug-mode,
AS_HELP_STRING(--disable-debug-mode, disable support for running in debug mode),
[], [enable_debug_mode=yes])
dnl ******* Open MPI ******
dnl Disable many options unless they are specifically enabled. The
dnl defaults for these may change upstream.
AC_ARG_ENABLE([dns],
[AS_HELP_STRING([--disable-dns], [disable DNS support])],
[], [enable_dns=no])
AM_CONDITIONAL(DNS, [test "$enable_dns" = "yes"])
AC_DEFINE(HAVE_DNS, test "$enable_dns" = "yes", [whether we want DNS support])
AC_ARG_ENABLE([http],
[AS_HELP_STRING([--disable-http], [disable HTTP support])],
[], [enable_http=no])
AM_CONDITIONAL(HTTP, [test "$enable_http" = "yes"])
AC_DEFINE(HAVE_HTTP, test "$enable_http" = "yes", [whether we want HTTP support])
AC_ARG_ENABLE([rpc],
[AS_HELP_STRING([--disable-rpc], [disable RPC support])],
[], [enable_rpc=no])
AM_CONDITIONAL(RPC, [test "$enable_rpc" = "yes"])
AC_DEFINE(HAVE_RPC, [test "$enable_rpc" = "yes"], [whether we want RPC support])
AC_ARG_ENABLE([select],
[AS_HELP_STRING([--disable-select], [disable select support])],
[], [enable_select=yes])
AC_ARG_ENABLE([poll],
[AS_HELP_STRING([--disable-poll], [disable poll support])],
[], [enable_poll=yes])
AC_ARG_ENABLE([devpoll],
[AS_HELP_STRING([--disable-devpoll], [disable devpoll support])],
[], [enable_devpoll=yes])
AC_ARG_ENABLE([kqueue],
[AS_HELP_STRING([--disable-kqueue], [disable kqueue support])],
[], [enable_kqueue=yes])
AC_ARG_ENABLE([epoll],
[AS_HELP_STRING([--disable-epoll], [disable epoll support])],
[], [enable_epoll=yes])
AC_ARG_ENABLE([evport],
[AS_HELP_STRING([--disable-evport], [disable evport support])],
[], [enable_evport=yes])
AC_ARG_ENABLE([signal],
[AS_HELP_STRING([--disable-signal], [disable signal support])],
[], [enable_signal=yes])
AC_ARG_ENABLE([hidden-symbols],
[AS_HELP_STRING([--enable-hidden-symbols],
[Use linker "visibility" functionality to hide libevent symbols])],
[], [enable_hidden_symbols=no])
dnl ****** OPEN MPI ******
AC_LANG([C])
AC_PROG_LIBTOOL
dnl Uncomment "AC_DISABLE_SHARED" to make shared libraries not get
dnl built by default. You can also turn shared libs on and off from
dnl the command line with --enable-shared and --disable-shared.
dnl AC_DISABLE_SHARED
AC_SUBST(LIBTOOL_DEPS)
dnl Checks for libraries.
AC_SEARCH_LIBS([inet_ntoa], [nsl])
AC_SEARCH_LIBS([socket], [socket])
AC_SEARCH_LIBS([inet_aton], [resolv])
AC_SEARCH_LIBS([clock_gettime], [rt])
AC_SEARCH_LIBS([sendfile], [sendfile])
dnl Determine if we have zlib for regression tests
dnl Don't put this one in LIBS
save_LIBS="$LIBS"
LIBS=""
ZLIB_LIBS=""
have_zlib=no
AC_SEARCH_LIBS([inflateEnd], [z],
[have_zlib=yes
ZLIB_LIBS="$LIBS"
AC_DEFINE(HAVE_LIBZ, 1, [Define if the system has zlib])])
LIBS="$save_LIBS"
AC_SUBST(ZLIB_LIBS)
AM_CONDITIONAL(ZLIB_REGRESS, [test "$have_zlib" = "yes"])
dnl See if we have openssl. This doesn't go in LIBS either.
if test "$enable_openssl" = "yes"; then
save_LIBS="$LIBS"
LIBS=""
OPENSSL_LIBS=""
have_openssl=no
AC_SEARCH_LIBS([SSL_new], [ssl],
[have_openssl=yes
OPENSSL_LIBS="$LIBS"
AC_DEFINE(HAVE_OPENSSL, 1, [Define if the system has openssl])])
LIBS="$save_LIBS"
AC_SUBST(OPENSSL_LIBS)
fi
dnl Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS(fcntl.h stdarg.h inttypes.h stdint.h stddef.h poll.h unistd.h sys/epoll.h sys/time.h sys/queue.h sys/event.h sys/param.h sys/ioctl.h sys/select.h sys/devpoll.h port.h netinet/in.h netinet/in6.h sys/socket.h sys/uio.h arpa/inet.h sys/eventfd.h sys/mman.h sys/sendfile.h sys/wait.h netdb.h)
AC_CHECK_HEADERS(sys/sysctl.h, [], [], [
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
])
if test "x$ac_cv_header_sys_queue_h" = "xyes"; then
AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h)
AC_EGREP_CPP(yes,
[
#include <sys/queue.h>
#ifdef TAILQ_FOREACH
yes
#endif
], [AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TAILQFOREACH, 1,
[Define if TAILQ_FOREACH is defined in <sys/queue.h>])],
AC_MSG_RESULT(no)
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timeradd in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timeradd
yes
#endif
], [ AC_DEFINE(HAVE_TIMERADD, 1,
[Define if timeradd is defined in <sys/time.h>])
AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timercmp in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timercmp
yes
#endif
], [ AC_DEFINE(HAVE_TIMERCMP, 1,
[Define if timercmp is defined in <sys/time.h>])
AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timerclear in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timerclear
yes
#endif
], [ AC_DEFINE(HAVE_TIMERCLEAR, 1,
[Define if timerclear is defined in <sys/time.h>])
AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
)
fi
if test "x$ac_cv_header_sys_time_h" = "xyes"; then
AC_MSG_CHECKING(for timerisset in sys/time.h)
AC_EGREP_CPP(yes,
[
#include <sys/time.h>
#ifdef timerisset
yes
#endif
], [ AC_DEFINE(HAVE_TIMERISSET, 1,
[Define if timerisset is defined in <sys/time.h>])
AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
)
fi
if test "x$ac_cv_header_sys_sysctl_h" = "xyes"; then
AC_CHECK_DECLS([CTL_KERN, KERN_RANDOM, RANDOM_UUID, KERN_ARND], [], [],
[[#include <sys/types.h>
#include <sys/sysctl.h>]]
)
fi
dnl - check if the macro WIN32 is defined on this compiler.
dnl - (this is how we check for a windows version of GCC)
AC_MSG_CHECKING(for WIN32)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#ifndef WIN32
die horribly
#endif
]], [[int i;]])],
[bwin32=true; AC_MSG_RESULT(yes)],
[bwin32=false; AC_MSG_RESULT(no)]
)
AM_CONDITIONAL(BUILD_WIN32, test x$bwin32 = xtrue)
if test x$bwin32 = xtrue; then
AC_SEARCH_LIBS([getservbyname],[ws2_32])
fi
dnl Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
AC_C_INLINE
AC_HEADER_TIME
dnl Checks for library functions.
AC_CHECK_FUNCS(gettimeofday vasprintf fcntl clock_gettime strtok_r strsep getaddrinfo getnameinfo strlcpy inet_ntop inet_pton signal sigaction strtoll inet_aton pipe eventfd sendfile mmap splice arc4random arc4random_buf issetugid geteuid getegid getservbyname getprotobynumber setenv unsetenv putenv)
# Check for gethostbyname_r in all its glorious incompatible versions.
# (This is cut-and-pasted from Tor, which based its logic on
# Python's configure.in.)
AH_TEMPLATE(HAVE_GETHOSTBYNAME_R,
[Define this if you have any gethostbyname_r()])
AC_CHECK_FUNC(gethostbyname_r, [
AC_MSG_CHECKING([how many arguments gethostbyname_r() wants])
OLD_CFLAGS=$CFLAGS
CFLAGS="$CFLAGS $MY_CPPFLAGS $MY_THREAD_CPPFLAGS $MY_CFLAGS"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <netdb.h>
]], [[
char *cp1, *cp2;
struct hostent *h1, *h2;
int i1, i2;
(void)gethostbyname_r(cp1,h1,cp2,i1,&h2,&i2);
]])],[
AC_DEFINE(HAVE_GETHOSTBYNAME_R)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_6_ARG, 1,
[Define this if gethostbyname_r takes 6 arguments])
AC_MSG_RESULT(6)
], [
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <netdb.h>
]], [[
char *cp1, *cp2;
struct hostent *h1;
int i1, i2;
(void)gethostbyname_r(cp1,h1,cp2,i1,&i2);
]])], [
AC_DEFINE(HAVE_GETHOSTBYNAME_R)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_5_ARG, 1,
[Define this if gethostbyname_r takes 5 arguments])
AC_MSG_RESULT(5)
], [
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <netdb.h>
]], [[
char *cp1;
struct hostent *h1;
struct hostent_data hd;
(void) gethostbyname_r(cp1,h1,&hd);
]])], [
AC_DEFINE(HAVE_GETHOSTBYNAME_R)
AC_DEFINE(HAVE_GETHOSTBYNAME_R_3_ARG, 1,
[Define this if gethostbyname_r takes 3 arguments])
AC_MSG_RESULT(3)
], [
AC_MSG_RESULT(0)
])
])
])
CFLAGS=$OLD_CFLAGS
])
AC_CHECK_SIZEOF(long)
dnl See if we need to check and set visibility capabilities
AS_IF([test "$enable_hidden_symbols" = "yes"],
[OMPI_CHECK_VISIBILITY])
dnl Check backend support methods
AC_MSG_CHECKING(for F_SETFD in fcntl.h)
AC_EGREP_CPP(yes,
[
#define _GNU_SOURCE
#include <fcntl.h>
#ifdef F_SETFD
yes
#endif
], [ AC_DEFINE(HAVE_SETFD, 1,
[Define if F_SETFD is defined in <fcntl.h>])
AC_MSG_RESULT(yes) ], AC_MSG_RESULT(no))
needsignal=no
haveselect=no
if test x$bwin32 != xtrue -a "$enable_select" != "no" ; then
AC_CHECK_FUNCS(select, [haveselect=yes], )
if test "x$haveselect" = "xyes" ; then
needsignal=yes
fi
fi
AM_CONDITIONAL(SELECT_BACKEND, [test "x$haveselect" = "xyes" -a "$enable_select" != "no"])
AC_MSG_CHECKING([for select support])
AS_IF([test "$enable_select" != "no" && test "x$haveselect" = "xyes"],
[AC_DEFINE(HAVE_SELECT, 1, [Have select support])
AC_MSG_RESULT([yes]) ],
[AC_DEFINE(HAVE_SELECT, 0, [Have select support])
AC_MSG_RESULT([no])])
dnl Check for poll. Acknowledge the documented breakage of poll on
dnl Mac OS X (Darwin).
if test "$enable_poll" != "no" ; then
if test "$host" = "apple-darwin" ; then
havepoll=no
else
havepoll=no
AC_CHECK_FUNCS(poll, [havepoll=yes], )
fi
if test "x$havepoll" = "xyes" ; then
needsignal=yes
fi
fi
AM_CONDITIONAL(POLL_BACKEND, [test "x$havepoll" = "xyes" -a "$enable_poll" != "no"])
AC_MSG_CHECKING([for poll support])
AS_IF([test "$enable_poll" != "no" && test "x$havepoll" = "xyes"],
[AC_DEFINE(HAVE_POLL, 1, [Have poll support])
AC_MSG_RESULT([yes])],
[AC_DEFINE(HAVE_POLL, 0, [No poll support])
AC_MSG_RESULT([no])])
AC_MSG_CHECKING([for /dev/poll support])
AM_CONDITIONAL(DEVPOLL_BACKEND, [test "x$ac_cv_header_sys_devpoll_h" = "xyes" -a "$enable_devpoll" != "no"])
AS_IF([test "$enable_devpoll" != "no" && test "x$ac_cv_header_sys_devpoll_h" = "xyes"],
[AC_DEFINE(HAVE_DEVPOLL, 1,
[Define if /dev/poll is available])
AC_MSG_RESULT([yes])],
[AC_DEFINE(HAVE_DEVPOLL, 0,
[No /dev/poll support])
AC_MSG_RESULT([no])])
dnl Check for kqueue. The stringent test is needed because Mac OS X
dnl (Darwin) has documented kqueue problems.
havekqueue=no
if test "x$ac_cv_header_sys_event_h" = "xyes"; then
# All versions of Mac OS X before at least 10.5.2 are completely
# broken when kqueue is used with pty. So, until they get fixed,
# completely disable kqueue on Mac OS X (note: kqueue/pty support
# has not been tested with 10.6 or beyond).
case "$host" in
*apple-darwin*)
AC_MSG_CHECKING(for working kqueue)
AC_MSG_RESULT([no (MAC OS X)])
;;
*)
AC_CHECK_FUNCS(kqueue, [havekqueue=yes], )
;;
esac
if test "x$havekqueue" = "xyes" ; then
AC_MSG_CHECKING(for working kqueue)
AC_RUN_IFELSE([AC_LANG_PROGRAM([[
#include <sys/types.h>
#include <sys/time.h>
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
]],[[
int
main(int argc, char **argv)
{
int kq;
int n;
int fd[[2]];
struct kevent ev;
struct timespec ts;
char buf[[8000]];
if (pipe(fd) == -1)
exit(1);
if (fcntl(fd[[1]], F_SETFL, O_NONBLOCK) == -1)
exit(1);
while ((n = write(fd[[1]], buf, sizeof(buf))) == sizeof(buf))
;
if ((kq = kqueue()) == -1)
exit(1);
memset(&ev, 0, sizeof(ev));
ev.ident = fd[[1]];
ev.filter = EVFILT_WRITE;
ev.flags = EV_ADD | EV_ENABLE;
n = kevent(kq, &ev, 1, NULL, 0, NULL);
if (n == -1)
exit(1);
read(fd[[0]], buf, sizeof(buf));
ts.tv_sec = 0;
ts.tv_nsec = 0;
n = kevent(kq, NULL, 0, &ev, 1, &ts);
if (n == -1 || n == 0)
exit(1);
exit(0);
}]])], [AC_MSG_RESULT(yes)
havekqueue=yes
], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
fi
fi
AM_CONDITIONAL(KQUEUE_BACKEND, [test "x$havekqueue" = "xyes" -a "$enable_kqueue" != "no"])
AC_MSG_CHECKING([for kqueue support])
AS_IF([test "$enable_kqueue" != "no" && test "x$havekqueue" = "xyes"],
[AC_DEFINE(HAVE_WORKING_KQUEUE, 1,
[Define if kqueue works correctly with pipes])
AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])
haveepollsyscall=no
haveepoll=no
AC_MSG_CHECKING([for epoll support])
if test "$enable_epoll" != "no" ; then
AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes], )
if test "x$haveepoll" = "xyes" -a "$cross_compiling" != "yes" ; then
# Unfortunately, it's not sufficient to just test for the
# existence of the epoll_ctl symbol on some Linux distros
# (e.g., Fedora 9), where the function is defined and you can
# link against it, but it's hardwired to return ENOSYS -- and
# /usr/include/gnu/stubs.h fails to define __stub_epoll_ctl
# (the usual mechanism in glibc to indicate that a function is
# a stub and isn't really implemented). Hence, checking for
# the symbol succeeds because it thinks it can use epoll_ctl
# (and friends). So we have to do an actual test after we
# determine that epoll_ctl is linkable. Grumble. If we are
# cross compiling, all we can do is trust AC_CHECK_FUNCS and
# pray.
# Unfortunately, there's also another potential
# incompatibility. The event_poll struct is defined in the
# sys/epoll.h file. The structure is the interface between
# the application and the kernel and is therefore compiled
# into both. The event_poll struct is defined with a compiler
# directive __attribute__ ((__packed__). It turns out that
# there is at least one compiler (Sun Studio) that does not
# currently recognize this directive. This means that the
# event_poll struct may be packed in the kernel, but not in
# the libevent library. Badness ensues. Therefore, check to
# see that this struct gets correctly passed between userspace
# and the kernel.
# In this test, we use epoll in Level Triggered mode. We create
# a pipe and the write only file descriptor of the pipe is
# added to the epoll set. The test is successful if
# epoll_wait() returns 1 indicating that the fd is ready to be
# written to.
haveepoll=no
AC_MSG_RESULT([yes])
AC_MSG_CHECKING([for working epoll library interface])
AC_RUN_IFELSE([AC_LANG_PROGRAM([
AC_INCLUDES_DEFAULT
#include <sys/epoll.h>
],[[
int main(int argc, char **argv)
{
struct epoll_event epevin;
struct epoll_event epevout;
int res;
int epfd;
int fildes[[2]];
if ((epfd = epoll_create(1)) == -1)
exit(1);
if (pipe(&fildes[[0]]) < 0)
exit(1);
memset(&epevin, 0, sizeof(epevin));
memset(&epevout, 0, sizeof(epevout));
memset(&epevin.data.ptr, 5, sizeof(epevin.data.ptr));
epevin.events = EPOLLIN | EPOLLOUT;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, fildes[[1]], &epevin) == -1)
exit(1);
res = epoll_wait(epfd, &epevout, 1, 0);
if (res != 1) {
exit(1);
} else {
if (epevout.data.ptr != epevin.data.ptr) {
exit(1);
}
}
/* SUCCESS */
}
]])],
[haveepoll=yes
# OMPI: Don't use AC_LIBOBJ
needsignal=yes],
[haveepoll=no], [])
AC_MSG_RESULT([$haveepoll])
fi
fi
AM_CONDITIONAL(EPOLL_BACKEND, [test "x$haveepoll" = "xyes" -a "$enable_epoll" != "no"])
AS_IF([test "$enable_epoll" != "no" && test "x$haveepoll" = "xyes"],
[AC_DEFINE(HAVE_EPOLL, 1,
[Define if your system supports the epoll interface])
AC_MSG_RESULT([yes])],
[AC_DEFINE(HAVE_EPOLL, 0,
[No epoll interface support])
AC_MSG_RESULT([no])])
dnl Check for epollsyscall if epoll not found.
haveepollsyscall=no
if test "x$ac_cv_header_sys_epoll_h" = "xyes" -a "x$haveepoll" = "xno" -a "$cross_compiling" != "yes"; then
# See comment above. This test uses the epoll syscalls
# instead of the library interface.
AC_MSG_CHECKING(for working epoll system call)
AC_RUN_IFELSE([AC_LANG_PROGRAM([
AC_INCLUDES_DEFAULT
#include <sys/syscall.h>
#include <sys/epoll.h>
],[[
int
main(int argc, char **argv)
{
struct epoll_event epevin;
struct epoll_event epevout;
int res;
int epfd;
int fildes[[2]];
if ((epfd = syscall(__NR_epoll_create, 1)) == -1)
exit(1);
if (pipe(&fildes[[0]]) < 0)
exit(1);
memset(&epevin, 0, sizeof(epevin));
memset(&epevout, 0, sizeof(epevout));
memset(&epevin.data.ptr, 5, sizeof(epevin.data.ptr));
epevin.events = EPOLLIN | EPOLLOUT;
if (syscall(__NR_epoll_ctl, epfd,
EPOLL_CTL_ADD, fildes[[1]], &epevin) == -1)
exit(1);
res = syscall(__NR_epoll_wait, epfd, &epevout, 1, 0);
if (res != 1) {
exit(1);
} else {
if (epevout.data.ptr != epevin.data.ptr) {
exit(1);
}
}
/* SUCCESS */
}
]])],
[haveepollsyscall=yes
# OMPI: don't use AC_LIBOBJ
needsignal=yes])
AC_MSG_RESULT([$haveepollsyscall])
fi
AC_MSG_CHECKING([for epoll syscall support])
AS_IF([test "$enable_epoll" != "no" && test "x$haveepollsyscall" = "xyes"],
[AC_DEFINE(HAVE_EPOLL, 1,
[Define if your system supports the epoll interface])
AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])
haveeventports=no
if test "$enable_evport" != "no" ; then
AC_CHECK_FUNCS(port_create, [haveeventports=yes], )
if test "x$haveeventports" = "xyes" -a "$enable_evport" != "no" ; then
needsignal=yes
fi
fi
AM_CONDITIONAL(EVPORT_BACKEND, [test "x$haveeventports" = "xyes" -a "$enable_evport" != "no"])
AC_MSG_CHECKING([for evport support])
AS_IF([test "$enable_evport" != "no" && test "x$haveeventports" = "xyes"],
[AC_DEFINE(HAVE_EVENT_PORTS, 1,
[Define if your system supports event ports])
AC_MSG_RESULT([yes])],
[AC_DEFINE(HAVE_EVENT_PORTS, 0,
[No event port support])
AC_MSG_RESULT([no])])
dnl Check for eventops
AC_MSG_CHECKING(event_ops)
if test "$enable_select" != "no" && test "x$haveselect" = "xyes" ; then
have_ops=yes
elif test "$enable_poll" != "no" && test "x$havepoll" = "xyes" ; then
have_ops=yes
elif test "$enable_kqueue" != "no" && test "x$havekqueue" = "xyes" ; then
have_ops=yes
elif test "$enable_epoll" != "no" && test "x$haveepoll" = "xyes" ; then
have_ops=yes
else
have_ops=no
fi
AS_IF([test "$have_ops" = "yes"],
[AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_WORKING_EVENTOPS, 1, [Define if there is a working event op])],
[AC_MSG_RESULT(no)])
AC_MSG_CHECKING([for working ops])
AS_IF([test "$have_ops" = "yes"],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])
if test "x$bwin32" = "xtrue"; then
needsignal=yes
fi
AM_CONDITIONAL(SIGNAL_SUPPORT, [test "x$needsignal" = "xyes" -a "$enable_signal" != "no"])
AC_MSG_CHECKING([for needsignal support])
AS_IF([test "$enable_signal" != "no" && test "x$needsignal" = "xyes"],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])])
AC_TYPE_PID_T
AC_TYPE_SIZE_T
AC_TYPE_SSIZE_T
AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t, uintptr_t], , ,
[#ifdef HAVE_STDINT_H
#include <stdint.h>
#elif defined(HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif])
AC_CHECK_TYPES([fd_mask], , ,
[#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif])
AC_CHECK_SIZEOF(long long)
AC_CHECK_SIZEOF(long)
AC_CHECK_SIZEOF(int)
AC_CHECK_SIZEOF(short)
AC_CHECK_SIZEOF(size_t)
AC_CHECK_SIZEOF(void *)
AC_CHECK_TYPES([struct in6_addr, struct sockaddr_in6, sa_family_t, struct addrinfo], , ,
[#define _GNU_SOURCE
#include <sys/types.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_NETINET_IN6_H
#include <netinet/in6.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef WIN32
#define WIN32_WINNT 0x400
#define _WIN32_WINNT 0x400
#define WIN32_LEAN_AND_MEAN
#if defined(_MSC_VER) && (_MSC_VER < 1300)
#include <winsock.h>
#else
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#endif
])
AC_CHECK_MEMBERS([struct in6_addr.s6_addr32, struct in6_addr.s6_addr16, struct sockaddr_in.sin_len, struct sockaddr_in6.sin6_len], , ,
[#include <sys/types.h>
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_NETINET_IN6_H
#include <netinet/in6.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef WIN32
#define WIN32_WINNT 0x400
#define _WIN32_WINNT 0x400
#define WIN32_LEAN_AND_MEAN
#if defined(_MSC_VER) && (_MSC_VER < 1300)
#include <winsock.h>
#else
#include <winsock2.h>
#include <ws2tcpip.h>
#endif
#endif
])
AC_MSG_CHECKING([for socklen_t])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <sys/types.h>
#include <sys/socket.h>
]],
[[socklen_t x;]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_DEFINE(socklen_t, unsigned int,
[Define to unsigned int if you don't have it])])
AC_MSG_CHECKING([whether our compiler supports __func__])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#
]], [[
const char *cp = __func__; ]])],
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_CHECKING([whether our compiler supports __FUNCTION__])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#
]], [[
const char *cp = __FUNCTION__; ]])],
[AC_MSG_RESULT([yes])
AC_DEFINE(__func__, __FUNCTION__,
[Define to appropriate substitute if compiler doesn't have __func__])],
[AC_MSG_RESULT([no])
AC_DEFINE(__func__, __FILE__,
[Define to appropriate substitute if compiler doesn't have __func__])])])
# check if we can compile with pthreads
have_pthreads=no
if test x$bwin32 != xtrue && test "$enable_thread_support" != "no"; then
ACX_PTHREAD([
AC_DEFINE(HAVE_PTHREADS, 1,
[Define if we have pthreads on this system])
have_pthreads=yes])
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
fi
AM_CONDITIONAL(PTHREADS, [test "$have_pthreads" != "no" && test "$enable_thread_support" != "no"])
# check if we should compile locking into the library
if test x$enable_thread_support = xno; then
AC_DEFINE(DISABLE_THREAD_SUPPORT, 1,
[Define if libevent should not be compiled with thread support])
fi
# check if we should hard-code the mm functions.
if test x$enable_malloc_replacement = xno; then
AC_DEFINE(DISABLE_MM_REPLACEMENT, 1,
[Define if libevent should not allow replacing the mm functions])
fi
# check if we should hard-code debugging out
if test x$enable_debug_mode = xno; then
AC_DEFINE(DISABLE_DEBUG_MODE, 1,
[Define if libevent should build without support for a debug mode])
fi
# check if we have and should use openssl
AM_CONDITIONAL(OPENSSL, [test "$enable_openssl" != "no" && test "$have_openssl" = "yes"])
# Add some more warnings which we use in development but not in the
# released versions. (Some relevant gcc versions can't handle these.)
if test x$enable_gcc_warnings = xyes; then
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if !defined(__GNUC__) || (__GNUC__ < 4)
#error
#endif
]], [[int i;]])], [have_gcc4=yes], [have_gcc4=no])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
#error
#endif
]], [[int i;]])], [have_gcc42=yes], [have_gcc42=no])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 5)
#error
#endif
]], [[int i;]])], [have_gcc45=yes], [have_gcc45=no])
CFLAGS="$CFLAGS -W -Wfloat-equal -Wundef -Wpointer-arith -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wredundant-decls -Wchar-subscripts -Wcomment -Wformat -Wwrite-strings -Wmissing-declarations -Wredundant-decls -Wnested-externs -Wbad-function-cast -Wswitch-enum -Werror"
CFLAGS="$CFLAGS -Wno-unused-parameter -Wno-sign-compare -Wstrict-aliasing"
if test x$have_gcc4 = xyes ; then
# These warnings break gcc 3.3.5 and work on gcc 4.0.2
CFLAGS="$CFLAGS -Winit-self -Wmissing-field-initializers -Wdeclaration-after-statement"
#CFLAGS="$CFLAGS -Wold-style-definition"
fi
if test x$have_gcc42 = xyes ; then
# These warnings break gcc 4.0.2 and work on gcc 4.2
CFLAGS="$CFLAGS -Waddress -Wnormalized=id -Woverride-init"
fi
if test x$have_gcc45 = xyes ; then
# These warnings work on gcc 4.5
CFLAGS="$CFLAGS -Wlogical-op"
fi
##This will break the world on some 64-bit architectures
# CFLAGS="$CFLAGS -Winline"
fi
AC_CONFIG_FILES( [libevent.pc libevent_openssl.pc libevent_pthreads.pc] )
AC_OUTPUT(Makefile include/Makefile)


@@ -1,100 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _DEFER_INTERNAL_H_
#define _DEFER_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include <sys/queue.h>
struct deferred_cb;
typedef void (*deferred_cb_fn)(struct deferred_cb *, void *);
/** A deferred_cb is a callback that can be scheduled to run as part of
* an event_base's event_loop, rather than running immediately. */
struct deferred_cb {
/** Links to the adjacent active (pending) deferred_cb objects. */
TAILQ_ENTRY (deferred_cb) cb_next;
/** True iff this deferred_cb is pending in an event_base. */
unsigned queued : 1;
/** The function to execute when the callback runs. */
deferred_cb_fn cb;
/** The function's second argument. */
void *arg;
};
/** A deferred_cb_queue is a list of deferred_cb that we can add to and run. */
struct deferred_cb_queue {
/** Lock used to protect the queue. */
void *lock;
/** How many entries are in the queue? */
int active_count;
/** Function called when adding to the queue from another thread. */
void (*notify_fn)(struct deferred_cb_queue *, void *);
void *notify_arg;
/** Deferred callback management: a list of deferred callbacks to
* run along with the active events. */
TAILQ_HEAD (deferred_cb_list, deferred_cb) deferred_cb_list;
};
/**
Initialize an empty, non-pending deferred_cb.
@param deferred The deferred_cb structure to initialize.
@param cb The function to run when the deferred_cb executes.
@param arg The function's second argument.
*/
void event_deferred_cb_init(struct deferred_cb *, deferred_cb_fn, void *);
/**
Cancel a deferred_cb if it is currently scheduled in an event_base.
*/
void event_deferred_cb_cancel(struct deferred_cb_queue *, struct deferred_cb *);
/**
Activate a deferred_cb if it is not currently scheduled in an event_base.
*/
void event_deferred_cb_schedule(struct deferred_cb_queue *, struct deferred_cb *);
#define LOCK_DEFERRED_QUEUE(q) \
EVLOCK_LOCK((q)->lock, 0)
#define UNLOCK_DEFERRED_QUEUE(q) \
EVLOCK_UNLOCK((q)->lock, 0)
#ifdef __cplusplus
}
#endif
void event_deferred_cb_queue_init(struct deferred_cb_queue *);
struct deferred_cb_queue *event_base_get_deferred_cb_queue(struct event_base *);
#endif /* _DEFER_INTERNAL_H_ */
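A rough usage sketch of the internal API declared above, illustrative only: these are internal rather than public entry points, and base is assumed to be an already-initialized struct event_base pointer.
#include <stdio.h>
#include "defer-internal.h"
static void hello_deferred(struct deferred_cb *cb, void *arg)
{
    (void)cb;
    /* Runs later, from inside the event loop, not on the scheduler's stack. */
    printf("deferred: %s\n", (const char *)arg);
}
static void schedule_hello(struct event_base *base)
{
    static struct deferred_cb dcb;   /* must stay alive until it has run */
    event_deferred_cb_init(&dcb, hello_deferred, (void *)"hello");
    event_deferred_cb_schedule(event_base_get_deferred_cb_queue(base), &dcb);
}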


@@ -1,309 +0,0 @@
/*
* Copyright 2000-2009 Niels Provos <provos@citi.umich.edu>
* Copyright 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#include <sys/types.h>
#include <sys/resource.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/devpoll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/thread.h"
#include "event-internal.h"
#include "evsignal-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "evthread-internal.h"
struct devpollop {
struct pollfd *events;
int nevents;
int dpfd;
struct pollfd *changes;
int nchanges;
};
static void *devpoll_init(struct event_base *);
static int devpoll_add(struct event_base *, int fd, short old, short events, void *);
static int devpoll_del(struct event_base *, int fd, short old, short events, void *);
static int devpoll_dispatch(struct event_base *, struct timeval *);
static void devpoll_dealloc(struct event_base *);
const struct eventop devpollops = {
"devpoll",
devpoll_init,
devpoll_add,
devpoll_del,
devpoll_dispatch,
devpoll_dealloc,
1, /* need reinit */
EV_FEATURE_FDS|EV_FEATURE_O1,
0
};
#define NEVENT 32000
static int
devpoll_commit(struct devpollop *devpollop)
{
/*
* Due to a bug in Solaris, we have to use pwrite with an offset of 0.
* A write is limited to 2 GB of data; anything larger fails.
*/
if (pwrite(devpollop->dpfd, devpollop->changes,
sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
return (-1);
devpollop->nchanges = 0;
return (0);
}
static int
devpoll_queue(struct devpollop *devpollop, int fd, int events) {
struct pollfd *pfd;
if (devpollop->nchanges >= devpollop->nevents) {
/*
* Change buffer is full, must commit it to /dev/poll before
* adding more
*/
if (devpoll_commit(devpollop) != 0)
return (-1);
}
pfd = &devpollop->changes[devpollop->nchanges++];
pfd->fd = fd;
pfd->events = events;
pfd->revents = 0;
return (0);
}
static void *
devpoll_init(struct event_base *base)
{
int dpfd, nfiles = NEVENT;
struct rlimit rl;
struct devpollop *devpollop;
if (!(devpollop = mm_calloc(1, sizeof(struct devpollop))))
return (NULL);
if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
rl.rlim_cur != RLIM_INFINITY)
nfiles = rl.rlim_cur;
/* Initialize the kernel queue */
if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
event_warn("open: /dev/poll");
mm_free(devpollop);
return (NULL);
}
devpollop->dpfd = dpfd;
/* Initialize fields */
/* FIXME: allocating 'nfiles' worth of space here can be
* expensive and unnecessary. See how epoll.c does it instead. */
devpollop->events = mm_calloc(nfiles, sizeof(struct pollfd));
if (devpollop->events == NULL) {
mm_free(devpollop);
close(dpfd);
return (NULL);
}
devpollop->nevents = nfiles;
devpollop->changes = mm_calloc(nfiles, sizeof(struct pollfd));
if (devpollop->changes == NULL) {
mm_free(devpollop->events);
mm_free(devpollop);
close(dpfd);
return (NULL);
}
evsig_init(base);
return (devpollop);
}
static int
devpoll_dispatch(struct event_base *base, struct timeval *tv)
{
struct devpollop *devpollop = base->evbase;
struct pollfd *events = devpollop->events;
struct dvpoll dvp;
int i, res, timeout = -1;
if (devpollop->nchanges)
devpoll_commit(devpollop);
if (tv != NULL)
timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
dvp.dp_fds = devpollop->events;
dvp.dp_nfds = devpollop->nevents;
dvp.dp_timeout = timeout;
EVBASE_RELEASE_LOCK(base, th_base_lock);
res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (res == -1) {
if (errno != EINTR) {
event_warn("ioctl: DP_POLL");
return (-1);
}
evsig_process(base);
return (0);
} else if (base->sig.evsig_caught) {
evsig_process(base);
}
event_debug(("%s: devpoll_wait reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
int what = events[i].revents;
if (what & POLLHUP)
what |= POLLIN | POLLOUT;
else if (what & POLLERR)
what |= POLLIN | POLLOUT;
if (what & POLLIN)
which |= EV_READ;
if (what & POLLOUT)
which |= EV_WRITE;
if (!which)
continue;
/* XXX(niels): not sure if this works for devpoll */
evmap_io_active(base, events[i].fd, which);
}
return (0);
}
static int
devpoll_add(struct event_base *base, int fd, short old, short events, void *p)
{
struct devpollop *devpollop = base->evbase;
int res;
(void)p;
/*
* It's not necessary to OR the existing read/write events that we
* are currently interested in with the new event we are adding.
* The /dev/poll driver ORs any new events with the existing events
* that it has cached for the fd.
*/
res = 0;
if (events & EV_READ)
res |= POLLIN;
if (events & EV_WRITE)
res |= POLLOUT;
if (devpoll_queue(devpollop, fd, res) != 0)
return (-1);
return (0);
}
static int
devpoll_del(struct event_base *base, int fd, short old, short events, void *p)
{
struct devpollop *devpollop = base->evbase;
int res;
(void)p;
res = 0;
if (events & EV_READ)
res |= POLLIN;
if (events & EV_WRITE)
res |= POLLOUT;
/*
* The only way to remove an fd from the /dev/poll monitored set is
* to use POLLREMOVE by itself. This removes ALL events for the fd
* provided so if we care about two events and are only removing one
* we must re-add the other event after POLLREMOVE.
*/
if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
return (-1);
if ((res & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
/*
* We're not deleting all events, so we must resubmit the
* event that we are still interested in if one exists.
*/
if ((res & POLLIN) && (old & EV_WRITE)) {
/* Deleting read, still care about write */
devpoll_queue(devpollop, fd, POLLOUT);
} else if ((res & POLLOUT) && (old & EV_READ)) {
/* Deleting write, still care about read */
devpoll_queue(devpollop, fd, POLLIN);
}
}
return (0);
}
static void
devpoll_dealloc(struct event_base *base)
{
struct devpollop *devpollop = base->evbase;
evsig_dealloc(base);
if (devpollop->events)
mm_free(devpollop->events);
if (devpollop->changes)
mm_free(devpollop->changes);
if (devpollop->dpfd >= 0)
close(devpollop->dpfd);
memset(devpollop, 0, sizeof(struct devpollop));
mm_free(devpollop);
}
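For reference, backends such as this one are normally selected automatically at runtime; the public configuration API can steer that choice. A small sketch, assuming a standard libevent 2.0 installation:
#include <stdio.h>
#include <event2/event.h>
int main(void)
{
    struct event_config *cfg = event_config_new();
    /* Skip the /dev/poll backend even if it is compiled in and usable. */
    event_config_avoid_method(cfg, "devpoll");
    struct event_base *base = event_base_new_with_config(cfg);
    printf("selected backend: %s\n", event_base_get_method(base));
    event_base_free(base);
    event_config_free(cfg);
    return 0;
}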


@@ -1,383 +0,0 @@
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#include <stdint.h>
#include <sys/types.h>
#include <sys/resource.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/epoll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef _EVENT_HAVE_FCNTL_H
#include <fcntl.h>
#endif
#include "event-internal.h"
#include "evsignal-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "changelist-internal.h"
struct epollop {
struct epoll_event *events;
int nevents;
int epfd;
};
static void *epoll_init(struct event_base *);
static int epoll_dispatch(struct event_base *, struct timeval *);
static void epoll_dealloc(struct event_base *);
const struct eventop epollops = {
"epoll",
epoll_init,
event_changelist_add,
event_changelist_del,
epoll_dispatch,
epoll_dealloc,
1, /* need reinit */
EV_FEATURE_ET|EV_FEATURE_O1,
EVENT_CHANGELIST_FDINFO_SIZE
};
#define INITIAL_NEVENT 32
#define MAX_NEVENT 4096
/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
* values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be
* as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
* largest number of msec we can support here is 2147482. Let's
* round that down by 47 seconds.
*/
#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
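/* Worked check of that bound, in the worst case described above
 * (HZ == 1000, LONG_MAX == (1<<31)-1):
 *   (LONG_MAX - 999) / HZ = (2147483647 - 999) / 1000 = 2147482 msec
 *   35 * 60 * 1000        = 2100000 msec
 * i.e. the cap sits roughly 47 seconds below the kernel limit. */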
static void *
epoll_init(struct event_base *base)
{
int epfd;
struct epollop *epollop;
/* Initialize the kernel queue. (The size field is ignored since
* 2.6.8.) */
if ((epfd = epoll_create(32000)) == -1) {
if (errno != ENOSYS)
event_warn("epoll_create");
return (NULL);
}
evutil_make_socket_closeonexec(epfd);
if (!(epollop = mm_calloc(1, sizeof(struct epollop))))
return (NULL);
epollop->epfd = epfd;
/* Initialize fields */
epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
if (epollop->events == NULL) {
mm_free(epollop);
return (NULL);
}
epollop->nevents = INITIAL_NEVENT;
evsig_init(base);
return (epollop);
}
static const char *
change_to_string(int change)
{
change &= (EV_CHANGE_ADD|EV_CHANGE_DEL);
if (change == EV_CHANGE_ADD) {
return "add";
} else if (change == EV_CHANGE_DEL) {
return "del";
} else if (change == 0) {
return "none";
} else {
return "???";
}
}
static const char *
epoll_op_to_string(int op)
{
return op == EPOLL_CTL_ADD?"ADD":
op == EPOLL_CTL_DEL?"DEL":
op == EPOLL_CTL_MOD?"MOD":
"???";
}
static int
epoll_apply_changes(struct event_base *base)
{
struct event_changelist *changelist = &base->changelist;
struct epollop *epollop = base->evbase;
struct event_change *ch;
struct epoll_event epev;
int i;
int op, events;
for (i = 0; i < changelist->n_changes; ++i) {
int precautionary_add = 0;
ch = &changelist->changes[i];
events = 0;
/* The logic here is a little tricky. If we had no events set
on the fd before, we need to set op="ADD" and set
events=the events we want to add. If we had any events set
on the fd before, and we want any events to remain on the
fd, we need to say op="MOD" and set events=the events we
want to remain. But if we want to delete the last event,
we say op="DEL" and set events=the remaining events. What
fun!
*/
if ((ch->read_change & EV_CHANGE_ADD) ||
(ch->write_change & EV_CHANGE_ADD)) {
/* If we are adding anything at all, we'll want to do
* either an ADD or a MOD. */
short new_events = ch->old_events;
events = 0;
op = EPOLL_CTL_ADD;
if (ch->read_change & EV_CHANGE_ADD) {
events |= EPOLLIN;
new_events |= EV_READ;
} else if (ch->read_change & EV_CHANGE_DEL) {
new_events &= ~EV_READ;
} else if (ch->old_events & EV_READ) {
events |= EPOLLIN;
}
if (ch->write_change & EV_CHANGE_ADD) {
events |= EPOLLOUT;
new_events |= EV_WRITE;
} else if (ch->write_change & EV_CHANGE_DEL) {
new_events &= ~EV_WRITE;
} else if (ch->old_events & EV_WRITE) {
events |= EPOLLOUT;
}
if ((ch->read_change|ch->write_change) & EV_ET)
events |= EPOLLET;
if (new_events == ch->old_events) {
/*
If the changelist has an "add" operation,
but no visible change to the events enabled
on the fd, we need to try the ADD anyway, in
case the fd was closed at some point in the
middle. If it wasn't, the ADD operation
will fail with EEXIST; that's okay.
*/
precautionary_add = 1;
} else if (ch->old_events) {
op = EPOLL_CTL_MOD;
}
} else if ((ch->read_change & EV_CHANGE_DEL) ||
(ch->write_change & EV_CHANGE_DEL)) {
/* If we're deleting anything, we'll want to do a MOD
* or a DEL. */
op = EPOLL_CTL_DEL;
if (ch->read_change & EV_CHANGE_DEL) {
if (ch->write_change & EV_CHANGE_DEL) {
events = EPOLLIN|EPOLLOUT;
} else if (ch->old_events & EV_WRITE) {
events = EPOLLOUT;
op = EPOLL_CTL_MOD;
} else {
events = EPOLLIN;
}
} else if (ch->write_change & EV_CHANGE_DEL) {
if (ch->old_events & EV_READ) {
events = EPOLLIN;
op = EPOLL_CTL_MOD;
} else {
events = EPOLLOUT;
}
}
}
if (!events)
continue;
memset(&epev, 0, sizeof(epev));
epev.data.fd = ch->fd;
epev.events = events;
if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == -1) {
if (op == EPOLL_CTL_MOD && errno == ENOENT) {
/* If a MOD operation fails with ENOENT, the
* fd was probably closed and re-opened. We
* should retry the operation as an ADD.
*/
if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) {
event_warn("Epoll MOD retried as ADD; that failed too");
} else {
event_debug((" Retried as ADD; succeeded."));
}
} else if (op == EPOLL_CTL_ADD && errno == EEXIST &&
precautionary_add) {
/* If a precautionary ADD operation fails with
EEXIST, that's fine too.
*/
event_debug((" ADD was redundant"));
} else if (op == EPOLL_CTL_DEL &&
(errno == ENOENT || errno == EBADF ||
errno == EPERM)) {
/* If a delete fails with one of these errors,
* that's fine too: we closed the fd before we
* got around to calling epoll_dispatch. */
event_debug((" DEL was unnecessary."));
} else {
event_warn("Epoll %s on fd %d failed. Old events were %d; read change was %d (%s); write change was %d (%s).",
epoll_op_to_string(op),
ch->fd,
ch->old_events,
ch->read_change,
change_to_string(ch->read_change),
ch->write_change,
change_to_string(ch->write_change));
}
} else {
event_debug(("Epoll %s(%d) on fd %d okay. [old events were %d; read change was %d; write change was %d]",
epoll_op_to_string(op),
(int)epev.events,
(int)ch->fd,
ch->old_events,
ch->read_change,
ch->write_change));
}
}
return (0);
}
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
struct epollop *epollop = base->evbase;
struct epoll_event *events = epollop->events;
int i, res;
long timeout = -1;
if (tv != NULL) {
timeout = evutil_tv_to_msec(tv);
if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
/* Linux kernels can wait forever if the timeout is
* too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
timeout = MAX_EPOLL_TIMEOUT_MSEC;
}
}
epoll_apply_changes(base);
event_changelist_remove_all(&base->changelist, base);
EVBASE_RELEASE_LOCK(base, th_base_lock);
res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (res == -1) {
if (errno != EINTR) {
event_warn("epoll_wait");
return (-1);
}
evsig_process(base);
return (0);
} else if (base->sig.evsig_caught) {
evsig_process(base);
}
event_debug(("%s: epoll_wait reports %d", __func__, res));
EVUTIL_ASSERT(res <= epollop->nevents);
for (i = 0; i < res; i++) {
int what = events[i].events;
short ev = 0;
if (what & (EPOLLHUP|EPOLLERR)) {
ev = EV_READ | EV_WRITE;
} else {
if (what & EPOLLIN)
ev |= EV_READ;
if (what & EPOLLOUT)
ev |= EV_WRITE;
}
if (!ev)
continue;
evmap_io_active(base, events[i].data.fd, ev | EV_ET);
}
if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
/* We used all of the event space this time. We should
be ready for more events next time. */
int new_nevents = epollop->nevents * 2;
struct epoll_event *new_events;
new_events = mm_realloc(epollop->events,
new_nevents * sizeof(struct epoll_event));
if (new_events) {
epollop->events = new_events;
epollop->nevents = new_nevents;
}
}
return (0);
}
static void
epoll_dealloc(struct event_base *base)
{
struct epollop *epollop = base->evbase;
evsig_dealloc(base);
if (epollop->events)
mm_free(epollop->events);
if (epollop->epfd >= 0)
close(epollop->epfd);
memset(epollop, 0, sizeof(struct epollop));
mm_free(epollop);
}

View File

@@ -1,52 +0,0 @@
/*
* Copyright 2003-2009 Niels Provos <provos@citi.umich.edu>
* Copyright 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdint.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/epoll.h>
#include <unistd.h>
int
epoll_create(int size)
{
return (syscall(__NR_epoll_create, size));
}
int
epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
{
return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
}
int
epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
{
return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
}

View File

@@ -1,275 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVBUFFER_INTERNAL_H_
#define _EVBUFFER_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include "event2/util.h"
#include "util-internal.h"
#include "defer-internal.h"
/* Experimental cb flag: "never deferred." Implementation note:
* these callbacks may get an inaccurate view of n_del/n_added in their
* arguments. */
#define EVBUFFER_CB_NODEFER 2
#ifdef WIN32
#include <winsock2.h>
#endif
#include <sys/queue.h>
/* Minimum allocation for a chain. We define this so that we're burning no
* more than 5% of each allocation on overhead. It would be nice to lose even
* less space, though. */
#if _EVENT_SIZEOF_VOID_P < 8
#define MIN_BUFFER_SIZE 512
#else
#define MIN_BUFFER_SIZE 1024
#endif
/** A single evbuffer callback for an evbuffer. This function will be invoked
* when bytes are added to or removed from the evbuffer. */
struct evbuffer_cb_entry {
/** Structures to implement a doubly-linked queue of callbacks */
TAILQ_ENTRY(evbuffer_cb_entry) next;
/** The callback function to invoke when this callback is called.
If EVBUFFER_CB_OBSOLETE is set in flags, the cb_obsolete field is
valid; otherwise, cb_func is valid. */
union {
evbuffer_cb_func cb_func;
evbuffer_cb cb_obsolete;
} cb;
/** Argument to pass to cb. */
void *cbarg;
/** Currently set flags on this callback. */
ev_uint32_t flags;
};
struct bufferevent;
struct evbuffer_chain;
struct evbuffer {
/** The first chain in this buffer's linked list of chains. */
struct evbuffer_chain *first;
/** The last chain in this buffer's linked list of chains. */
struct evbuffer_chain *last;
/** Pointer to the next pointer pointing at the 'last_with_data' chain.
*
* To unpack:
*
* The last_with_data chain is the last chain that has any data in it.
* If all chains in the buffer are empty, it is the first chain.
* If the buffer has no chains, it is NULL.
*
* The last_with_datap pointer points at _whatever 'next' pointer_
* points at the last_with_datap chain. If the last_with_data chain
* is the first chain, or it is NULL, then the last_with_datap pointer
* is &buf->first.
*/
struct evbuffer_chain **last_with_datap;
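/* Illustrative example: with chains A(data) -> B(data) -> C(empty),
 * last_with_data is B and last_with_datap == &A->next; if only the
 * first chain held data (or the buffer were empty), last_with_datap
 * would be &buf->first. */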
/** Total amount of bytes stored in all chains.*/
size_t total_len;
/** Number of bytes we have added to the buffer since we last tried to
* invoke callbacks. */
size_t n_add_for_cb;
/** Number of bytes we have removed from the buffer since we last
* tried to invoke callbacks. */
size_t n_del_for_cb;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
/** A lock used to mediate access to this buffer. */
void *lock;
#endif
/** True iff we should free the lock field when we free this
* evbuffer. */
unsigned own_lock : 1;
/** True iff we should not allow changes to the front of the buffer
* (drains or prepends). */
unsigned freeze_start : 1;
/** True iff we should not allow changes to the end of the buffer
* (appends) */
unsigned freeze_end : 1;
/** True iff this evbuffer's callbacks are not invoked immediately
* upon a change in the buffer, but instead are deferred to be invoked
* from the event_base's loop. Useful for preventing enormous stack
* overflows when we have mutually recursive callbacks, and for
* serializing callbacks in a single thread. */
unsigned deferred_cbs : 1;
#ifdef WIN32
/** True iff this buffer is set up for overlapped IO. */
unsigned is_overlapped : 1;
#endif
/** Used to implement deferred callbacks. */
struct deferred_cb_queue *cb_queue;
/** A reference count on this evbuffer. When the reference count
* reaches 0, the buffer is destroyed. Manipulated with
* evbuffer_incref and evbuffer_decref_and_unlock and
* evbuffer_free. */
int refcnt;
/** A deferred_cb handle to make all of this buffer's callbacks
* invoked from the event loop. */
struct deferred_cb deferred;
/** A doubly-linked-list of callback functions */
TAILQ_HEAD(evbuffer_cb_queue, evbuffer_cb_entry) callbacks;
/** The parent bufferevent object this evbuffer belongs to.
* NULL if the evbuffer stands alone. */
struct bufferevent *parent;
};
/** A single item in an evbuffer. */
struct evbuffer_chain {
/** points to next buffer in the chain */
struct evbuffer_chain *next;
/** total allocation available in the buffer field. */
size_t buffer_len;
/** unused space at the beginning of buffer or an offset into a
* file for sendfile buffers. */
off_t misalign;
/** Offset into buffer + misalign at which to start writing.
* In other words, the total number of bytes actually stored
* in buffer. */
size_t off;
/** Set if special handling is required for this chain */
unsigned flags;
#define EVBUFFER_MMAP 0x0001 /**< memory in buffer is mmaped */
#define EVBUFFER_SENDFILE 0x0002 /**< a chain used for sendfile */
#define EVBUFFER_REFERENCE 0x0004 /**< a chain with a mem reference */
#define EVBUFFER_IMMUTABLE 0x0008 /**< read-only chain */
/** a chain that mustn't be reallocated or freed, or have its contents
* memmoved, until the chain is un-pinned. */
#define EVBUFFER_MEM_PINNED_R 0x0010
#define EVBUFFER_MEM_PINNED_W 0x0020
#define EVBUFFER_MEM_PINNED_ANY (EVBUFFER_MEM_PINNED_R|EVBUFFER_MEM_PINNED_W)
/** a chain that should be freed, but can't be freed until it is
* un-pinned. */
#define EVBUFFER_DANGLING 0x0040
/** Usually points to the read-write memory belonging to this
* buffer allocated as part of the evbuffer_chain allocation.
* For mmap, this can be a read-only buffer and
* EVBUFFER_IMMUTABLE will be set in flags. For sendfile, it
* may point to NULL.
*/
unsigned char *buffer;
};
/* this is currently used by both mmap and sendfile */
/* TODO(niels): something strange needs to happen for Windows here, I am not
* sure what that is, but it needs to get looked into.
*/
struct evbuffer_chain_fd {
int fd; /**< the fd associated with this chain */
};
/** callback for a reference buffer; lets us know what to do with it when
* we're done with it. */
struct evbuffer_chain_reference {
evbuffer_ref_cleanup_cb cleanupfn;
void *extra;
};
#define EVBUFFER_CHAIN_SIZE sizeof(struct evbuffer_chain)
/** Return a pointer to extra data allocated along with an evbuffer. */
#define EVBUFFER_CHAIN_EXTRA(t, c) (t *)((struct evbuffer_chain *)(c) + 1)
/** Assert that we are holding the lock on an evbuffer */
#define ASSERT_EVBUFFER_LOCKED(buffer) \
EVLOCK_ASSERT_LOCKED((buffer)->lock)
#define EVBUFFER_LOCK(buffer) \
do { \
EVLOCK_LOCK((buffer)->lock, 0); \
} while (0)
#define EVBUFFER_UNLOCK(buffer) \
do { \
EVLOCK_UNLOCK((buffer)->lock, 0); \
} while (0)
#define EVBUFFER_LOCK2(buffer1, buffer2) \
do { \
EVLOCK_LOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
} while (0)
#define EVBUFFER_UNLOCK2(buffer1, buffer2) \
do { \
EVLOCK_UNLOCK2((buffer1)->lock, (buffer2)->lock, 0, 0); \
} while (0)
/** Increase the reference count of buf by one. */
void _evbuffer_incref(struct evbuffer *buf);
/** Increase the reference count of buf by one and acquire the lock. */
void _evbuffer_incref_and_lock(struct evbuffer *buf);
/** Pin a single buffer chain using a given flag. A pinned chunk may not be
* moved or freed until it is unpinned. */
void _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag);
/** Unpin a single buffer chain using a given flag. */
void _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag);
/** As evbuffer_free, but requires that we hold a lock on the buffer, and
* releases the lock before freeing it and the buffer. */
void _evbuffer_decref_and_unlock(struct evbuffer *buffer);
/** As evbuffer_expand, but does not guarantee that the newly allocated memory
* is contiguous. Instead, it may be split across two or more chunks. */
int _evbuffer_expand_fast(struct evbuffer *, size_t, int);
/** Helper: prepares for a readv/WSARecv call by expanding the buffer to
* hold enough memory to read 'howmuch' bytes in possibly noncontiguous memory.
* Sets up the one or two iovecs in 'vecs' to point to the free memory and its
* extent, and *chainp to point to the first chain that we'll try to read into.
* Returns the number of vecs used.
*/
int _evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
struct evbuffer_iovec *vecs, int n_vecs, struct evbuffer_chain ***chainp,
int exact);
/* Helper macro: copies an evbuffer_iovec in ei to a win32 WSABUF in i. */
#define WSABUF_FROM_EVBUFFER_IOV(i,ei) do { \
(i)->buf = (ei)->iov_base; \
(i)->len = (ei)->iov_len; \
} while (0)
/** Set the parent bufferevent object for buf to bev */
void evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev);
#ifdef __cplusplus
}
#endif
#endif /* _EVBUFFER_INTERNAL_H_ */

Diff not shown because of its large size.

View File

@@ -1,35 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVDNS_H_
#define _EVDNS_H_
#include <event.h>
#include <event2/dns.h>
#include <event2/dns_compat.h>
#include <event2/dns_struct.h>
#endif /* _EVDNS_H_ */

View File

@@ -1,331 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_INTERNAL_H_
#define _EVENT_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "event2/event-config.h"
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"
/* map union members back */
/* mutually exclusive */
#define ev_signal_next _ev.ev_signal.ev_signal_next
#define ev_io_next _ev.ev_io.ev_io_next
#define ev_io_timeout _ev.ev_io.ev_timeout
/* used only by signals */
#define ev_ncalls _ev.ev_signal.ev_ncalls
#define ev_pncalls _ev.ev_signal.ev_pncalls
/* Possible values for ev_closure in struct event. */
#define EV_CLOSURE_NONE 0
#define EV_CLOSURE_SIGNAL 1
#define EV_CLOSURE_PERSIST 2
/** Structure to define the backend of a given event_base. */
struct eventop {
/** The name of this backend. */
const char *name;
/** Function to set up an event_base to use this backend. It should
* create a new structure holding whatever information is needed to
* run the backend, and return it. The returned pointer will get
* stored by event_init into the event_base.evbase field. On failure,
* this function should return NULL. */
void *(*init)(struct event_base *);
/** Enable reading/writing on a given fd or signal. 'events' will be
* the events that we're trying to enable: one or more of EV_READ,
* EV_WRITE, EV_SIGNAL, and EV_ET. 'old' will be those events that
* were enabled on this fd previously. 'fdinfo' will be a structure
* associated with the fd by the evmap; its size is defined by the
* fdinfo field below. It will be set to 0 the first time the fd is
* added. The function should return 0 on success and -1 on error.
*/
int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** As "add", except 'events' contains the events we mean to disable. */
int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
/** Function to implement the core of an event loop. It must see which
added events are ready, and cause event_active to be called for each
active event (usually via event_io_active or such). It should
return 0 on success and -1 on error.
*/
int (*dispatch)(struct event_base *, struct timeval *);
/** Function to clean up and free our data from the event_base. */
void (*dealloc)(struct event_base *);
/** Flag: set if we need to reinitialize the event base after we fork.
*/
int need_reinit;
/** Bit-array of supported event_method_features that this backend can
* provide. */
enum event_method_feature features;
/** Length of the extra information we should record for each fd that
has one or more active events. This information is recorded
as part of the evmap entry for each fd, and passed as an argument
to the add and del functions above.
*/
size_t fdinfo_len;
};
#ifdef WIN32
/* If we're on win32, then file descriptors are not nice low densely packed
integers. Instead, they are pointer-like windows handles, and we want to
use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif
/* #define HT_CACHE_HASH_VALS */
#ifdef EVMAP_USE_HT
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif
/* Used to map signal numbers to a list of events. If EVMAP_USE_HT is not
defined, this structure is also used as event_io_map, which maps fds to a
list of events.
*/
struct event_signal_map {
/* An array of evmap_io * or of evmap_signal *; empty entries are
* set to NULL. */
void **entries;
/* The number of entries available in entries */
int nentries;
};
/* A list of events waiting on a given 'common' timeout value. Ordinarily,
* events waiting for a timeout wait on a minheap. Sometimes, however, a
* queue can be faster.
**/
struct common_timeout_list {
/* List of events currently waiting in the queue. */
struct event_list events;
/* 'magic' timeval used to indicate the duration of events in this
* queue. */
struct timeval duration;
/* Event that triggers whenever one of the events in the queue is
* ready to activate */
struct event timeout_event;
/* The event_base that this timeout list is part of */
struct event_base *base;
};
struct event_change;
/* List of 'changes' since the last call to eventop.dispatch. Only maintained
* if the backend is using changesets. */
struct event_changelist {
struct event_change *changes;
int n_changes;
int changes_size;
};
#ifndef _EVENT_DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int _event_debug_mode_on;
#define EVENT_DEBUG_MODE_IS_ON() (_event_debug_mode_on)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif
struct event_base {
/** Function pointers and other data to describe this event_base's
* backend. */
const struct eventop *evsel;
/** Pointer to backend-specific data. */
void *evbase;
/** List of changes to tell backend about at next dispatch. Only used
* by the O(1) backends. */
struct event_changelist changelist;
/** Function pointers used to describe the backend that this event_base
* uses for signals */
const struct eventop *evsigsel;
/** Pointer to signal backend-specific data*/
void *evsigbase;
/** Data to implement the common signal handler code. */
struct evsig_info sig;
/** Number of virtual events */
int virtual_event_count;
/** Number of total events added to this event_base */
int event_count;
/** Number of total events active in this event_base */
int event_count_active;
/** Set if we should terminate the loop once we're done processing
* events. */
int event_gotterm;
/** Set if we should terminate the loop immediately */
int event_break;
/** Set if we're running the event_base_loop function, to prevent
* reentrant invocation. */
int running_loop;
/* Active event management. */
/** An array of nactivequeues queues for active events (ones that
* have triggered, and whose callbacks need to be called). Low
* priority numbers are more important, and stall higher ones.
*/
struct event_list *activequeues;
/** The length of the activequeues array */
int nactivequeues;
/* common timeout logic */
/** An array of common_timeout_list* for all of the common timeout
* values we know. */
struct common_timeout_list **common_timeout_queues;
/** The number of entries used in common_timeout_queues */
int n_common_timeouts;
/** The total size of common_timeout_queues. */
int n_common_timeouts_allocated;
/** List of deferred_cb that are active. We run these after the active
* events. */
struct deferred_cb_queue defer_queue;
/** Mapping from file descriptors to enabled (added) events */
struct event_io_map io;
/** Mapping from signal numbers to enabled (added) events. */
struct event_signal_map sigmap;
/** All events that have been enabled (added) in this event_base */
struct event_list eventqueue;
/** Stored timeval; used to detect when time is running backwards. */
struct timeval event_tv;
/** Priority queue of events with timeouts. */
struct min_heap timeheap;
/** Stored timeval: used to avoid calling gettimeofday too often. */
struct timeval tv_cache;
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
/* threading support */
/** The thread currently running the event_loop for this base */
unsigned long th_owner_id;
/** A lock to prevent conflicting accesses to this event_base */
void *th_base_lock;
/** The event whose callback is executing right now */
struct event *current_event;
/** A condition that gets signalled when we're done processing an
* event with waiters on it. */
void *current_event_cond;
/** Number of threads blocking on current_event_cond. */
int current_event_waiters;
#endif
#ifdef WIN32
/** IOCP support structure, if IOCP is enabled. */
struct event_iocp_port *iocp;
#endif
/** Flags that this base was configured with */
enum event_base_config_flag flags;
/* Notify main thread to wake up break, etc. */
/** True if the base already has a pending notify, and we don't need
* to add any more. */
int is_notify_pending;
/** A socketpair used by some th_notify functions to wake up the main
* thread. */
int th_notify_fd[2];
/** An event used by some th_notify functions to wake up the main
* thread. */
struct event th_notify;
/** A function used to wake up the main thread from another thread. */
int (*th_notify_fn)(struct event_base *base);
};
struct event_config_entry {
TAILQ_ENTRY(event_config_entry) next;
const char *avoid_method;
};
/** Internal structure: describes the configuration we want for an event_base
* that we're about to allocate. */
struct event_config {
TAILQ_HEAD(event_configq, event_config_entry) entries;
int n_cpus_hint;
enum event_method_feature require_features;
enum event_base_config_flag flags;
};
/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(_EVENT_HAVE_SYS_QUEUE_H) && !defined(_EVENT_HAVE_TAILQFOREACH)
#define TAILQ_FIRST(head) ((head)->tqh_first)
#define TAILQ_END(head) NULL
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
#define TAILQ_FOREACH(var, head, field) \
for ((var) = TAILQ_FIRST(head); \
(var) != TAILQ_END(head); \
(var) = TAILQ_NEXT(var, field))
#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
(elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
(elm)->field.tqe_next = (listelm); \
*(listelm)->field.tqe_prev = (elm); \
(listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
} while (0)
#endif /* TAILQ_FOREACH */
#define N_ACTIVE_CALLBACKS(base) \
((base)->event_count_active + (base)->defer_queue.active_count)
int _evsig_set_handler(struct event_base *base, int evsignal,
void (*fn)(int));
int _evsig_restore_handler(struct event_base *base, int evsignal);
void event_active_nolock(struct event *ev, int res, short count);
/* FIXME document. */
void event_base_add_virtual(struct event_base *base);
void event_base_del_virtual(struct event_base *base);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_INTERNAL_H_ */

Diff not shown because of its large size.

View File

@@ -1,206 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_H_
#define _EVENT_H_
/** @mainpage
@section intro Introduction
libevent is an event notification library for developing scalable network
servers. The libevent API provides a mechanism to execute a callback
function when a specific event occurs on a file descriptor or after a
timeout has been reached. Furthermore, libevent also supports callbacks due
to signals or regular timeouts.
libevent is meant to replace the event loop found in event driven network
servers. An application just needs to call event_dispatch() and then add or
remove events dynamically without having to change the event loop.
Currently, libevent supports /dev/poll, kqueue(2), select(2), poll(2) and
epoll(4). It also has experimental support for real-time signals. The
internal event mechanism is completely independent of the exposed event API,
and a simple update of libevent can provide new functionality without having
to redesign the applications. As a result, Libevent allows for portable
application development and provides the most scalable event notification
mechanism available on an operating system. Libevent can also be used for
multi-threaded applications; see Steven Grimm's explanation. Libevent should
compile on Linux, *BSD, Mac OS X, Solaris and Windows.
@section usage Standard usage
Every program that uses libevent must include the <event.h> header, and pass
the -levent flag to the linker. Before using any of the functions in the
library, you must call event_init() or event_base_new() to perform one-time
initialization of the libevent library.
@section event Event notification
For each file descriptor that you wish to monitor, you must declare an event
structure and call event_set() to initialize the members of the structure.
To enable notification, you add the structure to the list of monitored
events by calling event_add(). The event structure must remain allocated as
long as it is active, so it should be allocated on the heap. Finally, you
call event_dispatch() to loop and dispatch events.
@section bufferevent I/O Buffers
libevent provides an abstraction on top of the regular event callbacks. This
abstraction is called a buffered event. A buffered event provides input and
output buffers that get filled and drained automatically. The user of a
buffered event no longer deals directly with the I/O, but instead is reading
from input and writing to output buffers.
Once initialized via bufferevent_new(), the bufferevent structure can be
used repeatedly with bufferevent_enable() and bufferevent_disable().
Instead of reading and writing directly to a socket, you would call
bufferevent_read() and bufferevent_write().
When read enabled, the bufferevent will try to read from the file descriptor
and call the read callback. The write callback is executed whenever the
output buffer is drained below the write low watermark, which is 0 by
default.
@section timers Timers
libevent can also be used to create timers that invoke a callback after a
certain amount of time has expired. The evtimer_set() function prepares an
event struct to be used as a timer. To activate the timer, call
evtimer_add(). Timers can be deactivated by calling evtimer_del().
@section timeouts Timeouts
In addition to simple timers, libevent can assign timeout events to file
descriptors that are triggered whenever a certain amount of time has passed
with no activity on a file descriptor. The timeout_set() function
initializes an event struct for use as a timeout. Once initialized, the
event must be activated by using timeout_add(). To cancel the timeout, call
timeout_del().
@section evdns Asynchronous DNS resolution
libevent provides an asynchronous DNS resolver that should be used instead
of the standard DNS resolver functions. These functions can be imported by
including the <evdns.h> header in your program. Before using any of the
resolver functions, you must call evdns_init() to initialize the library. To
convert a hostname to an IP address, you call the evdns_resolve_ipv4()
function. To perform a reverse lookup, you would call the
evdns_resolve_reverse() function. All of these functions use callbacks to
avoid blocking while the lookup is performed.
@section evhttp Event-driven HTTP servers
libevent provides a very simple event-driven HTTP server that can be
embedded in your program and used to service HTTP requests.
To use this capability, you need to include the <evhttp.h> header in your
program. You create the server by calling evhttp_new(). Add addresses and
ports to listen on with evhttp_bind_socket(). You then register one or more
callbacks to handle incoming requests. Each URI can be assigned a callback
via the evhttp_set_cb() function. A generic callback function can also be
registered via evhttp_set_gencb(); this callback will be invoked if no other
callbacks have been registered for a given URI.
@section evrpc A framework for RPC servers and clients
libevent provides a framework for creating RPC servers and clients. It
takes care of marshaling and unmarshaling all data structures.
@section api API Reference
To browse the complete documentation of the libevent API, click on any of
the following links.
event2/event.h
The primary libevent header
event2/buffer.h
Buffer management for network reading and writing
event2/dns.h
Asynchronous DNS resolution
event2/http.h
An embedded libevent-based HTTP server
evrpc.h
A framework for creating RPC servers and clients
*/
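/*
 A minimal sketch of the classic usage described above (names are only
 illustrative; error handling omitted; assumes <unistd.h> for read()):

	static void on_readable(int fd, short what, void *arg)
	{
		char buf[256];
		(void)what; (void)arg;
		(void)read(fd, buf, sizeof(buf));
	}

	int watch_fd(int fd)
	{
		struct event ev;
		event_init();
		event_set(&ev, fd, EV_READ | EV_PERSIST, on_readable, NULL);
		event_add(&ev, NULL);
		return event_dispatch();
	}
 */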
/** @file libevent/event.h
A library for writing event-driven network servers
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef _EVENT_HAVE_STDINT_H
#include <stdint.h>
#endif
#include <stdarg.h>
/* For int types. */
#include <evutil.h>
#ifdef WIN32
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
typedef unsigned char u_char;
typedef unsigned short u_short;
#endif
#include <event2/event_struct.h>
#include <event2/event.h>
#include <event2/event_compat.h>
#include <event2/buffer.h>
#include <event2/buffer_compat.h>
#include <event2/bufferevent.h>
#include <event2/bufferevent_struct.h>
#include <event2/bufferevent_compat.h>
#include <event2/tag.h>
#include <event2/tag_compat.h>
#ifdef __cplusplus
}
#endif
#endif /* _EVENT_H_ */

View File

@@ -1,286 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <winsock2.h>
#include <windows.h>
#include <process.h>
#include <stdio.h>
#include <mswsock.h>
#include "event2/util.h"
#include "util-internal.h"
#include "iocp-internal.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "event-internal.h"
#include "evthread-internal.h"
#define NOTIFICATION_KEY ((ULONG_PTR)-1)
void
event_overlapped_init(struct event_overlapped *o, iocp_callback cb)
{
memset(o, 0, sizeof(struct event_overlapped));
o->cb = cb;
}
static void
handle_entry(OVERLAPPED *o, ULONG_PTR completion_key, DWORD nBytes, int ok)
{
struct event_overlapped *eo =
EVUTIL_UPCAST(o, struct event_overlapped, overlapped);
eo->cb(eo, completion_key, nBytes, ok);
}
static void
loop(void *_port)
{
struct event_iocp_port *port = _port;
long ms = port->ms;
HANDLE p = port->port;
if (ms <= 0)
ms = INFINITE;
while (1) {
OVERLAPPED *overlapped=NULL;
ULONG_PTR key=0;
DWORD bytes=0;
int ok = GetQueuedCompletionStatus(p, &bytes, &key,
&overlapped, ms);
EnterCriticalSection(&port->lock);
if (port->shutdown) {
if (--port->n_live_threads == 0)
ReleaseSemaphore(port->shutdownSemaphore, 1,
NULL);
LeaveCriticalSection(&port->lock);
return;
}
LeaveCriticalSection(&port->lock);
if (key != NOTIFICATION_KEY && overlapped)
handle_entry(overlapped, key, bytes, ok);
else if (!overlapped)
break;
}
event_warnx("GetQueuedCompletionStatus exited with no event.");
EnterCriticalSection(&port->lock);
if (--port->n_live_threads == 0)
ReleaseSemaphore(port->shutdownSemaphore, 1, NULL);
LeaveCriticalSection(&port->lock);
}
int
event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd,
ev_uintptr_t key)
{
HANDLE h;
h = CreateIoCompletionPort((HANDLE)fd, port->port, key, port->n_threads);
if (!h)
return -1;
return 0;
}
static void *
get_extension_function(SOCKET s, const GUID *which_fn)
{
void *ptr = NULL;
DWORD bytes=0;
WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER,
(GUID*)which_fn, sizeof(*which_fn),
&ptr, sizeof(ptr),
&bytes, NULL, NULL);
/* No need to detect errors here: if ptr is set, then we have a good
function pointer. Otherwise, we should behave as if we had no
function pointer.
*/
return ptr;
}
/* Mingw doesn't have these in its mswsock.h. The values are copied from
wine.h. Perhaps if we copy them exactly, the cargo will come again.
*/
#ifndef WSAID_ACCEPTEX
#define WSAID_ACCEPTEX \
{0xb5367df1,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
#endif
#ifndef WSAID_CONNECTEX
#define WSAID_CONNECTEX \
{0x25a207b9,0xddf3,0x4660,{0x8e,0xe9,0x76,0xe5,0x8c,0x74,0x06,0x3e}}
#endif
#ifndef WSAID_GETACCEPTEXSOCKADDRS
#define WSAID_GETACCEPTEXSOCKADDRS \
{0xb5367df2,0xcbac,0x11cf,{0x95,0xca,0x00,0x80,0x5f,0x48,0xa1,0x92}}
#endif
static void
init_extension_functions(struct win32_extension_fns *ext)
{
const GUID acceptex = WSAID_ACCEPTEX;
const GUID connectex = WSAID_CONNECTEX;
const GUID getacceptexsockaddrs = WSAID_GETACCEPTEXSOCKADDRS;
SOCKET s = socket(AF_INET, SOCK_STREAM, 0);
if (s == INVALID_SOCKET)
return;
ext->AcceptEx = get_extension_function(s, &acceptex);
ext->ConnectEx = get_extension_function(s, &connectex);
ext->GetAcceptExSockaddrs = get_extension_function(s,
&getacceptexsockaddrs);
closesocket(s);
}
static struct win32_extension_fns the_extension_fns;
static int extension_fns_initialized = 0;
const struct win32_extension_fns *
event_get_win32_extension_fns(void)
{
return &the_extension_fns;
}
#define N_CPUS_DEFAULT 2
struct event_iocp_port *
event_iocp_port_launch(int n_cpus)
{
struct event_iocp_port *port;
int i;
if (!extension_fns_initialized)
init_extension_functions(&the_extension_fns);
if (!(port = mm_calloc(1, sizeof(struct event_iocp_port))))
return NULL;
if (n_cpus <= 0)
n_cpus = N_CPUS_DEFAULT;
port->n_threads = n_cpus * 2;
port->threads = calloc(port->n_threads, sizeof(HANDLE));
if (!port->threads)
goto err;
port->port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0,
n_cpus);
port->ms = -1;
if (!port->port)
goto err;
port->shutdownSemaphore = CreateSemaphore(NULL, 0, 1, NULL);
if (!port->shutdownSemaphore)
goto err;
for (i=0; i<port->n_threads; ++i) {
ev_uintptr_t th = _beginthread(loop, 0, port);
if (th == (ev_uintptr_t)-1)
goto err;
port->threads[i] = (HANDLE)th;
++port->n_live_threads;
}
InitializeCriticalSectionAndSpinCount(&port->lock, 1000);
return port;
err:
if (port->port)
CloseHandle(port->port);
if (port->threads)
mm_free(port->threads);
if (port->shutdownSemaphore)
CloseHandle(port->shutdownSemaphore);
mm_free(port);
return NULL;
}
static void
_event_iocp_port_unlock_and_free(struct event_iocp_port *port)
{
DeleteCriticalSection(&port->lock);
CloseHandle(port->port);
CloseHandle(port->shutdownSemaphore);
mm_free(port->threads);
mm_free(port);
}
static int
event_iocp_notify_all(struct event_iocp_port *port)
{
int i, r, ok=1;
for (i=0; i<port->n_threads; ++i) {
r = PostQueuedCompletionStatus(port->port, 0, NOTIFICATION_KEY,
NULL);
if (!r)
ok = 0;
}
return ok ? 0 : -1;
}
int
event_iocp_shutdown(struct event_iocp_port *port, long waitMsec)
{
DWORD ms = INFINITE;
int n;
EnterCriticalSection(&port->lock);
port->shutdown = 1;
LeaveCriticalSection(&port->lock);
event_iocp_notify_all(port);
if (waitMsec >= 0)
ms = waitMsec;
WaitForSingleObject(port->shutdownSemaphore, ms);
EnterCriticalSection(&port->lock);
n = port->n_live_threads;
LeaveCriticalSection(&port->lock);
if (n == 0) {
_event_iocp_port_unlock_and_free(port);
return 0;
} else {
return -1;
}
}
int
event_iocp_activate_overlapped(
struct event_iocp_port *port, struct event_overlapped *o,
ev_uintptr_t key, ev_uint32_t n)
{
BOOL r;
r = PostQueuedCompletionStatus(port->port, n, key, &o->overlapped);
return (r==0) ? -1 : 0;
}
struct event_iocp_port *
event_base_get_iocp(struct event_base *base)
{
#ifdef WIN32
return base->iocp;
#else
return NULL;
#endif
}

Diff not shown because of its large size.

View File

@@ -1,586 +0,0 @@
/*
* Copyright (c) 2003-2009 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <winsock2.h>
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#else
#include <sys/ioctl.h>
#endif
#include <sys/queue.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifndef WIN32
#include <syslog.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#include "event2/event.h"
#include "event2/tag.h"
#include "event2/buffer.h"
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
/*
Here's our wire format:
Stream = TaggedData*
TaggedData = Tag Length Data
where the integer value of 'Length' is the length of 'data'.
Tag = HByte* LByte
where HByte is a byte with the high bit set, and LByte is a byte
with the high bit clear. The integer value of the tag is taken
by concatenating the lower 7 bits from all the tags. So for example,
the tag 0x66 is encoded as [66], whereas the tag 0x166 is encoded as
[82 66]
Length = Integer
Integer = NNibbles Nibble* Padding?
where NNibbles is a 4-bit value encoding the number of nibbles-1,
and each Nibble is 4 bits worth of encoded integer, in big-endian
order. If the total encoded integer size is an odd number of nibbles,
a final padding nibble with value 0 is appended.
*/
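/* Worked example of that layout: evtag_marshal(buf, 0x66, "hi", 2)
 * appends the bytes
 *   66       -- tag 0x66 (high bit clear, so a single tag byte)
 *   02       -- length 2 (high nibble 0: one nibble follows; low nibble: 2)
 *   68 69    -- the payload "hi"
 */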
int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
void
evtag_init(void)
{
}
/*
* We encode integers by nibbles; the first nibble contains the number
* of significant nibbles - 1; this allows us to encode up to 64-bit
* integers. This function is byte-order independent.
*
* @param number a 32-bit unsigned integer to encode
* @param data a pointer to where the data should be written. Must
* have at least 5 bytes free.
* @return the number of bytes written into data.
*/
#define ENCODE_INT_INTERNAL(data, number) do { \
int off = 1, nibbles = 0; \
\
memset(data, 0, sizeof(number)+1); \
while (number) { \
if (off & 0x1) \
data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f); \
else \
data[off/2] = (data[off/2] & 0x0f) | \
((number & 0x0f) << 4); \
number >>= 4; \
off++; \
} \
\
if (off > 2) \
nibbles = off - 2; \
\
/* Off - 1 is the number of encoded nibbles */ \
data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4); \
\
return ((off + 1) / 2); \
} while (0)
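/* Sketch of how the macro above packs number == 0x1b3: nibble 3 lands in
 * the low half of data[0], nibble 0xb in the high half of data[1], nibble 1
 * in the low half of data[1]; three significant nibbles, so the high half
 * of data[0] becomes 3 - 1 = 2.  Result: data[] = { 0x23, 0xb1 }, return
 * value 2 bytes; DECODE_INT_INTERNAL below unpacks this back to 0x1b3. */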
static inline int
encode_int_internal(ev_uint8_t *data, ev_uint32_t number)
{
ENCODE_INT_INTERNAL(data, number);
}
static inline int
encode_int64_internal(ev_uint8_t *data, ev_uint64_t number)
{
ENCODE_INT_INTERNAL(data, number);
}
void
evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number)
{
ev_uint8_t data[5];
int len = encode_int_internal(data, number);
evbuffer_add(evbuf, data, len);
}
void
evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number)
{
ev_uint8_t data[9];
int len = encode_int64_internal(data, number);
evbuffer_add(evbuf, data, len);
}
/*
* Support variable length encoding of tags; we use the high bit in each
* octet as a continuation signal.
*/
int
evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
{
int bytes = 0;
ev_uint8_t data[5];
memset(data, 0, sizeof(data));
do {
ev_uint8_t lower = tag & 0x7f;
tag >>= 7;
if (tag)
lower |= 0x80;
data[bytes++] = lower;
} while (tag);
if (evbuf != NULL)
evbuffer_add(evbuf, data, bytes);
return (bytes);
}
static int
decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
{
ev_uint32_t number = 0;
int len = evbuffer_get_length(evbuf);
ev_uint8_t *data;
int count = 0, shift = 0, done = 0;
/*
* the encoding of a number is at most one byte more than its
* storage size. however, it may also be much smaller.
*/
data = evbuffer_pullup(
evbuf, len < sizeof(number) + 1 ? len : sizeof(number) + 1);
while (count++ < len) {
ev_uint8_t lower = *data++;
number |= (lower & 0x7f) << shift;
shift += 7;
if (!(lower & 0x80)) {
done = 1;
break;
}
}
if (!done)
return (-1);
if (dodrain)
evbuffer_drain(evbuf, count);
if (ptag != NULL)
*ptag = number;
return (count);
}
int
evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
{
return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
}
/*
* Marshal a data type, the general format is as follows:
*
* tag number: one byte; length: var bytes; payload: var bytes
*/
void
evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
const void *data, ev_uint32_t len)
{
evtag_encode_tag(evbuf, tag);
evtag_encode_int(evbuf, len);
evbuffer_add(evbuf, (void *)data, len);
}
void
evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
struct evbuffer *data)
{
evtag_encode_tag(evbuf, tag);
evtag_encode_int(evbuf, evbuffer_get_length(data));
evbuffer_add_buffer(evbuf, data);
}
/* Marshaling for integers */
void
evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
{
ev_uint8_t data[5];
int len = encode_int_internal(data, integer);
evtag_encode_tag(evbuf, tag);
evtag_encode_int(evbuf, len);
evbuffer_add(evbuf, data, len);
}
void
evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
ev_uint64_t integer)
{
ev_uint8_t data[9];
int len = encode_int64_internal(data, integer);
evtag_encode_tag(evbuf, tag);
evtag_encode_int(evbuf, len);
evbuffer_add(evbuf, data, len);
}
void
evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
{
evtag_marshal(buf, tag, string, strlen(string));
}
void
evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
{
ev_uint8_t data[10];
int len = encode_int_internal(data, tv->tv_sec);
len += encode_int_internal(data + len, tv->tv_usec);
evtag_marshal(evbuf, tag, data, len);
}
#define DECODE_INT_INTERNAL(number, maxnibbles, pnumber, evbuf, offset) \
do { \
ev_uint8_t *data; \
int len = evbuffer_get_length(evbuf) - offset; \
int nibbles = 0; \
\
if (len <= 0) \
return (-1); \
\
/* XXX(niels): faster? */ \
data = evbuffer_pullup(evbuf, offset + 1) + offset; \
\
nibbles = ((data[0] & 0xf0) >> 4) + 1; \
if (nibbles > maxnibbles || (nibbles >> 1) + 1 > len) \
return (-1); \
len = (nibbles >> 1) + 1; \
\
data = evbuffer_pullup(evbuf, offset + len) + offset; \
\
while (nibbles > 0) { \
number <<= 4; \
if (nibbles & 0x1) \
number |= data[nibbles >> 1] & 0x0f; \
else \
number |= (data[nibbles >> 1] & 0xf0) >> 4; \
nibbles--; \
} \
\
*pnumber = number; \
\
return (len); \
} while (0)
/* Internal: decode an integer from an evbuffer, without draining it.
* Only integers up to 32-bits are supported.
*
* @param evbuf the buffer to read from
* @param offset an index into the buffer at which we should start reading.
* @param pnumber a pointer to receive the integer.
* @return The length of the number as encoded, or -1 on error.
*/
static int
decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int offset)
{
ev_uint32_t number = 0;
DECODE_INT_INTERNAL(number, 8, pnumber, evbuf, offset);
}
static int
decode_int64_internal(ev_uint64_t *pnumber, struct evbuffer *evbuf, int offset)
{
ev_uint64_t number = 0;
DECODE_INT_INTERNAL(number, 16, pnumber, evbuf, offset);
}
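/*
 * Illustrative note (derived by reading DECODE_INT_INTERNAL backwards, so
 * treat the exact bytes as a sketch): integers use a nibble-count encoding
 * rather than the varint used for tags.  The high nibble of the first byte
 * stores (number of value nibbles - 1) and the remaining nibbles hold the
 * value itself, most significant nibble last.  For example 300 (0x12c)
 * needs three value nibbles, so it would travel as the two bytes 0x2c 0x21
 * and decode back to 0x12c.
 */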
int
evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
{
int res = decode_int_internal(pnumber, evbuf, 0);
if (res != -1)
evbuffer_drain(evbuf, res);
return (res == -1 ? -1 : 0);
}
int
evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf)
{
int res = decode_int64_internal(pnumber, evbuf, 0);
if (res != -1)
evbuffer_drain(evbuf, res);
return (res == -1 ? -1 : 0);
}
int
evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
}
int
evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
res = decode_int_internal(plength, evbuf, len);
if (res == -1)
return (-1);
*plength += res + len;
return (0);
}
int
evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
int res, len;
len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
if (len == -1)
return (-1);
res = decode_int_internal(plength, evbuf, len);
if (res == -1)
return (-1);
return (0);
}
/* just unmarshals the header and returns the length of the remaining data */
int
evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
ev_uint32_t len;
if (decode_tag_internal(ptag, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (evtag_decode_int(&len, evbuf) == -1)
return (-1);
if (evbuffer_get_length(evbuf) < len)
return (-1);
return (len);
}
int
evtag_consume(struct evbuffer *evbuf)
{
int len;
if ((len = evtag_unmarshal_header(evbuf, NULL)) == -1)
return (-1);
evbuffer_drain(evbuf, len);
return (0);
}
/* Reads the data type from an event buffer */
int
evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
{
int len;
if ((len = evtag_unmarshal_header(src, ptag)) == -1)
return (-1);
if (evbuffer_add(dst, evbuffer_pullup(src, len), len) == -1)
return (-1);
evbuffer_drain(src, len);
return (len);
}
/* Marshaling for integers */
int
evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint32_t *pinteger)
{
ev_uint32_t tag;
ev_uint32_t len;
int result;
if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (need_tag != tag)
return (-1);
if (evtag_decode_int(&len, evbuf) == -1)
return (-1);
if (evbuffer_get_length(evbuf) < len)
return (-1);
result = decode_int_internal(pinteger, evbuf, 0);
evbuffer_drain(evbuf, len);
if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
return (-1);
else
return result;
}
int
evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint64_t *pinteger)
{
ev_uint32_t tag;
ev_uint32_t len;
int result;
if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
return (-1);
if (need_tag != tag)
return (-1);
if (evtag_decode_int(&len, evbuf) == -1)
return (-1);
if (evbuffer_get_length(evbuf) < len)
return (-1);
result = decode_int64_internal(pinteger, evbuf, 0);
evbuffer_drain(evbuf, len);
if (result < 0 || (size_t)result > len) /* XXX Should this be != rather than > ?*/
return (-1);
else
return result;
}
/* Unmarshal a fixed length tag */
int
evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
size_t len)
{
ev_uint32_t tag;
int tag_len;
/* Now unmarshal a tag and check that it matches the tag we want */
if ((tag_len = evtag_unmarshal_header(src, &tag)) == -1 ||
tag != need_tag)
return (-1);
if (tag_len != len)
return (-1);
evbuffer_remove(src, data, len);
return (0);
}
int
evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
char **pstring)
{
ev_uint32_t tag;
int tag_len;
if ((tag_len = evtag_unmarshal_header(evbuf, &tag)) == -1 ||
tag != need_tag)
return (-1);
*pstring = mm_malloc(tag_len + 1);
if (*pstring == NULL) {
event_warn("%s: malloc", __func__);
return -1;
}
evbuffer_remove(evbuf, *pstring, tag_len);
(*pstring)[tag_len] = '\0';
return (0);
}
int
evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct timeval *ptv)
{
ev_uint32_t tag;
ev_uint32_t integer;
int len, offset, offset2;
int result = -1;
if ((len = evtag_unmarshal_header(evbuf, &tag)) == -1)
return (-1);
if (tag != need_tag)
goto done;
if ((offset = decode_int_internal(&integer, evbuf, 0)) == -1)
goto done;
ptv->tv_sec = integer;
if ((offset2 = decode_int_internal(&integer, evbuf, offset)) == -1)
goto done;
ptv->tv_usec = integer;
if (offset + offset2 > len) /* XXX Should this be != instead of > ? */
goto done;
result = 0;
done:
evbuffer_drain(evbuf, len);
return result;
}
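/*
 * Illustrative usage sketch (not part of the original file), tying the
 * pieces above together: marshal writes "tag, length, payload", peek
 * inspects the tag without draining, and unmarshal checks the expected tag
 * and drains.  example_roundtrip() and the tag value 5 are hypothetical.
 */
#if 0	/* example only, not compiled */
static int
example_roundtrip(void)
{
	struct evbuffer *buf = evbuffer_new();
	ev_uint32_t tag = 0, value = 0;

	evtag_marshal_int(buf, 5, 42);		/* tag 5, payload = encoded 42 */
	if (evtag_peek(buf, &tag) == -1 || tag != 5) {
		evbuffer_free(buf);
		return (-1);
	}
	if (evtag_unmarshal_int(buf, 5, &value) == -1) {	/* value == 42 */
		evbuffer_free(buf);
		return (-1);
	}
	evbuffer_free(buf);
	return (0);
}
#endif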

View file

@@ -1,35 +0,0 @@
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVHTTP_H_
#define _EVHTTP_H_
#include <event.h>
#include <event2/http.h>
#include <event2/http_struct.h>
#include <event2/http_compat.h>
#endif /* _EVHTTP_H_ */

View file

@@ -1,90 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVMAP_H_
#define _EVMAP_H_
/** @file evmap-internal.h
*
* An event_map is a utility structure to map each fd or signal to zero or
* more events. Functions to manipulate event_maps should only be used from
* inside libevent. They generally need to hold the lock on the corresponding
* event_base.
**/
struct event_base;
struct event;
/** Initialize an event_map for use.
*/
void evmap_io_initmap(struct event_io_map* ctx);
void evmap_signal_initmap(struct event_signal_map* ctx);
/** Remove all entries from an event_map.
@param ctx the map to clear.
*/
void evmap_io_clear(struct event_io_map* ctx);
void evmap_signal_clear(struct event_signal_map* ctx);
/** Add an IO event (some combination of EV_READ or EV_WRITE) to an
event_base's list of events on a given file descriptor, and tell the
underlying eventops about the fd if its state has changed.
Requires that ev is not already added.
@param base the event_base to operate on.
@param fd the file descriptor corresponding to ev.
@param ev the event to add.
*/
int evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev);
/** Remove an IO event (some combination of EV_READ or EV_WRITE) to an
event_base's list of events on a given file descriptor, and tell the
underlying eventops about the fd if its state has changed.
@param base the event_base to operate on.
@param fd the file descriptor corresponding to ev.
@param ev the event to remove.
*/
int evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev);
/** Active the set of events waiting on an event_base for a given fd.
@param base the event_base to operate on.
@param fd the file descriptor that has become active.
@param events a bitmask of EV_READ|EV_WRITE|EV_ET.
*/
void evmap_io_active(struct event_base *base, evutil_socket_t fd, short events);
/* These functions behave in the same way as evmap_io_*, except they work on
* signals rather than fds. signals use a linear map everywhere; fds use
* either a linear map or a hashtable. */
int evmap_signal_add(struct event_base *base, int signum, struct event *ev);
int evmap_signal_del(struct event_base *base, int signum, struct event *ev);
void evmap_signal_active(struct event_base *base, evutil_socket_t signum, int ncalls);
void *evmap_io_get_fdinfo(struct event_io_map *ctx, evutil_socket_t fd);
#endif /* _EVMAP_H_ */

View file

@@ -1,724 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"
/** An entry for an evmap_io list: notes all the events that want to read or
write on a given fd, and the number of each.
*/
struct evmap_io {
struct event_list events;
ev_uint16_t nread;
ev_uint16_t nwrite;
};
/* An entry for an evmap_signal list: notes all the events that want to know
when a signal triggers. */
struct evmap_signal {
struct event_list events;
};
/* On some platforms, fds start at 0 and increment by 1 as they are
allocated, and old numbers get used. For these platforms, we
implement io maps just like signal maps: as an array of pointers to
struct evmap_io. But on other platforms (windows), sockets are not
0-indexed, not necessarily consecutive, and not necessarily reused.
There, we use a hashtable to implement evmap_io.
*/
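/* Illustrative note (not in the original file): with the array form the
 * lookup for, say, fd 5 is just map->entries[5]; with the hashtable form
 * below, GET_IO_SLOT() turns into an HT_FIND() keyed on the fd.  Both
 * representations hide behind the same GET_IO_SLOT*() macros, so the rest
 * of this file never needs to know which one is in use. */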
#ifdef EVMAP_USE_HT
struct event_map_entry {
HT_ENTRY(event_map_entry) map_node;
evutil_socket_t fd;
union { /* This is a union in case we need to make more things that can
be in the hashtable. */
struct evmap_io evmap_io;
} ent;
};
/* Helper used by the event_io_map hashtable code; tries to return a good hash
* of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
* matter. Our hashtable implementation really likes low-order bits,
* though, so let's do the rotate-and-add trick. */
unsigned h = (unsigned) e->fd;
h += (h >> 2) | (h << 30);
return h;
}
/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
* have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
return e1->fd == e2->fd;
}
HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket);
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
0.5, mm_malloc, mm_realloc, mm_free);
#define GET_IO_SLOT(x, map, slot, type) \
do { \
struct event_map_entry _key, *_ent; \
_key.fd = slot; \
_ent = HT_FIND(event_io_map, map, &_key); \
(x) = _ent ? &_ent->ent.type : NULL; \
} while (0);
#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
do { \
struct event_map_entry _key, *_ent; \
_key.fd = slot; \
_HT_FIND_OR_INSERT(event_io_map, map_node, hashsocket, map, \
event_map_entry, &_key, ptr, \
{ \
_ent = *ptr; \
}, \
{ \
_ent = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
EVUTIL_ASSERT(_ent); \
_ent->fd = slot; \
(ctor)(&_ent->ent.type); \
_HT_FOI_INSERT(map_node, map, &_key, _ent, ptr) \
}); \
(x) = &_ent->ent.type; \
} while (0)
void evmap_io_initmap(struct event_io_map *ctx)
{
HT_INIT(event_io_map, ctx);
}
void evmap_io_clear(struct event_io_map *ctx)
{
struct event_map_entry **ent, **next, *this;
for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
this = *ent;
next = HT_NEXT_RMV(event_io_map, ctx, ent);
mm_free(this);
}
}
#endif
/* Set the variable 'x' to the field in event_map 'map' with fields of type
'struct type *' corresponding to the fd or signal 'slot'. Set 'x' to NULL
if there are no entries for 'slot'. Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type) \
(x) = (struct type *)((map)->entries[slot])
/* As GET_SLOT, but construct the entry for 'slot' if it is not present,
by allocating enough memory for a 'struct type', and initializing the new
value by calling the function 'ctor' on it.
*/
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len) \
do { \
if ((map)->entries[slot] == NULL) { \
EVUTIL_ASSERT(ctor != NULL); \
(map)->entries[slot] = \
mm_calloc(1,sizeof(struct type)+fdinfo_len); \
EVUTIL_ASSERT((map)->entries[slot] != NULL); \
(ctor)((struct type *)(map)->entries[slot]); \
} \
(x) = (struct type *)((map)->entries[slot]); \
} while (0)
/* If we aren't using hashtables, then define the IO_SLOT macros and functions
as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len) \
GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap(struct event_io_map* ctx)
{
evmap_signal_initmap(ctx);
}
void
evmap_io_clear(struct event_io_map* ctx)
{
evmap_signal_clear(ctx);
}
#endif
/** Expand 'map' with new entries of width 'msize' until it is big enough
to store a value in 'slot'.
*/
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
if (map->nentries <= slot) {
int nentries = map->nentries ? map->nentries : 32;
void **tmp;
while (nentries <= slot)
nentries <<= 1;
tmp = (void **)mm_realloc(map->entries, nentries * msize);
if (tmp == NULL)
return (-1);
memset(&tmp[map->nentries], 0,
(nentries - map->nentries) * msize);
map->nentries = nentries;
map->entries = tmp;
}
return (0);
}
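/* Illustrative note (not in the original file): growth doubles from a floor
 * of 32 entries, so the first event added on, say, fd 70 takes an empty map
 * straight to 128 slots. */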
void
evmap_signal_initmap(struct event_signal_map *ctx)
{
ctx->nentries = 0;
ctx->entries = NULL;
}
void
evmap_signal_clear(struct event_signal_map *ctx)
{
if (ctx->entries != NULL) {
int i;
for (i = 0; i < ctx->nentries; ++i) {
if (ctx->entries[i] != NULL)
mm_free(ctx->entries[i]);
}
mm_free(ctx->entries);
ctx->entries = NULL;
}
ctx->nentries = 0;
}
/* code specific to file descriptors */
/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
TAILQ_INIT(&entry->events);
entry->nread = 0;
entry->nwrite = 0;
}
/* return -1 on error, 0 on success if nothing changed in the event backend,
* and 1 on success if something did. */
int
evmap_io_add(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
const struct eventop *evsel = base->evsel;
struct event_io_map *io = &base->io;
struct evmap_io *ctx = NULL;
int nread, nwrite, retval = 0;
short res = 0, old = 0;
struct event *old_ev;
EVUTIL_ASSERT(fd == ev->ev_fd);
if (fd < 0)
return 0;
#ifndef EVMAP_USE_HT
if (fd >= io->nentries) {
if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
return (-1);
}
#endif
GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
evsel->fdinfo_len);
nread = ctx->nread;
nwrite = ctx->nwrite;
if (nread)
old |= EV_READ;
if (nwrite)
old |= EV_WRITE;
if (ev->ev_events & EV_READ) {
if (++nread == 1)
res |= EV_READ;
}
if (ev->ev_events & EV_WRITE) {
if (++nwrite == 1)
res |= EV_WRITE;
}
if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff)) {
event_warnx("Too many events reading or writing on fd %d",
(int)fd);
return -1;
}
if (EVENT_DEBUG_MODE_IS_ON() &&
(old_ev = TAILQ_FIRST(&ctx->events)) &&
(old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
event_warnx("Tried to mix edge-triggered and non-edge-triggered"
" events on fd %d", (int)fd);
return -1;
}
if (res) {
void *extra = ((char*)ctx) + sizeof(struct evmap_io);
/* XXX(niels): we cannot mix edge-triggered and
* level-triggered, we should probably assert on
* this. */
if (evsel->add(base, ev->ev_fd,
old, (ev->ev_events & EV_ET) | res, extra) == -1)
return (-1);
retval = 1;
}
ctx->nread = (ev_uint16_t) nread;
ctx->nwrite = (ev_uint16_t) nwrite;
TAILQ_INSERT_TAIL(&ctx->events, ev, ev_io_next);
return (retval);
}
/* return -1 on error, 0 on success if nothing changed in the event backend,
* and 1 on success if something did. */
int
evmap_io_del(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
const struct eventop *evsel = base->evsel;
struct event_io_map *io = &base->io;
struct evmap_io *ctx;
int nread, nwrite, retval = 0;
short res = 0, old = 0;
if (fd < 0)
return 0;
EVUTIL_ASSERT(fd == ev->ev_fd);
#ifndef EVMAP_USE_HT
if (fd >= io->nentries)
return (-1);
#endif
GET_IO_SLOT(ctx, io, fd, evmap_io);
nread = ctx->nread;
nwrite = ctx->nwrite;
if (nread)
old |= EV_READ;
if (nwrite)
old |= EV_WRITE;
if (ev->ev_events & EV_READ) {
if (--nread == 0)
res |= EV_READ;
EVUTIL_ASSERT(nread >= 0);
}
if (ev->ev_events & EV_WRITE) {
if (--nwrite == 0)
res |= EV_WRITE;
EVUTIL_ASSERT(nwrite >= 0);
}
if (res) {
void *extra = ((char*)ctx) + sizeof(struct evmap_io);
if (evsel->del(base, ev->ev_fd, old, res, extra) == -1)
return (-1);
retval = 1;
}
ctx->nread = nread;
ctx->nwrite = nwrite;
TAILQ_REMOVE(&ctx->events, ev, ev_io_next);
return (retval);
}
void
evmap_io_active(struct event_base *base, evutil_socket_t fd, short events)
{
struct event_io_map *io = &base->io;
struct evmap_io *ctx;
struct event *ev;
#ifndef EVMAP_USE_HT
EVUTIL_ASSERT(fd < io->nentries);
#endif
GET_IO_SLOT(ctx, io, fd, evmap_io);
EVUTIL_ASSERT(ctx);
TAILQ_FOREACH(ev, &ctx->events, ev_io_next) {
if (ev->ev_events & events)
event_active_nolock(ev, ev->ev_events & events, 1);
}
}
/* code specific to signals */
static void
evmap_signal_init(struct evmap_signal *entry)
{
TAILQ_INIT(&entry->events);
}
int
evmap_signal_add(struct event_base *base, int sig, struct event *ev)
{
const struct eventop *evsel = base->evsigsel;
struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx = NULL;
if (sig >= map->nentries) {
if (evmap_make_space(
map, sig, sizeof(struct evmap_signal *)) == -1)
return (-1);
}
GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
base->evsigsel->fdinfo_len);
if (TAILQ_EMPTY(&ctx->events)) {
if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
== -1)
return (-1);
}
TAILQ_INSERT_TAIL(&ctx->events, ev, ev_signal_next);
return (1);
}
int
evmap_signal_del(struct event_base *base, int sig, struct event *ev)
{
const struct eventop *evsel = base->evsigsel;
struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx;
if (sig >= map->nentries)
return (-1);
GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
if (TAILQ_FIRST(&ctx->events) == TAILQ_LAST(&ctx->events, event_list)) {
if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
return (-1);
}
TAILQ_REMOVE(&ctx->events, ev, ev_signal_next);
return (1);
}
void
evmap_signal_active(struct event_base *base, evutil_socket_t sig, int ncalls)
{
struct event_signal_map *map = &base->sigmap;
struct evmap_signal *ctx;
struct event *ev;
EVUTIL_ASSERT(sig < map->nentries);
GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);
TAILQ_FOREACH(ev, &ctx->events, ev_signal_next)
event_active_nolock(ev, EV_SIGNAL, ncalls);
}
void *
evmap_io_get_fdinfo(struct event_io_map *map, evutil_socket_t fd)
{
struct evmap_io *ctx;
GET_IO_SLOT(ctx, map, fd, evmap_io);
if (ctx)
return ((char*)ctx) + sizeof(struct evmap_io);
else
return NULL;
}
/** Per-fd structure for use with changelists. It keeps track, for each fd or
* signal using the changelist, of where its entry in the changelist is.
*/
struct event_changelist_fdinfo {
int idxplus1; /* this is the index +1, so that memset(0) will make it
* a no-such-element */
};
void
event_changelist_init(struct event_changelist *changelist)
{
changelist->changes = NULL;
changelist->changes_size = 0;
changelist->n_changes = 0;
}
/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
const struct event_change *change)
{
char *ptr;
if (change->read_change & EV_CHANGE_SIGNAL) {
struct evmap_signal *ctx;
GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
ptr = ((char*)ctx) + sizeof(struct evmap_signal);
} else {
struct evmap_io *ctx;
GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
ptr = ((char*)ctx) + sizeof(struct evmap_io);
}
return (void*)ptr;
}
#ifdef DEBUG_CHANGELIST
/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_check(struct event_base *base)
{
int i;
struct event_changelist *changelist = &base->changelist;
EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
for (i = 0; i < changelist->n_changes; ++i) {
struct event_change *c = &changelist->changes[i];
struct event_changelist_fdinfo *f;
EVUTIL_ASSERT(c->fd >= 0);
f = event_change_get_fdinfo(base, c);
EVUTIL_ASSERT(f);
EVUTIL_ASSERT(f->idxplus1 == i + 1);
}
for (i = 0; i < base->io.nentries; ++i) {
struct evmap_io *io = base->io.entries[i];
struct event_changelist_fdinfo *f;
if (!io)
continue;
f = (void*)
( ((char*)io) + sizeof(struct evmap_io) );
if (f->idxplus1) {
struct event_change *c = &changelist->changes[f->idxplus1 - 1];
EVUTIL_ASSERT(c->fd == i);
}
}
}
#else
#define event_changelist_check(base) ((void)0)
#endif
void
event_changelist_remove_all(struct event_changelist *changelist,
struct event_base *base)
{
int i;
event_changelist_check(base);
for (i = 0; i < changelist->n_changes; ++i) {
struct event_change *ch = &changelist->changes[i];
struct event_changelist_fdinfo *fdinfo =
event_change_get_fdinfo(base, ch);
EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
fdinfo->idxplus1 = 0;
}
changelist->n_changes = 0;
event_changelist_check(base);
}
void
event_changelist_freemem(struct event_changelist *changelist)
{
if (changelist->changes)
mm_free(changelist->changes);
event_changelist_init(changelist); /* zero it all out. */
}
/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
int new_size;
struct event_change *new_changes;
if (changelist->changes_size < 64)
new_size = 64;
else
new_size = changelist->changes_size * 2;
new_changes = mm_realloc(changelist->changes,
new_size * sizeof(struct event_change));
if (EVUTIL_UNLIKELY(new_changes == NULL))
return (-1);
changelist->changes = new_changes;
changelist->changes_size = new_size;
return (0);
}
/** Return a pointer to the changelist entry for the file descriptor or signal
* 'fd', whose fdinfo is 'fdinfo'. If none exists, construct it, setting its
* old_events field to old_events.
*/
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
evutil_socket_t fd,
short old_events,
struct event_changelist_fdinfo *fdinfo)
{
struct event_change *change;
if (fdinfo->idxplus1 == 0) {
int idx;
EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);
if (changelist->n_changes == changelist->changes_size) {
if (event_changelist_grow(changelist) < 0)
return NULL;
}
idx = changelist->n_changes++;
change = &changelist->changes[idx];
fdinfo->idxplus1 = idx + 1;
memset(change, 0, sizeof(struct event_change));
change->fd = fd;
change->old_events = old_events;
} else {
change = &changelist->changes[fdinfo->idxplus1 - 1];
EVUTIL_ASSERT(change->fd == fd);
}
return change;
}
int
event_changelist_add(struct event_base *base, evutil_socket_t fd, short old, short events,
void *p)
{
struct event_changelist *changelist = &base->changelist;
struct event_changelist_fdinfo *fdinfo = p;
struct event_change *change;
event_changelist_check(base);
change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
if (!change)
return -1;
/* An add replaces any previous delete, but doesn't result in a no-op,
* since the delete might fail (because the fd had been closed since
 * the last add, for instance). */
if (events & (EV_READ|EV_SIGNAL)) {
change->read_change = EV_CHANGE_ADD |
(events & (EV_ET|EV_PERSIST|EV_SIGNAL));
}
if (events & EV_WRITE) {
change->write_change = EV_CHANGE_ADD |
(events & (EV_ET|EV_PERSIST|EV_SIGNAL));
}
event_changelist_check(base);
return (0);
}
int
event_changelist_del(struct event_base *base, evutil_socket_t fd, short old, short events,
void *p)
{
struct event_changelist *changelist = &base->changelist;
struct event_changelist_fdinfo *fdinfo = p;
struct event_change *change;
event_changelist_check(base);
change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
event_changelist_check(base);
if (!change)
return -1;
/* A delete removes any previous add, rather than replacing it:
on those platforms where "add, delete, dispatch" is not the same
as "no-op, dispatch", we want the no-op behavior.
As well as checking the current operation we should also check
the original set of events to make sure we're not ignoring
the case where the add operation is present on an event that
was already set.
If we have a no-op item, we could remove it from the list
entirely, but really there's not much point: skipping the no-op
change when we do the dispatch later is far cheaper than rejuggling
the array now.
As this stands, it also lets through deletions of events that are
not currently set.
*/
if (events & (EV_READ|EV_SIGNAL)) {
if (!(change->old_events & (EV_READ | EV_SIGNAL)) &&
(change->read_change & EV_CHANGE_ADD))
change->read_change = 0;
else
change->read_change = EV_CHANGE_DEL;
}
if (events & EV_WRITE) {
if (!(change->old_events & EV_WRITE) &&
(change->write_change & EV_CHANGE_ADD))
change->write_change = 0;
else
change->write_change = EV_CHANGE_DEL;
}
event_changelist_check(base);
return (0);
}
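/* Illustrative walk-through (hypothetical sequence, derived from the two
 * functions above): suppose fd 7 had no events pending at the start of this
 * round (old_events == 0).  event_changelist_add() for EV_READ records
 * read_change = EV_CHANGE_ADD; a subsequent event_changelist_del() for
 * EV_READ sees that the fd was not previously reading and that the pending
 * change is an add, so it simply clears read_change back to 0 and the
 * backend never hears about the short-lived event. */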

View file

@@ -1,463 +0,0 @@
/*
* Submitted by David Pacheco (dp.spambait@gmail.com)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2007 Sun Microsystems. All rights reserved.
* Use is subject to license terms.
*/
/*
* evport.c: event backend using Solaris 10 event ports. See port_create(3C).
* This implementation is loosely modeled after the one used for select(2) (in
* select.c).
*
* The outstanding events are tracked in a data structure called evport_data.
* Each entry in the ed_fds array corresponds to a file descriptor, and contains
* pointers to the read and write events that correspond to that fd. (That is,
* when the file is readable, the "read" event should handle it, etc.)
*
* evport_add and evport_del update this data structure. evport_dispatch uses it
* to determine where to callback when an event occurs (which it gets from
* port_getn).
*
* Helper functions are used: grow() grows the file descriptor array as
* necessary when large fd's come in. reassociate() takes care of maintaining
* the proper file-descriptor/event-port associations.
*
* As in the select(2) implementation, signals are handled by evsignal.
*/
#include "event2/event-config.h"
#include <sys/time.h>
#include <sys/queue.h>
#include <errno.h>
#include <poll.h>
#include <port.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <event2/thread.h>
#include "evthread-internal.h"
#include "event-internal.h"
#include "log-internal.h"
#include "evsignal-internal.h"
#include "evmap-internal.h"
/*
* Default value for ed_nevents, which is the maximum file descriptor number we
* can handle. If an event comes in for a file descriptor F > nevents, we will
* grow the array of file descriptors, doubling its size.
*/
#define DEFAULT_NFDS 16
/*
* EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
* any particular call. You can speed things up by increasing this, but it will
* (obviously) require more memory.
*/
#define EVENTS_PER_GETN 8
/*
* Per-file-descriptor information about what events we're subscribed to. These
* fields are NULL if no event is subscribed to either of them.
*/
struct fd_info {
short fdi_what; /* combinations of EV_READ and EV_WRITE */
};
#define FDI_HAS_READ(fdi) ((fdi)->fdi_what & EV_READ)
#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_what & EV_WRITE)
#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
(FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
struct evport_data {
int ed_port; /* event port for system events */
int ed_nevents; /* number of allocated fdi's */
struct fd_info *ed_fds; /* allocated fdi table */
/* fdi's that we need to reassoc */
int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
};
static void* evport_init(struct event_base *);
static int evport_add(struct event_base *, int fd, short old, short events, void *);
static int evport_del(struct event_base *, int fd, short old, short events, void *);
static int evport_dispatch(struct event_base *, struct timeval *);
static void evport_dealloc(struct event_base *);
const struct eventop evportops = {
"evport",
evport_init,
evport_add,
evport_del,
evport_dispatch,
evport_dealloc,
1, /* need reinit */
0, /* features */
0, /* fdinfo length */
};
/*
* Initialize the event port implementation.
*/
static void*
evport_init(struct event_base *base)
{
struct evport_data *evpd;
int i;
if (!(evpd = mm_calloc(1, sizeof(struct evport_data))))
return (NULL);
if ((evpd->ed_port = port_create()) == -1) {
mm_free(evpd);
return (NULL);
}
/*
* Initialize file descriptor structure
*/
evpd->ed_fds = mm_calloc(DEFAULT_NFDS, sizeof(struct fd_info));
if (evpd->ed_fds == NULL) {
close(evpd->ed_port);
mm_free(evpd);
return (NULL);
}
evpd->ed_nevents = DEFAULT_NFDS;
for (i = 0; i < EVENTS_PER_GETN; i++)
evpd->ed_pending[i] = -1;
evsig_init(base);
return (evpd);
}
#ifdef CHECK_INVARIANTS
/*
* Checks some basic properties about the evport_data structure. Because it
* checks all file descriptors, this function can be expensive when the maximum
* file descriptor ever used is rather large.
*/
static void
check_evportop(struct evport_data *evpd)
{
EVUTIL_ASSERT(evpd);
EVUTIL_ASSERT(evpd->ed_nevents > 0);
EVUTIL_ASSERT(evpd->ed_port > 0);
EVUTIL_ASSERT(evpd->ed_fds > 0);
}
/*
* Verifies very basic integrity of a given port_event.
*/
static void
check_event(port_event_t* pevt)
{
/*
* We've only registered for PORT_SOURCE_FD events. The only
* other thing we can legitimately receive is PORT_SOURCE_ALERT,
* but since we're not using port_alert either, we can assume
* PORT_SOURCE_FD.
*/
EVUTIL_ASSERT(pevt->portev_source == PORT_SOURCE_FD);
EVUTIL_ASSERT(pevt->portev_user == NULL);
}
#else
#define check_evportop(epop)
#define check_event(pevt)
#endif /* CHECK_INVARIANTS */
/*
* Doubles the size of the allocated file descriptor array.
*/
static int
grow(struct evport_data *epdp, int factor)
{
struct fd_info *tmp;
int oldsize = epdp->ed_nevents;
int newsize = factor * oldsize;
EVUTIL_ASSERT(factor > 1);
check_evportop(epdp);
tmp = mm_realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
if (NULL == tmp)
return -1;
epdp->ed_fds = tmp;
memset((char*) (epdp->ed_fds + oldsize), 0,
(newsize - oldsize)*sizeof(struct fd_info));
epdp->ed_nevents = newsize;
check_evportop(epdp);
return 0;
}
/*
* (Re)associates the given file descriptor with the event port. The OS events
* are specified (implicitly) from the fd_info struct.
*/
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
int sysevents = FDI_TO_SYSEVENTS(fdip);
if (sysevents != 0) {
if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
fd, sysevents, NULL) == -1) {
event_warn("port_associate");
return (-1);
}
}
check_evportop(epdp);
return (0);
}
/*
* Main event loop - polls port_getn for some number of events, and processes
* them.
*/
static int
evport_dispatch(struct event_base *base, struct timeval *tv)
{
int i, res;
struct evport_data *epdp = base->evbase;
port_event_t pevtlist[EVENTS_PER_GETN];
/*
* port_getn will block until it has at least nevents events. It will
* also return how many it's given us (which may be more than we asked
* for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
* nevents.
*/
int nevents = 1;
/*
* We have to convert a struct timeval to a struct timespec
* (only difference is nanoseconds vs. microseconds). If no time-based
* events are active, we should wait for I/O (and tv == NULL).
*/
struct timespec ts;
struct timespec *ts_p = NULL;
if (tv != NULL) {
ts.tv_sec = tv->tv_sec;
ts.tv_nsec = tv->tv_usec * 1000;
ts_p = &ts;
}
/*
* Before doing anything else, we need to reassociate the events we hit
* last time which need reassociation. See comment at the end of the
* loop below.
*/
for (i = 0; i < EVENTS_PER_GETN; ++i) {
struct fd_info *fdi = NULL;
if (epdp->ed_pending[i] != -1) {
fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
}
if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
int fd = epdp->ed_pending[i];
reassociate(epdp, fdi, fd);
epdp->ed_pending[i] = -1;
}
}
EVBASE_RELEASE_LOCK(base, th_base_lock);
res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
(unsigned int *) &nevents, ts_p);
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (res == -1) {
if (errno == EINTR || errno == EAGAIN) {
evsig_process(base);
return (0);
} else if (errno == ETIME) {
if (nevents == 0)
return (0);
} else {
event_warn("port_getn");
return (-1);
}
} else if (base->sig.evsig_caught) {
evsig_process(base);
}
event_debug(("%s: port_getn reports %d events", __func__, nevents));
for (i = 0; i < nevents; ++i) {
struct fd_info *fdi;
port_event_t *pevt = &pevtlist[i];
int fd = (int) pevt->portev_object;
check_evportop(epdp);
check_event(pevt);
epdp->ed_pending[i] = fd;
/*
* Figure out what kind of event it was
* (because we have to pass this to the callback)
*/
res = 0;
if (pevt->portev_events & POLLIN)
res |= EV_READ;
if (pevt->portev_events & POLLOUT)
res |= EV_WRITE;
EVUTIL_ASSERT(epdp->ed_nevents > fd);
fdi = &(epdp->ed_fds[fd]);
evmap_io_active(base, fd, res);
} /* end of all events gotten */
check_evportop(epdp);
return (0);
}
/*
* Adds the given event (so that you will be notified when it happens via
* the callback function).
*/
static int
evport_add(struct event_base *base, int fd, short old, short events, void *p)
{
struct evport_data *evpd = base->evbase;
struct fd_info *fdi;
int factor;
(void)p;
check_evportop(evpd);
/*
* If necessary, grow the file descriptor info table
*/
factor = 1;
while (fd >= factor * evpd->ed_nevents)
factor *= 2;
if (factor > 1) {
if (-1 == grow(evpd, factor)) {
return (-1);
}
}
fdi = &evpd->ed_fds[fd];
fdi->fdi_what |= events;
return reassociate(evpd, fdi, fd);
}
/*
* Removes the given event from the list of events to wait for.
*/
static int
evport_del(struct event_base *base, int fd, short old, short events, void *p)
{
struct evport_data *evpd = base->evbase;
struct fd_info *fdi;
int i;
int associated = 1;
(void)p;
check_evportop(evpd);
if (evpd->ed_nevents < fd) {
return (-1);
}
for (i = 0; i < EVENTS_PER_GETN; ++i) {
if (evpd->ed_pending[i] == fd) {
associated = 0;
break;
}
}
fdi = &evpd->ed_fds[fd];
if (events & EV_READ)
fdi->fdi_what &= ~EV_READ;
if (events & EV_WRITE)
fdi->fdi_what &= ~EV_WRITE;
if (associated) {
if (!FDI_HAS_EVENTS(fdi) &&
port_dissociate(evpd->ed_port, PORT_SOURCE_FD, fd) == -1) {
/*
 * Ignore EBADFD error; the fd could have been closed
* before event_del() was called.
*/
if (errno != EBADFD) {
event_warn("port_dissociate");
return (-1);
}
} else {
if (FDI_HAS_EVENTS(fdi)) {
return (reassociate(evpd, fdi, fd));
}
}
} else {
if ((fdi->fdi_what & (EV_READ|EV_WRITE)) == 0) {
evpd->ed_pending[i] = -1;
}
}
return 0;
}
static void
evport_dealloc(struct event_base *base)
{
struct evport_data *evpd = base->evbase;
evsig_dealloc(base);
close(evpd->ed_port);
if (evpd->ed_fds)
mm_free(evpd->ed_fds);
mm_free(evpd);
}
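/*
 * Illustrative usage sketch (not part of the original file): applications
 * never call the evport_* functions directly -- they are reached through
 * the evportops table above, which libevent typically picks by itself on
 * Solaris.  A minimal consumer looks the same as with any other backend
 * (read_cb and fd are hypothetical):
 */
#if 0	/* example only, not compiled */
#include <event2/event.h>

static void
read_cb(evutil_socket_t fd, short what, void *arg)
{
	/* fd became readable; 'what' contains EV_READ */
	(void)fd; (void)what; (void)arg;
}

static void
example_loop(evutil_socket_t fd)
{
	struct event_base *base = event_base_new();
	struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);

	event_add(ev, NULL);
	event_base_dispatch(base);

	event_free(ev);
	event_base_free(base);
}
#endif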

View file

@@ -1,204 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_INTERNAL_H_
#define _EVRPC_INTERNAL_H_
#include "http-internal.h"
struct evrpc;
struct evrpc_request_wrapper;
#define EVRPC_URI_PREFIX "/.rpc."
struct evrpc_hook {
TAILQ_ENTRY(evrpc_hook) next;
/* returns EVRPC_TERMINATE if the rpc should be aborted.
* a hook is allowed to rewrite the evbuffer
*/
int (*process)(void *, struct evhttp_request *,
struct evbuffer *, void *);
void *process_arg;
};
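/* Illustrative sketch (hypothetical hook, not part of the original file):
 * a process callback that rejects empty payloads and lets everything else
 * through.  EVRPC_CONTINUE and EVRPC_TERMINATE come from event2/rpc.h. */
#if 0	/* example only, not compiled */
static int
example_hook(void *ctx, struct evhttp_request *req, struct evbuffer *buf,
    void *cb_arg)
{
	(void)ctx; (void)req; (void)cb_arg;
	if (evbuffer_get_length(buf) == 0)
		return EVRPC_TERMINATE;		/* abort the rpc */
	return EVRPC_CONTINUE;			/* keep processing */
}
#endif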
TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
/*
* this is shared between the base and the pool, so that we can reuse
* the hook adding functions; we alias both evrpc_pool and evrpc_base
* to this common structure.
*/
struct evrpc_hook_ctx;
TAILQ_HEAD(evrpc_pause_list, evrpc_hook_ctx);
struct _evrpc_hooks {
/* hooks for processing outbound and inbound rpcs */
struct evrpc_hook_list in_hooks;
struct evrpc_hook_list out_hooks;
struct evrpc_pause_list pause_requests;
};
#define input_hooks common.in_hooks
#define output_hooks common.out_hooks
#define paused_requests common.pause_requests
struct evrpc_base {
struct _evrpc_hooks common;
/* the HTTP server under which we register our RPC calls */
struct evhttp* http_server;
/* a list of all RPCs registered with us */
TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
};
struct evrpc_req_generic;
void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
/* A pool for holding evhttp_connection objects */
struct evrpc_pool {
struct _evrpc_hooks common;
struct event_base *base;
struct evconq connections;
int timeout;
TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) (requests);
};
struct evrpc_hook_ctx {
TAILQ_ENTRY(evrpc_hook_ctx) next;
void *ctx;
void (*cb)(void *, enum EVRPC_HOOK_RESULT);
};
struct evrpc_meta {
TAILQ_ENTRY(evrpc_meta) next;
char *key;
void *data;
size_t data_size;
};
TAILQ_HEAD(evrpc_meta_list, evrpc_meta);
struct evrpc_hook_meta {
struct evrpc_meta_list meta_data;
struct evhttp_connection *evcon;
};
/* allows association of meta data with a request */
static void evrpc_hook_associate_meta(struct evrpc_hook_meta **pctx,
struct evhttp_connection *evcon);
/* creates a new meta data store */
static struct evrpc_hook_meta *evrpc_hook_meta_new(void);
/* frees the meta data associated with a request */
static void evrpc_hook_context_free(struct evrpc_hook_meta *ctx);
/* the server side of an rpc */
/* We alias the RPC specific structs to this voided one */
struct evrpc_req_generic {
/*
* allows association of meta data via hooks - needs to be
* synchronized with evrpc_request_wrapper
*/
struct evrpc_hook_meta *hook_meta;
/* the unmarshaled request object */
void *request;
/* the empty reply object that needs to be filled in */
void *reply;
/*
* the static structure for this rpc; that can be used to
* automatically unmarshal and marshal the http buffers.
*/
struct evrpc *rpc;
/*
* the http request structure on which we need to answer.
*/
struct evhttp_request* http_req;
/*
* Temporary data store for marshaled data
*/
struct evbuffer* rpc_data;
};
/* the client side of an rpc request */
struct evrpc_request_wrapper {
/*
* allows association of meta data via hooks - needs to be
* synchronized with evrpc_req_generic.
*/
struct evrpc_hook_meta *hook_meta;
TAILQ_ENTRY(evrpc_request_wrapper) next;
/* pool on which this rpc request is being made */
struct evrpc_pool *pool;
/* connection on which the request is being sent */
struct evhttp_connection *evcon;
/* the actual request */
struct evhttp_request *req;
/* event for implementing request timeouts */
struct event ev_timeout;
/* the name of the rpc */
char *name;
/* callback */
void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
void *cb_arg;
void *request;
void *reply;
/* marshals the request structure into a buffer */
void (*request_marshal)(struct evbuffer *, void *);
/* removes all stored state in the reply */
void (*reply_clear)(void *);
/* unmarshals a buffer into the reply structure */
int (*reply_unmarshal)(void *, struct evbuffer*);
};
#endif /* _EVRPC_INTERNAL_H_ */

File diff suppressed because it is too large. Load diff

View file

@@ -1,35 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVRPC_H_
#define _EVRPC_H_
#include <event.h>
#include <event2/rpc.h>
#include <event2/rpc_struct.h>
#include <event2/rpc_compat.h>
#endif /* _EVRPC_H_ */

View file

@@ -1,54 +0,0 @@
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVSIGNAL_H_
#define _EVSIGNAL_H_
#ifndef evutil_socket_t
#include "event2/util.h"
#endif
#include <signal.h>
typedef void (*ev_sighandler_t)(int);
struct evsig_info {
struct event ev_signal;
evutil_socket_t ev_signal_pair[2];
int ev_signal_added;
volatile sig_atomic_t evsig_caught;
sig_atomic_t evsigcaught[NSIG];
#ifdef _EVENT_HAVE_SIGACTION
struct sigaction **sh_old;
#else
ev_sighandler_t **sh_old;
#endif
int sh_old_max;
};
int evsig_init(struct event_base *);
void evsig_process(struct event_base *);
void evsig_dealloc(struct event_base *);
#endif /* _EVSIGNAL_H_ */

View file

@@ -1,354 +0,0 @@
/*
* Copyright (c) 2008-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVTHREAD_INTERNAL_H_
#define _EVTHREAD_INTERNAL_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/thread.h>
#include "event2/event-config.h"
#include "util-internal.h"
struct event_base;
#ifndef WIN32
/* On Windows, the way we currently make DLLs, it's not allowed for us to
* have shared global structures. Thus, we only do the direct-call-to-function
* code path if we know that the local shared library system supports it.
*/
#define EVTHREAD_EXPOSE_STRUCTS
#endif
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT) && defined(EVTHREAD_EXPOSE_STRUCTS)
/* Global function pointers to lock-related functions. NULL if locking isn't
enabled. */
extern struct evthread_lock_callbacks _evthread_lock_fns;
extern struct evthread_condition_callbacks _evthread_cond_fns;
extern unsigned long (*_evthread_id_fn)(void);
extern int _evthread_lock_debugging_enabled;
/** Return the ID of the current thread, or 1 if threading isn't enabled. */
#define EVTHREAD_GET_ID() \
(_evthread_id_fn ? _evthread_id_fn() : 1)
/** Return true iff we're in the thread that is currently (or most recently)
* running a given event_base's loop. Requires lock. */
#define EVBASE_IN_THREAD(base) \
(_evthread_id_fn == NULL || \
(base)->th_owner_id == _evthread_id_fn())
/** Return true iff we need to notify the base's main thread about changes to
* its state, because it's currently running the main loop in another
* thread. Requires lock. */
#define EVBASE_NEED_NOTIFY(base) \
(_evthread_id_fn != NULL && \
(base)->running_loop && \
(base)->th_owner_id != _evthread_id_fn())
/** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
NULL if locking is not enabled. */
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
((lockvar) = _evthread_lock_fns.alloc ? \
_evthread_lock_fns.alloc(locktype) : NULL)
/** Free a given lock, if it is present and locking is enabled. */
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
if (_lock_tmp_ && _evthread_lock_fns.free) \
_evthread_lock_fns.free(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthread_lock_fns.lock(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthread_lock_fns.unlock(mode, lockvar); \
} while (0)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
lockvar1 = lockvar2; \
lockvar2 = tmp; \
} \
} while (0)
/** Lock an event_base, if it is set up for locking. Acquires the lock
in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
EVLOCK_LOCK((base)->lockvar, 0); \
} while (0)
/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
EVLOCK_UNLOCK((base)->lockvar, 0); \
} while (0)
/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
if ((lock) && _evthread_lock_debugging_enabled) { \
EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
} \
} while (0)
/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
* manage to get it. */
static inline int EVLOCK_TRY_LOCK(void *lock);
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
if (lock && _evthread_lock_fns.lock) {
int r = _evthread_lock_fns.lock(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
* of course we count as having the lock. */
return 1;
}
}
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
(condvar) = _evthread_cond_fns.alloc_condition ? \
_evthread_cond_fns.alloc_condition(0) : NULL; \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
_evthread_cond_fns.free_condition((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
( (cond) ? _evthread_cond_fns.signal_condition((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
( (cond) ? _evthread_cond_fns.signal_condition((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
( (cond) ? _evthread_cond_fns.wait_condition((cond), (lock), (tv)) : 0 )
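/* Illustrative sketch (not part of the original header; 'cond' stands for a
 * condition allocated with EVTHREAD_ALLOC_COND and 'work_is_ready' is a
 * hypothetical predicate): these wrappers are used the same way as pthreads
 * condition variables --
 *
 *	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 *	while (!work_is_ready(base))
 *		EVTHREAD_COND_WAIT(cond, base->th_base_lock);
 *	EVBASE_RELEASE_LOCK(base, th_base_lock);
 *
 * the lock is held while the predicate is re-checked and is only dropped
 * for the duration of the wait itself. */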
#elif ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
unsigned long _evthreadimpl_get_id(void);
int _evthreadimpl_is_lock_debugging_enabled(void);
void *_evthreadimpl_lock_alloc(unsigned locktype);
void _evthreadimpl_lock_free(void *lock, unsigned locktype);
int _evthreadimpl_lock_lock(unsigned mode, void *lock);
int _evthreadimpl_lock_unlock(unsigned mode, void *lock);
void *_evthreadimpl_cond_alloc(unsigned condtype);
void _evthreadimpl_cond_free(void *cond);
int _evthreadimpl_cond_signal(void *cond, int broadcast);
int _evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv);
#define EVTHREAD_GET_ID() _evthreadimpl_get_id()
#define EVBASE_IN_THREAD(base) \
((base)->th_owner_id == _evthreadimpl_get_id())
#define EVBASE_NEED_NOTIFY(base) \
((base)->running_loop && \
((base)->th_owner_id != _evthreadimpl_get_id()))
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) \
((lockvar) = _evthreadimpl_lock_alloc(locktype))
#define EVTHREAD_FREE_LOCK(lockvar, locktype) \
do { \
void *_lock_tmp_ = (lockvar); \
if (_lock_tmp_) \
_evthreadimpl_lock_free(_lock_tmp_, (locktype)); \
} while (0)
/** Acquire a lock. */
#define EVLOCK_LOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthreadimpl_lock_lock(mode, lockvar); \
} while (0)
/** Release a lock */
#define EVLOCK_UNLOCK(lockvar,mode) \
do { \
if (lockvar) \
_evthreadimpl_lock_unlock(mode, lockvar); \
} while (0)
/** Lock an event_base, if it is set up for locking. Acquires the lock
in the base structure whose field is named 'lockvar'. */
#define EVBASE_ACQUIRE_LOCK(base, lockvar) do { \
EVLOCK_LOCK((base)->lockvar, 0); \
} while (0)
/** Unlock an event_base, if it is set up for locking. */
#define EVBASE_RELEASE_LOCK(base, lockvar) do { \
EVLOCK_UNLOCK((base)->lockvar, 0); \
} while (0)
/** If lock debugging is enabled, and lock is non-null, assert that 'lock' is
* locked and held by us. */
#define EVLOCK_ASSERT_LOCKED(lock) \
do { \
if ((lock) && _evthreadimpl_is_lock_debugging_enabled()) { \
EVUTIL_ASSERT(_evthread_is_debug_lock_held(lock)); \
} \
} while (0)
/** Try to grab the lock for 'lockvar' without blocking, and return 1 if we
* manage to get it. */
static inline int EVLOCK_TRY_LOCK(void *lock);
static inline int
EVLOCK_TRY_LOCK(void *lock)
{
if (lock) {
int r = _evthreadimpl_lock_lock(EVTHREAD_TRY, lock);
return !r;
} else {
/* Locking is disabled either globally or for this thing;
* of course we count as having the lock. */
return 1;
}
}
/** Allocate a new condition variable and store it in the void *, condvar */
#define EVTHREAD_ALLOC_COND(condvar) \
do { \
(condvar) = _evthreadimpl_cond_alloc(0); \
} while (0)
/** Deallocate and free a condition variable in condvar */
#define EVTHREAD_FREE_COND(cond) \
do { \
if (cond) \
_evthreadimpl_cond_free((cond)); \
} while (0)
/** Signal one thread waiting on cond */
#define EVTHREAD_COND_SIGNAL(cond) \
( (cond) ? _evthreadimpl_cond_signal((cond), 0) : 0 )
/** Signal all threads waiting on cond */
#define EVTHREAD_COND_BROADCAST(cond) \
( (cond) ? _evthreadimpl_cond_signal((cond), 1) : 0 )
/** Wait until the condition 'cond' is signalled. Must be called while
* holding 'lock'. The lock will be released until the condition is
* signalled, at which point it will be acquired again. Returns 0 for
* success, -1 for failure. */
#define EVTHREAD_COND_WAIT(cond, lock) \
( (cond) ? _evthreadimpl_cond_wait((cond), (lock), NULL) : 0 )
/** As EVTHREAD_COND_WAIT, but gives up after 'tv' has elapsed. Returns 1
* on timeout. */
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, tv) \
( (cond) ? _evthreadimpl_cond_wait((cond), (lock), (tv)) : 0 )
#else /* _EVENT_DISABLE_THREAD_SUPPORT */
#define EVTHREAD_GET_ID() 1
#define EVTHREAD_ALLOC_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_LOCK(lockvar, locktype) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK(lockvar, mode) _EVUTIL_NIL_STMT
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) _EVUTIL_NIL_STMT
#define EVBASE_IN_THREAD(base) 1
#define EVBASE_NEED_NOTIFY(base) 0
#define EVBASE_ACQUIRE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVBASE_RELEASE_LOCK(base, lock) _EVUTIL_NIL_STMT
#define EVLOCK_ASSERT_LOCKED(lock) _EVUTIL_NIL_STMT
#define EVLOCK_TRY_LOCK(lock) 1
#define EVTHREAD_ALLOC_COND(condvar) _EVUTIL_NIL_STMT
#define EVTHREAD_FREE_COND(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_SIGNAL(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_BROADCAST(cond) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT(cond, lock) _EVUTIL_NIL_STMT
#define EVTHREAD_COND_WAIT_TIMED(cond, lock, howlong) _EVUTIL_NIL_STMT
#endif
/* This code is shared between both lock impls */
#if ! defined(_EVENT_DISABLE_THREAD_SUPPORT)
/** Helper: put lockvar1 and lockvar2 into pointerwise ascending order. */
#define _EVLOCK_SORTLOCKS(lockvar1, lockvar2) \
do { \
if (lockvar1 && lockvar2 && lockvar1 > lockvar2) { \
void *tmp = lockvar1; \
lockvar1 = lockvar2; \
lockvar2 = tmp; \
} \
} while (0)
/** Acquire both lock1 and lock2. Always allocates locks in the same order,
* so that two threads locking two locks with LOCK2 will not deadlock. */
#define EVLOCK_LOCK2(lock1,lock2,mode1,mode2) \
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
EVLOCK_LOCK(_lock1_tmplock,mode1); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_LOCK(_lock2_tmplock,mode2); \
} while (0)
/** Release both lock1 and lock2. */
#define EVLOCK_UNLOCK2(lock1,lock2,mode1,mode2) \
do { \
void *_lock1_tmplock = (lock1); \
void *_lock2_tmplock = (lock2); \
_EVLOCK_SORTLOCKS(_lock1_tmplock,_lock2_tmplock); \
if (_lock2_tmplock != _lock1_tmplock) \
EVLOCK_UNLOCK(_lock2_tmplock,mode2); \
EVLOCK_UNLOCK(_lock1_tmplock,mode1); \
} while (0)
int _evthread_is_debug_lock_held(void *lock);
void *_evthread_debug_get_real_lock(void *lock);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _EVTHREAD_INTERNAL_H_ */
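For orientation, a minimal sketch of how libevent's own sources use the locking macros above; the helper function is purely illustrative, and the th_base_lock field name is assumed to match the event_base definition in event-internal.h:

static void
example_touch_base(struct event_base *base)
{
    /* These expand to no-ops unless threading support was configured. */
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    /* ... read or modify state shared with other threads here ... */
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}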

View file

@ -1,341 +0,0 @@
/*
* Copyright (c) 2008-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
#include <event2/thread.h>
#include <stdlib.h>
#include <string.h>
#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"
#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif
/* globals */
GLOBAL int _evthread_lock_debugging_enabled = 0;
GLOBAL struct evthread_lock_callbacks _evthread_lock_fns = {
0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*_evthread_id_fn)(void) = NULL;
GLOBAL struct evthread_condition_callbacks _evthread_cond_fns = {
0, NULL, NULL, NULL, NULL
};
/* Used for debugging */
static struct evthread_lock_callbacks _original_lock_fns = {
0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks _original_cond_fns = {
0, NULL, NULL, NULL, NULL
};
void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
_evthread_id_fn = id_fn;
}
int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
struct evthread_lock_callbacks *target =
_evthread_lock_debugging_enabled
? &_original_lock_fns : &_evthread_lock_fns;
if (!cbs) {
memset(target, 0, sizeof(_evthread_lock_fns));
return 0;
}
if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
memcpy(target, cbs, sizeof(_evthread_lock_fns));
return 0;
} else {
return -1;
}
}
int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
struct evthread_condition_callbacks *target =
_evthread_lock_debugging_enabled
? &_original_cond_fns : &_evthread_cond_fns;
if (!cbs) {
memset(target, 0, sizeof(_evthread_cond_fns));
} else if (cbs->alloc_condition && cbs->free_condition &&
cbs->signal_condition && cbs->wait_condition) {
memcpy(target, cbs, sizeof(_evthread_cond_fns));
}
if (_evthread_lock_debugging_enabled) {
_evthread_cond_fns.alloc_condition = cbs->alloc_condition;
_evthread_cond_fns.free_condition = cbs->free_condition;
_evthread_cond_fns.signal_condition = cbs->signal_condition;
}
return 0;
}
struct debug_lock {
unsigned locktype;
unsigned long held_by;
/* XXXX if we ever use read-write locks, we will need a separate
* lock to protect count. */
int count;
void *lock;
};
static void *
debug_lock_alloc(unsigned locktype)
{
struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
if (!result)
return NULL;
if (_original_lock_fns.alloc) {
if (!(result->lock = _original_lock_fns.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
mm_free(result);
return NULL;
}
} else {
result->lock = NULL;
}
result->locktype = locktype;
result->count = 0;
result->held_by = 0;
return result;
}
static void
debug_lock_free(void *lock_, unsigned locktype)
{
struct debug_lock *lock = lock_;
EVUTIL_ASSERT(lock->count == 0);
EVUTIL_ASSERT(locktype == lock->locktype);
if (_original_lock_fns.free) {
_original_lock_fns.free(lock->lock,
lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
}
lock->lock = NULL;
lock->count = -100;
mm_free(lock);
}
static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
++lock->count;
if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
EVUTIL_ASSERT(lock->count == 1);
if (_evthread_id_fn) {
unsigned long me;
me = _evthread_id_fn();
if (lock->count > 1)
EVUTIL_ASSERT(lock->held_by == me);
lock->held_by = me;
}
}
static int
debug_lock_lock(unsigned mode, void *lock_)
{
struct debug_lock *lock = lock_;
int res = 0;
if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
if (_original_lock_fns.lock)
res = _original_lock_fns.lock(mode, lock->lock);
if (!res) {
evthread_debug_lock_mark_locked(mode, lock);
}
return res;
}
static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
else
EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
if (_evthread_id_fn) {
EVUTIL_ASSERT(lock->held_by == _evthread_id_fn());
if (lock->count == 1)
lock->held_by = 0;
}
--lock->count;
EVUTIL_ASSERT(lock->count >= 0);
}
static int
debug_lock_unlock(unsigned mode, void *lock_)
{
struct debug_lock *lock = lock_;
int res = 0;
evthread_debug_lock_mark_unlocked(mode, lock);
if (_original_lock_fns.unlock)
res = _original_lock_fns.unlock(mode, lock->lock);
return res;
}
static int
debug_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
int r;
struct debug_lock *lock = _lock;
EVLOCK_ASSERT_LOCKED(_lock);
evthread_debug_lock_mark_unlocked(0, lock);
r = _original_cond_fns.wait_condition(_cond, lock->lock, tv);
evthread_debug_lock_mark_locked(0, lock);
return r;
}
void
evthread_enable_lock_debuging(void)
{
struct evthread_lock_callbacks cbs = {
EVTHREAD_LOCK_API_VERSION,
EVTHREAD_LOCKTYPE_RECURSIVE,
debug_lock_alloc,
debug_lock_free,
debug_lock_lock,
debug_lock_unlock
};
if (_evthread_lock_debugging_enabled)
return;
memcpy(&_original_lock_fns, &_evthread_lock_fns,
sizeof(struct evthread_lock_callbacks));
memcpy(&_evthread_lock_fns, &cbs,
sizeof(struct evthread_lock_callbacks));
memcpy(&_original_cond_fns, &_evthread_cond_fns,
sizeof(struct evthread_condition_callbacks));
_evthread_cond_fns.wait_condition = debug_cond_wait;
_evthread_lock_debugging_enabled = 1;
}
int
_evthread_is_debug_lock_held(void *lock_)
{
struct debug_lock *lock = lock_;
if (! lock->count)
return 0;
if (_evthread_id_fn) {
unsigned long me = _evthread_id_fn();
if (lock->held_by != me)
return 0;
}
return 1;
}
void *
_evthread_debug_get_real_lock(void *lock_)
{
struct debug_lock *lock = lock_;
return lock->lock;
}
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
_evthreadimpl_get_id()
{
return _evthread_id_fn ? _evthread_id_fn() : 1;
}
void *
_evthreadimpl_lock_alloc(unsigned locktype)
{
return _evthread_lock_fns.alloc ?
_evthread_lock_fns.alloc(locktype) : NULL;
}
void
_evthreadimpl_lock_free(void *lock, unsigned locktype)
{
if (_evthread_lock_fns.free)
_evthread_lock_fns.free(lock, locktype);
}
int
_evthreadimpl_lock_lock(unsigned mode, void *lock)
{
if (_evthread_lock_fns.lock)
return _evthread_lock_fns.lock(mode, lock);
else
return 0;
}
int
_evthreadimpl_lock_unlock(unsigned mode, void *lock)
{
if (_evthread_lock_fns.unlock)
return _evthread_lock_fns.unlock(mode, lock);
else
return 0;
}
void *
_evthreadimpl_cond_alloc(unsigned condtype)
{
return _evthread_cond_fns.alloc_condition ?
_evthread_cond_fns.alloc_condition(condtype) : NULL;
}
void
_evthreadimpl_cond_free(void *cond)
{
if (_evthread_cond_fns.free_condition)
_evthread_cond_fns.free_condition(cond);
}
int
_evthreadimpl_cond_signal(void *cond, int broadcast)
{
if (_evthread_cond_fns.signal_condition)
return _evthread_cond_fns.signal_condition(cond, broadcast);
else
return 0;
}
int
_evthreadimpl_cond_wait(void *cond, void *lock, const struct timeval *tv)
{
if (_evthread_cond_fns.wait_condition)
return _evthread_cond_fns.wait_condition(cond, lock, tv);
else
return 0;
}
int
_evthreadimpl_is_lock_debugging_enabled(void)
{
return _evthread_lock_debugging_enabled;
}
#endif
#endif
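The debug-lock layer above is switched on through the public API. A minimal sketch, assuming a stock libevent 2.0 install; the setup_debug_threading helper is illustrative:

#include <event2/thread.h>

int
setup_debug_threading(void)
{
    if (evthread_use_pthreads() < 0)
        return -1;
    /* Wraps the just-installed pthread callbacks with the debug_lock_*
     * functions above; note the API really does spell it "debuging". */
    evthread_enable_lock_debuging();
    return 0;
}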

View file

@ -1,181 +0,0 @@
/*
* Copyright 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
/* With glibc we need to define this to get PTHREAD_MUTEX_RECURSIVE. */
#define _GNU_SOURCE
#include <pthread.h>
struct event_base;
#include <event2/thread.h>
#include <stdlib.h>
#include "mm-internal.h"
#include "evthread-internal.h"
static pthread_mutexattr_t attr_recursive;
static void *
evthread_posix_lock_alloc(unsigned locktype)
{
pthread_mutexattr_t *attr = NULL;
pthread_mutex_t *lock = mm_malloc(sizeof(pthread_mutex_t));
if (!lock)
return NULL;
if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
attr = &attr_recursive;
if (pthread_mutex_init(lock, attr)) {
mm_free(lock);
return NULL;
}
return lock;
}
static void
evthread_posix_lock_free(void *_lock, unsigned locktype)
{
pthread_mutex_t *lock = _lock;
pthread_mutex_destroy(lock);
mm_free(lock);
}
static int
evthread_posix_lock(unsigned mode, void *_lock)
{
pthread_mutex_t *lock = _lock;
if (mode & EVTHREAD_TRY)
return pthread_mutex_trylock(lock);
else
return pthread_mutex_lock(lock);
}
static int
evthread_posix_unlock(unsigned mode, void *_lock)
{
pthread_mutex_t *lock = _lock;
return pthread_mutex_unlock(lock);
}
static unsigned long
evthread_posix_get_id(void)
{
union {
pthread_t thr;
unsigned long id;
} r;
r.thr = pthread_self();
return r.id;
}
static void *
evthread_posix_cond_alloc(unsigned condflags)
{
pthread_cond_t *cond = mm_malloc(sizeof(pthread_cond_t));
if (!cond)
return NULL;
if (pthread_cond_init(cond, NULL)) {
mm_free(cond);
return NULL;
}
return cond;
}
static void
evthread_posix_cond_free(void *_cond)
{
pthread_cond_t *cond = _cond;
pthread_cond_destroy(cond);
mm_free(cond);
}
static int
evthread_posix_cond_signal(void *_cond, int broadcast)
{
pthread_cond_t *cond = _cond;
int r;
if (broadcast)
r = pthread_cond_broadcast(cond);
else
r = pthread_cond_signal(cond);
return r ? -1 : 0;
}
static int
evthread_posix_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
int r;
pthread_cond_t *cond = _cond;
pthread_mutex_t *lock = _lock;
if (tv) {
struct timeval now, abstime;
struct timespec ts;
evutil_gettimeofday(&now, NULL);
evutil_timeradd(&now, tv, &abstime);
ts.tv_sec = abstime.tv_sec;
ts.tv_nsec = abstime.tv_usec*1000;
r = pthread_cond_timedwait(cond, lock, &ts);
if (r == ETIMEDOUT)
return 1;
else if (r)
return -1;
else
return 0;
} else {
r = pthread_cond_wait(cond, lock);
return r ? -1 : 0;
}
}
int
evthread_use_pthreads(void)
{
struct evthread_lock_callbacks cbs = {
EVTHREAD_LOCK_API_VERSION,
EVTHREAD_LOCKTYPE_RECURSIVE,
evthread_posix_lock_alloc,
evthread_posix_lock_free,
evthread_posix_lock,
evthread_posix_unlock
};
struct evthread_condition_callbacks cond_cbs = {
EVTHREAD_CONDITION_API_VERSION,
evthread_posix_cond_alloc,
evthread_posix_cond_free,
evthread_posix_cond_signal,
evthread_posix_cond_wait
};
/* Set ourselves up to get recursive locks. */
if (pthread_mutexattr_init(&attr_recursive))
return -1;
if (pthread_mutexattr_settype(&attr_recursive, PTHREAD_MUTEX_RECURSIVE))
return -1;
evthread_set_lock_callbacks(&cbs);
evthread_set_condition_callbacks(&cond_cbs);
evthread_set_id_callback(evthread_posix_get_id);
return 0;
}
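A hedged usage sketch: these callbacks must be registered before any event_base is created so the base picks up the lock, condition, and thread-id functions above (make_threadsafe_base is an illustrative name):

#include <event2/event.h>
#include <event2/thread.h>

struct event_base *
make_threadsafe_base(void)
{
    if (evthread_use_pthreads() < 0)
        return NULL;             /* recursive-mutex attribute setup failed */
    return event_base_new();     /* base is created with locking enabled */
}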

View file

@ -1,335 +0,0 @@
/*
* Copyright 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include <sys/locking.h>
#endif
struct event_base;
#include <event2/thread.h>
#include "mm-internal.h"
#include "evthread-internal.h"
#define SPIN_COUNT 2000
static void *
evthread_win32_lock_create(unsigned locktype)
{
CRITICAL_SECTION *lock = mm_malloc(sizeof(CRITICAL_SECTION));
if (!lock)
return NULL;
if (InitializeCriticalSectionAndSpinCount(lock, SPIN_COUNT) == 0) {
mm_free(lock);
return NULL;
}
return lock;
}
static void
evthread_win32_lock_free(void *_lock, unsigned locktype)
{
CRITICAL_SECTION *lock = _lock;
DeleteCriticalSection(lock);
mm_free(lock);
}
static int
evthread_win32_lock(unsigned mode, void *_lock)
{
CRITICAL_SECTION *lock = _lock;
if ((mode & EVTHREAD_TRY)) {
return ! TryEnterCriticalSection(lock);
} else {
EnterCriticalSection(lock);
return 0;
}
}
static int
evthread_win32_unlock(unsigned mode, void *_lock)
{
CRITICAL_SECTION *lock = _lock;
LeaveCriticalSection(lock);
return 0;
}
static unsigned long
evthread_win32_get_id(void)
{
return (unsigned long) GetCurrentThreadId();
}
#ifdef WIN32_HAVE_CONDITION_VARIABLES
static void WINAPI (*InitializeConditionVariable_fn)(PCONDITION_VARIABLE)
= NULL;
static BOOL WINAPI (*SleepConditionVariableCS_fn)(
PCONDITION_VARIABLE, PCRITICAL_SECTION, DWORD) = NULL;
static void WINAPI (*WakeAllConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
static void WINAPI (*WakeConditionVariable_fn)(PCONDITION_VARIABLE) = NULL;
static int
evthread_win32_condvar_init(void)
{
HANDLE lib;
lib = GetModuleHandle(TEXT("kernel32.dll"));
if (lib == NULL)
return 0;
#define LOAD(name) \
name##_fn = GetProcAddress(lib, #name)
LOAD(InitializeConditionVariable);
LOAD(SleepConditionVariableCS);
LOAD(WakeAllConditionVariable);
LOAD(WakeConditionVariable);
return InitializeConditionVariable_fn && SleepConditionVariableCS_fn &&
WakeAllConditionVariable_fn && WakeConditionVariable_fn;
}
/* XXXX Even if we can build this, we don't necessarily want to: the functions
* in question didn't exist before Vista, so we'd better LoadProc them. */
static void *
evthread_win32_condvar_alloc(unsigned condflags)
{
CONDITION_VARIABLE *cond = mm_malloc(sizeof(CONDITION_VARIABLE));
if (!cond)
return NULL;
InitializeConditionVariable_fn(cond);
return cond;
}
static void
evthread_win32_condvar_free(void *_cond)
{
CONDITION_VARIABLE *cond = _cond;
/* There doesn't _seem_ to be a cleanup fn here... */
mm_free(cond);
}
static int
evthread_win32_condvar_signal(void *_cond, int broadcast)
{
CONDITION_VARIABLE *cond = _cond;
if (broadcast)
WakeAllConditionVariable_fn(cond);
else
WakeConditionVariable_fn(cond);
return 0;
}
static int
evthread_win32_condvar_wait(void *_cond, void *_lock, const struct timeval *tv)
{
CONDITION_VARIABLE *cond = _cond;
CRITICAL_SECTION *lock = _lock;
DWORD ms, err;
BOOL result;
if (tv)
ms = evutil_tv_to_msec(tv);
else
ms = INFINITE;
result = SleepConditionVariableCS_fn(cond, lock, ms);
if (result) {
if (GetLastError() == WAIT_TIMEOUT)
return 1;
else
return -1;
} else {
return 0;
}
}
#endif
struct evthread_win32_cond {
HANDLE event;
CRITICAL_SECTION lock;
int n_waiting;
int n_to_wake;
int generation;
};
static void *
evthread_win32_cond_alloc(unsigned flags)
{
struct evthread_win32_cond *cond;
if (!(cond = mm_malloc(sizeof(struct evthread_win32_cond))))
return NULL;
if (InitializeCriticalSectionAndSpinCount(&cond->lock, SPIN_COUNT)==0) {
mm_free(cond);
return NULL;
}
if ((cond->event = CreateEvent(NULL,TRUE,FALSE,NULL)) == NULL) {
DeleteCriticalSection(&cond->lock);
mm_free(cond);
return NULL;
}
cond->n_waiting = cond->n_to_wake = cond->generation = 0;
return cond;
}
static void
evthread_win32_cond_free(void *_cond)
{
struct evthread_win32_cond *cond = _cond;
DeleteCriticalSection(&cond->lock);
CloseHandle(cond->event);
mm_free(cond);
}
static int
evthread_win32_cond_signal(void *_cond, int broadcast)
{
struct evthread_win32_cond *cond = _cond;
EnterCriticalSection(&cond->lock);
if (broadcast)
cond->n_to_wake = cond->n_waiting;
else
++cond->n_to_wake;
cond->generation++;
SetEvent(cond->event);
LeaveCriticalSection(&cond->lock);
return 0;
}
static int
evthread_win32_cond_wait(void *_cond, void *_lock, const struct timeval *tv)
{
struct evthread_win32_cond *cond = _cond;
CRITICAL_SECTION *lock = _lock;
int generation_at_start;
int waiting = 1;
int result = -1;
DWORD ms = INFINITE, ms_orig = INFINITE, startTime, endTime;
if (tv)
ms_orig = ms = evutil_tv_to_msec(tv);
EnterCriticalSection(&cond->lock);
++cond->n_waiting;
generation_at_start = cond->generation;
LeaveCriticalSection(&cond->lock);
LeaveCriticalSection(lock);
startTime = GetTickCount();
do {
DWORD res;
res = WaitForSingleObject(cond->event, ms);
EnterCriticalSection(&cond->lock);
if (cond->n_to_wake &&
cond->generation != generation_at_start) {
--cond->n_to_wake;
--cond->n_waiting;
result = 0;
waiting = 0;
goto out;
} else if (res != WAIT_OBJECT_0) {
result = (res==WAIT_TIMEOUT) ? 1 : -1;
--cond->n_waiting;
waiting = 0;
goto out;
} else if (ms != INFINITE) {
endTime = GetTickCount();
if (startTime + ms_orig <= endTime) {
result = 1; /* Timeout */
--cond->n_waiting;
waiting = 0;
goto out;
} else {
ms = startTime + ms_orig - endTime;
}
}
/* If we make it here, we are still waiting. */
if (cond->n_to_wake == 0) {
/* There is nobody else who should wake up; reset
* the event. */
ResetEvent(cond->event);
}
out:
LeaveCriticalSection(&cond->lock);
} while (waiting);
EnterCriticalSection(lock);
EnterCriticalSection(&cond->lock);
if (!cond->n_waiting)
ResetEvent(cond->event);
LeaveCriticalSection(&cond->lock);
return result;
}
int
evthread_use_windows_threads(void)
{
struct evthread_lock_callbacks cbs = {
EVTHREAD_LOCK_API_VERSION,
EVTHREAD_LOCKTYPE_RECURSIVE,
evthread_win32_lock_create,
evthread_win32_lock_free,
evthread_win32_lock,
evthread_win32_unlock
};
struct evthread_condition_callbacks cond_cbs = {
EVTHREAD_CONDITION_API_VERSION,
evthread_win32_cond_alloc,
evthread_win32_cond_free,
evthread_win32_cond_signal,
evthread_win32_cond_wait
};
#ifdef WIN32_HAVE_CONDITION_VARIABLES
struct evthread_condition_callbacks condvar_cbs = {
EVTHREAD_CONDITION_API_VERSION,
evthread_win32_condvar_alloc,
evthread_win32_condvar_free,
evthread_win32_condvar_signal,
evthread_win32_condvar_wait
};
#endif
evthread_set_lock_callbacks(&cbs);
evthread_set_id_callback(evthread_win32_get_id);
#ifdef WIN32_HAVE_CONDITION_VARIABLES
if (evthread_win32_condvar_init()) {
evthread_set_condition_callbacks(&condvar_cbs);
return 0;
}
#endif
evthread_set_condition_callbacks(&cond_cbs);
return 0;
}

The diff for this file is not shown because of its large size.

View file

@ -1,31 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVUTIL_H_
#define _EVUTIL_H_
#include <event2/util.h>
#endif /* _EVUTIL_H_ */

View file

@ -1,123 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* This file has our secure PRNG code. On platforms that have arc4random(),
* we just use that. Otherwise, we include arc4random.c as a bunch of static
* functions, and wrap it lightly. We don't expose the arc4random*() APIs
* because A) they aren't in our namespace, and B) it's not nice to name your
* APIs after their implementations. We keep them in a separate file
* so that other people can rip it out and use it for whatever.
*/
#include "event2/event-config.h"
#include "util-internal.h"
#include "evthread-internal.h"
#ifdef _EVENT_HAVE_ARC4RANDOM
#include <stdlib.h>
#include <string.h>
int
evutil_secure_rng_init(void)
{
/* call arc4random() now to force it to self-initialize */
(void) arc4random();
return 0;
}
#ifndef _EVENT_HAVE_ARC4RANDOM_BUF
static void
arc4random_buf(void *buf, size_t n)
{
unsigned char *b = buf;
/* Make sure that we start out with b at a 4-byte alignment; plenty
* of CPUs care about this for 32-bit access. */
if (n >= 4 && ((ev_uintptr_t)b) & 3) {
ev_uint32_t u = arc4random();
int n_bytes = 4 - (((ev_uintptr_t)b) & 3);
memcpy(b, &u, n_bytes);
b += n_bytes;
n -= n_bytes;
}
while (n >= 4) {
*(ev_uint32_t*)b = arc4random();
b += 4;
n -= 4;
}
if (n) {
ev_uint32_t u = arc4random();
memcpy(b, &u, n);
}
}
#endif
#else /* !_EVENT_HAVE_ARC4RANDOM { */
#ifdef _EVENT_ssize_t
#define ssize_t _EVENT_ssize_t
#endif
#define ARC4RANDOM_EXPORT static
#define _ARC4_LOCK() EVLOCK_LOCK(arc4rand_lock, 0)
#define _ARC4_UNLOCK() EVLOCK_UNLOCK(arc4rand_lock, 0)
static void *arc4rand_lock;
#define ARC4RANDOM_UINT32 ev_uint32_t
#define ARC4RANDOM_NOSTIR
#define ARC4RANDOM_NORANDOM
#define ARC4RANDOM_NOUNIFORM
#include "./arc4random.c"
int
evutil_secure_rng_init(void)
{
int val;
if (!arc4rand_lock) {
EVTHREAD_ALLOC_LOCK(arc4rand_lock, 0);
}
_ARC4_LOCK();
if (!arc4_seeded_ok)
arc4_stir();
val = arc4_seeded_ok ? 0 : -1;
_ARC4_UNLOCK();
return val;
}
#endif /* } !_EVENT_HAVE_ARC4RANDOM */
void
evutil_secure_rng_get_bytes(void *buf, size_t n)
{
arc4random_buf(buf, n);
}
void
evutil_secure_rng_add_bytes(const char *buf, size_t n)
{
arc4random_addrandom((unsigned char*)buf, n);
}
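A short consumer-side sketch of this PRNG wrapper; make_cookie is an illustrative helper, and the evutil_secure_rng_* prototypes come from event2/util.h:

#include <event2/util.h>

int
make_cookie(unsigned char cookie[16])
{
    if (evutil_secure_rng_init() < 0)
        return -1;                       /* the PRNG could not be seeded */
    evutil_secure_rng_get_bytes(cookie, 16);
    return 0;
}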

View file

@ -1,486 +0,0 @@
/* Based on work Copyright 2002 Christopher Clark */
/* Copyright 2005-2010 Nick Mathewson */
/* Copyright 2009-2010 Niels Provos and Nick Mathewson */
/* See license at end. */
/* Based on ideas by Christopher Clark and interfaces from Niels Provos. */
#ifndef _EVENT_HT_H
#define _EVENT_HT_H
#define HT_HEAD(name, type) \
struct name { \
/* The hash table itself. */ \
struct type **hth_table; \
/* How long is the hash table? */ \
unsigned hth_table_length; \
/* How many elements does the table contain? */ \
unsigned hth_n_entries; \
/* How many elements will we allow in the table before resizing it? */ \
unsigned hth_load_limit; \
/* Position of hth_table_length in the primes table. */ \
int hth_prime_idx; \
}
#define HT_INITIALIZER() \
{ NULL, 0, 0, 0, -1 }
#ifdef HT_CACHE_HASH_VALUES
#define HT_ENTRY(type) \
struct { \
struct type *hte_next; \
unsigned hte_hash; \
}
#else
#define HT_ENTRY(type) \
struct { \
struct type *hte_next; \
}
#endif
#define HT_EMPTY(head) \
((head)->hth_n_entries == 0)
/* How many elements in 'head'? */
#define HT_SIZE(head) \
((head)->hth_n_entries)
#define HT_FIND(name, head, elm) name##_HT_FIND((head), (elm))
#define HT_INSERT(name, head, elm) name##_HT_INSERT((head), (elm))
#define HT_REPLACE(name, head, elm) name##_HT_REPLACE((head), (elm))
#define HT_REMOVE(name, head, elm) name##_HT_REMOVE((head), (elm))
#define HT_START(name, head) name##_HT_START(head)
#define HT_NEXT(name, head, elm) name##_HT_NEXT((head), (elm))
#define HT_NEXT_RMV(name, head, elm) name##_HT_NEXT_RMV((head), (elm))
#define HT_CLEAR(name, head) name##_HT_CLEAR(head)
#define HT_INIT(name, head) name##_HT_INIT(head)
/* Helper: */
static inline unsigned
ht_improve_hash(unsigned h)
{
/* Aim to protect against poor hash functions by adding logic here
* - logic taken from java 1.4 hashtable source */
h += ~(h << 9);
h ^= ((h >> 14) | (h << 18)); /* >>> */
h += (h << 4);
h ^= ((h >> 10) | (h << 22)); /* >>> */
return h;
}
#if 0
/** Basic string hash function, from Java standard String.hashCode(). */
static inline unsigned
ht_string_hash(const char *s)
{
unsigned h = 0;
int m = 1;
while (*s) {
h += ((signed char)*s++)*m;
m = (m<<5)-1; /* m *= 31 */
}
return h;
}
#endif
/** Basic string hash function, from Python's str.__hash__() */
static inline unsigned
ht_string_hash(const char *s)
{
unsigned h;
const unsigned char *cp = (const unsigned char *)s;
h = *cp << 7;
while (*cp) {
h = (1000003*h) ^ *cp++;
}
/* This conversion truncates the length of the string, but that's ok. */
h ^= (unsigned)(cp-(const unsigned char*)s);
return h;
}
#ifdef HT_CACHE_HASH_VALUES
#define _HT_SET_HASH(elm, field, hashfn) \
do { (elm)->field.hte_hash = hashfn(elm); } while (0)
#define _HT_SET_HASHVAL(elm, field, val) \
do { (elm)->field.hte_hash = (val); } while (0)
#define _HT_ELT_HASH(elm, field, hashfn) \
((elm)->field.hte_hash)
#else
#define _HT_SET_HASH(elm, field, hashfn) \
((void)0)
#define _HT_ELT_HASH(elm, field, hashfn) \
(hashfn(elm))
#define _HT_SET_HASHVAL(elm, field, val) \
((void)0)
#endif
/* Helper: alias for the bucket containing 'elm'. */
#define _HT_BUCKET(head, field, elm, hashfn) \
((head)->hth_table[_HT_ELT_HASH(elm,field,hashfn) % head->hth_table_length])
#define HT_FOREACH(x, name, head) \
for ((x) = HT_START(name, head); \
(x) != NULL; \
(x) = HT_NEXT(name, head, x))
#define HT_PROTOTYPE(name, type, field, hashfn, eqfn) \
int name##_HT_GROW(struct name *ht, unsigned min_capacity); \
void name##_HT_CLEAR(struct name *ht); \
int _##name##_HT_REP_IS_BAD(const struct name *ht); \
static inline void \
name##_HT_INIT(struct name *head) { \
head->hth_table_length = 0; \
head->hth_table = NULL; \
head->hth_n_entries = 0; \
head->hth_load_limit = 0; \
head->hth_prime_idx = -1; \
} \
/* Helper: returns a pointer to the right location in the table \
* 'head' to find or insert the element 'elm'. */ \
static inline struct type ** \
_##name##_HT_FIND_P(struct name *head, struct type *elm) \
{ \
struct type **p; \
if (!head->hth_table) \
return NULL; \
p = &_HT_BUCKET(head, field, elm, hashfn); \
while (*p) { \
if (eqfn(*p, elm)) \
return p; \
p = &(*p)->field.hte_next; \
} \
return p; \
} \
/* Return a pointer to the element in the table 'head' matching 'elm', \
* or NULL if no such element exists */ \
static inline struct type * \
name##_HT_FIND(const struct name *head, struct type *elm) \
{ \
struct type **p; \
struct name *h = (struct name *) head; \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(h, elm); \
return p ? *p : NULL; \
} \
/* Insert the element 'elm' into the table 'head'. Do not call this \
* function if the table might already contain a matching element. */ \
static inline void \
name##_HT_INSERT(struct name *head, struct type *elm) \
{ \
struct type **p; \
if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
name##_HT_GROW(head, head->hth_n_entries+1); \
++head->hth_n_entries; \
_HT_SET_HASH(elm, field, hashfn); \
p = &_HT_BUCKET(head, field, elm, hashfn); \
elm->field.hte_next = *p; \
*p = elm; \
} \
/* Insert the element 'elm' into the table 'head'. If there is already \
* a matching element in the table, replace that element and return \
* it. */ \
static inline struct type * \
name##_HT_REPLACE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
if (!head->hth_table || head->hth_n_entries >= head->hth_load_limit) \
name##_HT_GROW(head, head->hth_n_entries+1); \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(head, elm); \
r = *p; \
*p = elm; \
if (r && (r!=elm)) { \
elm->field.hte_next = r->field.hte_next; \
r->field.hte_next = NULL; \
return r; \
} else { \
++head->hth_n_entries; \
return NULL; \
} \
} \
/* Remove any element matching 'elm' from the table 'head'. If such \
* an element is found, return it; otherwise return NULL. */ \
static inline struct type * \
name##_HT_REMOVE(struct name *head, struct type *elm) \
{ \
struct type **p, *r; \
_HT_SET_HASH(elm, field, hashfn); \
p = _##name##_HT_FIND_P(head,elm); \
if (!p || !*p) \
return NULL; \
r = *p; \
*p = r->field.hte_next; \
r->field.hte_next = NULL; \
--head->hth_n_entries; \
return r; \
} \
/* Invoke the function 'fn' on every element of the table 'head', \
* using 'data' as its second argument. If the function returns \
* nonzero, remove the most recently examined element before invoking \
* the function again. */ \
static inline void \
name##_HT_FOREACH_FN(struct name *head, \
int (*fn)(struct type *, void *), \
void *data) \
{ \
unsigned idx; \
int remove; \
struct type **p, **nextp, *next; \
if (!head->hth_table) \
return; \
for (idx=0; idx < head->hth_table_length; ++idx) { \
p = &head->hth_table[idx]; \
while (*p) { \
nextp = &(*p)->field.hte_next; \
next = *nextp; \
remove = fn(*p, data); \
if (remove) { \
--head->hth_n_entries; \
*p = next; \
} else { \
p = nextp; \
} \
} \
} \
} \
/* Return a pointer to the first element in the table 'head', under \
* an arbitrary order. This order is stable under remove operations, \
* but not under others. If the table is empty, return NULL. */ \
static inline struct type ** \
name##_HT_START(struct name *head) \
{ \
unsigned b = 0; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
/* Return the next element in 'head' after 'elm', under the arbitrary \
* order used by HT_START. If there are no more elements, return \
* NULL. If 'elm' is to be removed from the table, you must call \
* this function for the next value before you remove it. \
*/ \
static inline struct type ** \
name##_HT_NEXT(struct name *head, struct type **elm) \
{ \
if ((*elm)->field.hte_next) { \
return &(*elm)->field.hte_next; \
} else { \
unsigned b = (_HT_ELT_HASH(*elm, field, hashfn) % head->hth_table_length)+1; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
} \
static inline struct type ** \
name##_HT_NEXT_RMV(struct name *head, struct type **elm) \
{ \
unsigned h = _HT_ELT_HASH(*elm, field, hashfn); \
*elm = (*elm)->field.hte_next; \
--head->hth_n_entries; \
if (*elm) { \
return elm; \
} else { \
unsigned b = (h % head->hth_table_length)+1; \
while (b < head->hth_table_length) { \
if (head->hth_table[b]) \
return &head->hth_table[b]; \
++b; \
} \
return NULL; \
} \
}
#define HT_GENERATE(name, type, field, hashfn, eqfn, load, mallocfn, \
reallocfn, freefn) \
static unsigned name##_PRIMES[] = { \
53, 97, 193, 389, \
769, 1543, 3079, 6151, \
12289, 24593, 49157, 98317, \
196613, 393241, 786433, 1572869, \
3145739, 6291469, 12582917, 25165843, \
50331653, 100663319, 201326611, 402653189, \
805306457, 1610612741 \
}; \
static unsigned name##_N_PRIMES = \
(unsigned)(sizeof(name##_PRIMES)/sizeof(name##_PRIMES[0])); \
/* Expand the internal table of 'head' until it is large enough to \
* hold 'size' elements. Return 0 on success, -1 on allocation \
* failure. */ \
int \
name##_HT_GROW(struct name *head, unsigned size) \
{ \
unsigned new_len, new_load_limit; \
int prime_idx; \
struct type **new_table; \
if (head->hth_prime_idx == (int)name##_N_PRIMES - 1) \
return 0; \
if (head->hth_load_limit > size) \
return 0; \
prime_idx = head->hth_prime_idx; \
do { \
new_len = name##_PRIMES[++prime_idx]; \
new_load_limit = (unsigned)(load*new_len); \
} while (new_load_limit <= size && \
prime_idx < (int)name##_N_PRIMES); \
if ((new_table = mallocfn(new_len*sizeof(struct type*)))) { \
unsigned b; \
memset(new_table, 0, new_len*sizeof(struct type*)); \
for (b = 0; b < head->hth_table_length; ++b) { \
struct type *elm, *next; \
unsigned b2; \
elm = head->hth_table[b]; \
while (elm) { \
next = elm->field.hte_next; \
b2 = _HT_ELT_HASH(elm, field, hashfn) % new_len; \
elm->field.hte_next = new_table[b2]; \
new_table[b2] = elm; \
elm = next; \
} \
} \
if (head->hth_table) \
freefn(head->hth_table); \
head->hth_table = new_table; \
} else { \
unsigned b, b2; \
new_table = reallocfn(head->hth_table, new_len*sizeof(struct type*)); \
if (!new_table) return -1; \
memset(new_table + head->hth_table_length, 0, \
(new_len - head->hth_table_length)*sizeof(struct type*)); \
for (b=0; b < head->hth_table_length; ++b) { \
struct type *e, **pE; \
for (pE = &new_table[b], e = *pE; e != NULL; e = *pE) { \
b2 = _HT_ELT_HASH(e, field, hashfn) % new_len; \
if (b2 == b) { \
pE = &e->field.hte_next; \
} else { \
*pE = e->field.hte_next; \
e->field.hte_next = new_table[b2]; \
new_table[b2] = e; \
} \
} \
} \
head->hth_table = new_table; \
} \
head->hth_table_length = new_len; \
head->hth_prime_idx = prime_idx; \
head->hth_load_limit = new_load_limit; \
return 0; \
} \
/* Free all storage held by 'head'. Does not free 'head' itself, or \
* individual elements. */ \
void \
name##_HT_CLEAR(struct name *head) \
{ \
if (head->hth_table) \
freefn(head->hth_table); \
head->hth_table_length = 0; \
name##_HT_INIT(head); \
} \
/* Debugging helper: return false iff the representation of 'head' is \
* internally consistent. */ \
int \
_##name##_HT_REP_IS_BAD(const struct name *head) \
{ \
unsigned n, i; \
struct type *elm; \
if (!head->hth_table_length) { \
if (!head->hth_table && !head->hth_n_entries && \
!head->hth_load_limit && head->hth_prime_idx == -1) \
return 0; \
else \
return 1; \
} \
if (!head->hth_table || head->hth_prime_idx < 0 || \
!head->hth_load_limit) \
return 2; \
if (head->hth_n_entries > head->hth_load_limit) \
return 3; \
if (head->hth_table_length != name##_PRIMES[head->hth_prime_idx]) \
return 4; \
if (head->hth_load_limit != (unsigned)(load*head->hth_table_length)) \
return 5; \
for (n = i = 0; i < head->hth_table_length; ++i) { \
for (elm = head->hth_table[i]; elm; elm = elm->field.hte_next) { \
if (_HT_ELT_HASH(elm, field, hashfn) != hashfn(elm)) \
return 1000 + i; \
if ((_HT_ELT_HASH(elm, field, hashfn) % head->hth_table_length) != i) \
return 10000 + i; \
++n; \
} \
} \
if (n != head->hth_n_entries) \
return 6; \
return 0; \
}
/** Implements an over-optimized "find and insert if absent" block;
* not meant for direct usage by typical code, or usage outside the critical
* path.*/
#define _HT_FIND_OR_INSERT(name, field, hashfn, head, eltype, elm, var, y, n) \
{ \
struct name *_##var##_head = head; \
struct eltype **var; \
if (!_##var##_head->hth_table || \
_##var##_head->hth_n_entries >= _##var##_head->hth_load_limit) \
name##_HT_GROW(_##var##_head, _##var##_head->hth_n_entries+1); \
_HT_SET_HASH((elm), field, hashfn); \
var = _##name##_HT_FIND_P(_##var##_head, (elm)); \
if (*var) { \
y; \
} else { \
n; \
} \
}
#define _HT_FOI_INSERT(field, head, elm, newent, var) \
{ \
_HT_SET_HASHVAL(newent, field, (elm)->field.hte_hash); \
newent->field.hte_next = NULL; \
*var = newent; \
++((head)->hth_n_entries); \
}
/*
* Copyright 2005, Nick Mathewson. Implementation logic is adapted from code
* by Cristopher Clark, retrofit to allow drop-in memory management, and to
* use the same interface as Niels Provos's tree.h. This is probably still
* a derived work, so the original license below still applies.
*
* Copyright (c) 2002, Christopher Clark
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * Neither the name of the original author; nor the names of any contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#endif
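A compact sketch of how this header is instantiated in a translation unit that includes it; the widget type and its hash/equality functions are hypothetical:

#include <stdlib.h>
#include <string.h>

struct widget {
    HT_ENTRY(widget) node;
    int id;
};

static unsigned
widget_hash(struct widget *w)
{
    return ht_improve_hash((unsigned)w->id);
}

static int
widget_eq(struct widget *a, struct widget *b)
{
    return a->id == b->id;
}

HT_HEAD(widget_map, widget);
HT_PROTOTYPE(widget_map, widget, node, widget_hash, widget_eq)
HT_GENERATE(widget_map, widget, node, widget_hash, widget_eq, 0.5,
    malloc, realloc, free)

/* Typical calls: HT_INIT(widget_map, &map); HT_INSERT(widget_map, &map, &w);
 * struct widget *hit = HT_FIND(widget_map, &map, &key); */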

View file

@ -1,173 +0,0 @@
/*
* Copyright 2001-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2010 Niels Provos and Nick Mathewson
*
* This header file contains definitions for dealing with HTTP requests
* that are internal to libevent. As user of the library, you should not
* need to know about these.
*/
#ifndef _HTTP_INTERNAL_H_
#define _HTTP_INTERNAL_H_
#include "event2/event_struct.h"
#include "util-internal.h"
#define HTTP_CONNECT_TIMEOUT 45
#define HTTP_WRITE_TIMEOUT 50
#define HTTP_READ_TIMEOUT 50
#define HTTP_PREFIX "http://"
#define HTTP_DEFAULTPORT 80
enum message_read_status {
ALL_DATA_READ = 1,
MORE_DATA_EXPECTED = 0,
DATA_CORRUPTED = -1,
REQUEST_CANCELED = -2,
DATA_TOO_LONG = -3
};
enum evhttp_connection_error {
EVCON_HTTP_TIMEOUT,
EVCON_HTTP_EOF,
EVCON_HTTP_INVALID_HEADER,
EVCON_HTTP_BUFFER_ERROR,
EVCON_HTTP_REQUEST_CANCEL
};
struct evbuffer;
struct addrinfo;
struct evhttp_request;
/* A stupid connection object - maybe make this a bufferevent later */
enum evhttp_connection_state {
EVCON_DISCONNECTED, /**< not currently connected not trying either*/
EVCON_CONNECTING, /**< tries to currently connect */
EVCON_IDLE, /**< connection is established */
EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
**< Status-Line (outgoing conn) */
EVCON_READING_HEADERS, /**< reading request/response headers */
EVCON_READING_BODY, /**< reading request/response body */
EVCON_READING_TRAILER, /**< reading request/response chunked trailer */
EVCON_WRITING /**< writing request/response headers/body */
};
struct event_base;
struct evhttp_connection {
/* we use tailq only if they were created for an http server */
TAILQ_ENTRY(evhttp_connection) next;
evutil_socket_t fd;
struct bufferevent *bufev;
struct event retry_ev; /* for retrying connects */
char *bind_address; /* address to use for binding the src */
u_short bind_port; /* local port for binding the src */
char *address; /* address to connect to */
u_short port;
size_t max_headers_size;
ev_uint64_t max_body_size;
int flags;
#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
int timeout; /* timeout in seconds for events */
int retry_cnt; /* retry count */
int retry_max; /* maximum number of retries */
enum evhttp_connection_state state;
/* for server connections, the http server they are connected with */
struct evhttp *http_server;
TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
void (*cb)(struct evhttp_connection *, void *);
void *cb_arg;
void (*closecb)(struct evhttp_connection *, void *);
void *closecb_arg;
struct event_base *base;
struct evdns_base *dns_base;
};
struct evhttp_cb {
TAILQ_ENTRY(evhttp_cb) next;
char *what;
void (*cb)(struct evhttp_request *req, void *);
void *cbarg;
};
/* both the http server as well as the rpc system need to queue connections */
TAILQ_HEAD(evconq, evhttp_connection);
/* each bound socket is stored in one of these */
struct evhttp_bound_socket {
TAILQ_ENTRY(evhttp_bound_socket) next;
struct evconnlistener *listener;
};
struct evhttp {
TAILQ_ENTRY(evhttp) next;
TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
struct evconq connections;
TAILQ_HEAD(vhostsq, evhttp) virtualhosts;
/* NULL if this server is not a vhost */
char *vhost_pattern;
int timeout;
size_t default_max_headers_size;
ev_uint64_t default_max_body_size;
void (*gencb)(struct evhttp_request *req, void *);
void *gencbarg;
struct event_base *base;
};
/* resets the connection; can be reused for more requests */
void evhttp_connection_reset(struct evhttp_connection *);
/* connects if necessary */
int evhttp_connection_connect(struct evhttp_connection *);
/* notifies the current request that it failed; resets connection */
void evhttp_connection_fail(struct evhttp_connection *,
enum evhttp_connection_error error);
void evhttp_get_request(struct evhttp *, evutil_socket_t, struct sockaddr *, ev_socklen_t);
enum message_read_status;
enum message_read_status evhttp_parse_firstline(struct evhttp_request *, struct evbuffer*);
enum message_read_status evhttp_parse_headers(struct evhttp_request *, struct evbuffer*);
void evhttp_start_read(struct evhttp_connection *);
void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
void evhttp_write_buffer(struct evhttp_connection *,
void (*)(struct evhttp_connection *, void *), void *);
/* response sending HTML the data in the buffer */
void evhttp_response_code(struct evhttp_request *, int, const char *);
void evhttp_send_page(struct evhttp_request *, struct evbuffer *);
#endif /* _HTTP_INTERNAL_H_ */

The diff for this file is not shown because of its large size.

View file

@ -1,37 +0,0 @@
AUTOMAKE_OPTIONS = foreign
EVENT2_EXPORT = \
event2/buffer.h \
event2/buffer_compat.h \
event2/bufferevent.h \
event2/bufferevent_compat.h \
event2/bufferevent_ssl.h \
event2/bufferevent_struct.h \
event2/dns.h \
event2/dns_compat.h \
event2/dns_struct.h \
event2/event.h \
event2/event_compat.h \
event2/event_struct.h \
event2/http.h \
event2/http_compat.h \
event2/http_struct.h \
event2/keyvalq_struct.h \
event2/listener.h \
event2/rpc.h \
event2/rpc_compat.h \
event2/rpc_struct.h \
event2/tag.h \
event2/tag_compat.h \
event2/thread.h \
event2/util.h
EXTRA_SRC = $(EVENT2_EXPORT)
# Open MPI: do not install these headers (installation of the required
# headers is handled in opal/event/Makefile.am because we only install
# them conditionally).
noinst_HEADERS = $(EVENT2_EXPORT)
nodist_noinst_HEADERS = ./event2/event-config.h

View file

@ -1,740 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFER_H_
#define _EVENT2_BUFFER_H_
/** @file buffer.h
Functions for buffering data for network sending or receiving.
An evbuffer can be used for preparing data before sending it to
the network or conversely for reading data from the network.
Evbuffers try to avoid memory copies as much as possible. As a
result evbuffers can be used to pass data around without actually
incurring the overhead of copying the data.
A new evbuffer can be allocated with evbuffer_new(), and can be
freed with evbuffer_free().
There are several guidelines for using evbuffers.
- if you already know how much data you are going to add as a result
of calling evbuffer_add() multiple times, it makes sense to use
evbuffer_expand() first to make sure that enough memory is allocated
beforehand.
- evbuffer_add_buffer() adds the contents of one buffer to the other
without incurring any unnecessary memory copies.
- evbuffer_add() and evbuffer_add_buffer() do not mix very well:
if you use them, you will wind up with fragmented memory in your
buffer.
As the contents of an evbuffer can be stored into multiple different
memory blocks, it cannot be accessed directly. Instead, evbuffer_pullup()
can be used to force a specified number of bytes to be continuous. This
will cause memory reallocation and memory copies if the data is split
across multiple blocks.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#include <stdarg.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif
#include <event2/util.h>
struct evbuffer;
/** Points to a position within an evbuffer. Used when repeatedly searching
through a buffer. Calls to any function that modifies or re-packs the
buffer contents may invalidate all evbuffer_ptrs for that buffer. Do not
modify these values except with evbuffer_ptr_set.
*/
struct evbuffer_ptr {
ev_ssize_t pos;
/* Do not alter the values of fields. */
struct {
void *chain;
size_t pos_in_chain;
} _internal;
};
/** Describes a single extent of memory inside an evbuffer. Used for
direct-access functions.
@see evbuffer_reserve_space, evbuffer_commit_space, evbuffer_peek
*/
#ifdef _EVENT_HAVE_SYS_UIO_H
#define evbuffer_iovec iovec
/* Internal use -- defined only if we are using the native struct iovec */
#define _EVBUFFER_IOVEC_IS_NATIVE
#else
struct evbuffer_iovec {
#ifndef __WINDOWS__
/** The start of the extent of memory. */
void *iov_base;
/** The length of the extent of memory. */
size_t iov_len;
#else
WSABUF data;
#endif
};
#endif
/**
Allocate storage for a new evbuffer.
@return a pointer to a newly allocated evbuffer struct, or NULL if an error
occurred
*/
struct evbuffer *evbuffer_new(void);
/**
Deallocate storage for an evbuffer.
@param buf pointer to the evbuffer to be freed
*/
void evbuffer_free(struct evbuffer *buf);
/**
Enable locking on an evbuffer so that it can safely be used by multiple
threads at the same time.
NOTE: when locking is enabled, the lock will be held when callbacks are
invoked. This could result in deadlock if you aren't careful. Plan
accordingly!
@param buf An evbuffer to make lockable.
@param lock A lock object, or NULL if we should allocate our own.
@return 0 on success, -1 on failure.
*/
int evbuffer_enable_locking(struct evbuffer *buf, void *lock);
/**
Acquire the lock on an evbuffer. Has no effect if locking was not enabled
with evbuffer_enable_locking.
*/
void evbuffer_lock(struct evbuffer *buf);
/**
Release the lock on an evbuffer. Has no effect if locking was not enabled
with evbuffer_enable_locking.
*/
void evbuffer_unlock(struct evbuffer *buf);
/**
Returns the total number of bytes stored in the event buffer
@param buf pointer to the evbuffer
@return the number of bytes stored in the event buffer
*/
size_t evbuffer_get_length(const struct evbuffer *buf);
/**
Returns the number of contiguous available bytes in the first buffer chain.
This is useful when processing data that might be split into multiple
chains, or that might all be in the first chain. Calls to
evbuffer_pullup() that cause reallocation and copying of data can thus be
avoided.
@param buf pointer to the evbuffer
@return 0 if no data is available, otherwise the number of available bytes
in the first buffer chain.
*/
size_t evbuffer_get_contiguous_space(const struct evbuffer *buf);
/**
Expands the available space in an event buffer.
Expands the available space in the event buffer to at least datlen, so that
appending datlen additional bytes will not require any new allocations.
@param buf the event buffer to be expanded
@param datlen the new minimum length requirement
@return 0 if successful, or -1 if an error occurred
*/
int evbuffer_expand(struct evbuffer *buf, size_t datlen);
/**
Reserves space in the last chain of an event buffer.
Makes space available in the last chain of an event buffer that can
be arbitrarily written to by a user. The space does not become
available for reading until it has been committed with
evbuffer_commit_space().
The space is made available as one or more extents, represented by
an initial pointer and a length. You can force the memory to be
available as only one extent. Allowing more, however, makes the
function more efficient.
Multiple subsequent calls to this function will make the same space
available until evbuffer_commit_space() has been called.
It is an error to do anything that moves around the buffer's internal
memory structures before committing the space.
NOTE: The code currently does not ever use more than two extents.
This may change in future versions.
@param buf the event buffer in which to reserve space.
@param size how much space to make available, at minimum. The
total length of the extents may be greater than the requested
length.
@param vec an array of one or more evbuffer_iovec structures to
hold pointers to the reserved extents of memory.
@param n_vec The length of the vec array. Must be at least 1.
@return the number of provided extents, or -1 on error.
@see evbuffer_commit_space
*/
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
struct evbuffer_iovec *vec, int n_vecs);
/**
Commits previously reserved space.
Commits some of the space previously reserved with
evbuffer_reserve_space(). It then becomes available for reading.
This function may return an error if the pointers in the extents do
not match those returned from evbuffer_reserve_space(), or if data
has been added to the buffer since the space was reserved.
If you want to commit less data than you got reserved space for,
modify the iov_len field of the appropriate extent to a smaller value. Note
that you may have received more space than you requested if it was
available!
@param buf the event buffer in which to reserve space.
@param vec one or two extents returned by evbuffer_reserve_space.
@param n_vecs the number of extents.
@return 0 on success, -1 on error
@see evbuffer_reserve_space
*/
int evbuffer_commit_space(struct evbuffer *buf,
struct evbuffer_iovec *vec, int n_vecs);
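/* Illustrative sketch of the reserve/commit protocol described above, kept
   under "#if 0": reserve space, fill only the first extent, then commit just
   the bytes actually produced. produce_data() is a hypothetical helper that
   writes at most the given number of bytes and returns how many it wrote. */
#if 0
static int append_without_copy(struct evbuffer *buf)
{
	struct evbuffer_iovec v[2];
	int n, written;
	n = evbuffer_reserve_space(buf, 4096, v, 2);
	if (n <= 0)
		return -1;
	/* Never write past v[0].iov_len. */
	written = produce_data(v[0].iov_base, v[0].iov_len);
	if (written < 0)
		return -1;
	v[0].iov_len = (size_t)written;	/* commit fewer bytes than reserved */
	return evbuffer_commit_space(buf, v, 1);
}
#endif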
/**
Append data to the end of an evbuffer.
@param buf the event buffer to be appended to
@param data pointer to the beginning of the data buffer
@param datlen the number of bytes to be copied from the data buffer
@return 0 on success, -1 on failure.
*/
int evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen);
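/* Illustrative sketch, kept under "#if 0", of the guidelines from the header
   comment: pre-expand when the total size is known, append with
   evbuffer_add(), and pull up only when a contiguous view is really needed.
   The chunk arrays are hypothetical caller-supplied data. */
#if 0
static unsigned char *append_chunks(struct evbuffer *buf,
    const char **chunks, const size_t *lens, size_t n, size_t total)
{
	size_t i;
	if (evbuffer_expand(buf, total) < 0)
		return NULL;
	for (i = 0; i < n; ++i) {
		if (evbuffer_add(buf, chunks[i], lens[i]) < 0)
			return NULL;
	}
	/* May reallocate and copy if the data ended up in several blocks. */
	return evbuffer_pullup(buf, -1);
}
#endif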
/**
Read data from an event buffer and drain the bytes read.
@param buf the event buffer to be read from
@param data the destination buffer to store the result
@param datlen the maximum size of the destination buffer
@return the number of bytes read, or -1 if we can't drain the buffer.
*/
int evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen);
/**
Read data from an event buffer, and leave the buffer unchanged.
@param buf the event buffer to be read from
@param data the destination buffer to store the result
@param datlen the maximum size of the destination buffer
@return the number of bytes read, or -1 if we can't drain the buffer.
*/
ev_ssize_t evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen);
/**
Read data from an event buffer into another event buffer, draining
the read bytes from the src buffer. This function avoids copying
data when possible.
@param src the event buffer to be read from
@param dst the destination event buffer to store the result into
@param datlen the maximum number of bytes to transfer
@return the number of bytes read
*/
int evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
size_t datlen);
/** Used to tell evbuffer_readln what kind of line-ending to look for.
*/
enum evbuffer_eol_style {
/** Any sequence of CR and LF characters is acceptable as an EOL. */
EVBUFFER_EOL_ANY,
/** An EOL is an LF, optionally preceded by a CR. This style is
* most useful for implementing text-based internet protocols. */
EVBUFFER_EOL_CRLF,
/** An EOL is a CR followed by an LF. */
EVBUFFER_EOL_CRLF_STRICT,
/** An EOL is a LF. */
EVBUFFER_EOL_LF
};
/**
* Read a single line from an event buffer.
*
* Reads a line terminated by an EOL as determined by the evbuffer_eol_style
* argument. Returns a newly allocated nul-terminated string; the caller must
* free the returned value. The EOL is not included in the returned string.
*
* @param buffer the evbuffer to read from
* @param n_read_out if non-NULL, points to a size_t that is set to the
* number of characters in the returned string. This is useful for
* strings that can contain NUL characters.
* @param eol_style the style of line-ending to use.
* @return pointer to a single line, or NULL if an error occurred
*/
char *evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
enum evbuffer_eol_style eol_style);
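/* Illustrative sketch, kept under "#if 0", of line-oriented reading with
   evbuffer_readln(); every returned line is heap-allocated and must be
   freed. handle_line() is a hypothetical consumer; free() assumes
   <stdlib.h>. */
#if 0
static void drain_lines(struct evbuffer *buf)
{
	char *line;
	size_t len;
	while ((line = evbuffer_readln(buf, &len, EVBUFFER_EOL_CRLF)) != NULL) {
		handle_line(line, len);	/* hypothetical consumer */
		free(line);
	}
}
#endif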
/**
Move data from one evbuffer into another evbuffer.
This is a destructive add. The data from one buffer moves into
the other buffer. However, no unnecessary memory copies occur.
@param outbuf the output buffer
@param inbuf the input buffer
@return 0 if successful, or -1 if an error occurred
*/
int evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf);
typedef void (*evbuffer_ref_cleanup_cb)(const void *data,
size_t datalen, void *extra);
/**
Reference memory into an evbuffer without copying.
The memory needs to remain valid until all the added data has been
read. This function keeps just a reference to the memory without
actually incurring the overhead of a copy.
@param outbuf the output buffer
@param data the memory to reference
@param datlen how much memory to reference
@param cleanupfn callback to be invoked when the memory is no longer
referenced
@param extra optional argument to the cleanup callback
@return 0 if successful, or -1 if an error occurred
*/
int evbuffer_add_reference(struct evbuffer *outbuf,
const void *data, size_t datlen,
evbuffer_ref_cleanup_cb cleanupfn, void *extra);
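/* Illustrative sketch, kept under "#if 0": hand a heap block to an evbuffer
   by reference and let the cleanup callback release it once the evbuffer is
   done with it. Assumes <stdlib.h> for free(). */
#if 0
static void free_on_done(const void *data, size_t datalen, void *extra)
{
	(void)datalen; (void)extra;
	free((void *)data);
}
static int add_owned_block(struct evbuffer *out, void *block, size_t len)
{
	/* On success the evbuffer effectively owns 'block' via the callback. */
	return evbuffer_add_reference(out, block, len, free_on_done, NULL);
}
#endif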
/**
Move data from a file into the evbuffer for writing to a socket.
This function avoids unnecessary data copies between userland and
kernel. Where available, it uses sendfile or splice.
The function owns the resulting file descriptor and will close it
when finished transferring data.
The results of using evbuffer_remove() or evbuffer_pullup() on a
buffer containing data added with this function are undefined.
@param outbuf the output buffer
@param fd the file descriptor
@param off the offset from which to read data
@param length how much data to read
@return 0 if successful, or -1 if an error occurred
*/
int evbuffer_add_file(struct evbuffer *output, int fd, off_t offset,
off_t length);
/**
Append a formatted string to the end of an evbuffer.
@param buf the evbuffer that will be appended to
@param fmt a format string
@param ... arguments that will be passed to printf(3)
@return The number of bytes added if successful, or -1 if an error occurred.
*/
int evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
#ifdef __GNUC__
__attribute__((format(printf, 2, 3)))
#endif
;
/**
Append a va_list formatted string to the end of an evbuffer.
@param buf the evbuffer that will be appended to
@param fmt a format string
@param ap a varargs va_list argument array that will be passed to vprintf(3)
@return The number of bytes added if successful, or -1 if an error occurred.
*/
int evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap);
/**
Remove a specified number of bytes data from the beginning of an evbuffer.
@param buf the evbuffer to be drained
@param len the number of bytes to drain from the beginning of the buffer
@return 0 on success, -1 on failure.
*/
int evbuffer_drain(struct evbuffer *buf, size_t len);
/**
Write the contents of an evbuffer to a file descriptor.
The evbuffer will be drained after the bytes have been successfully written.
@param buffer the evbuffer to be written and drained
@param fd the file descriptor to be written to
@return the number of bytes written, or -1 if an error occurred
@see evbuffer_read()
*/
int evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd);
/**
Write some of the contents of an evbuffer to a file descriptor.
The evbuffer will be drained after the bytes have been successfully written.
@param buffer the evbuffer to be written and drained
@param fd the file descriptor to be written to
@param howmuch the largest allowable number of bytes to write, or -1
to write as many bytes as we can.
@return the number of bytes written, or -1 if an error occurred
@see evbuffer_read()
*/
int evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
ev_ssize_t howmuch);
/**
Read from a file descriptor and store the result in an evbuffer.
@param buf the evbuffer to store the result
@param fd the file descriptor to read from
@param howmuch the number of bytes to be read
@return the number of bytes read, or -1 if an error occurred
@see evbuffer_write()
*/
int evbuffer_read(struct evbuffer *buffer, evutil_socket_t fd, int howmuch);
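/* Illustrative sketch, kept under "#if 0": pump bytes from one descriptor to
   another through an evbuffer with evbuffer_read() and evbuffer_write();
   either call may transfer fewer bytes than requested. */
#if 0
static int pump_once(struct evbuffer *buf, evutil_socket_t from,
    evutil_socket_t to)
{
	if (evbuffer_read(buf, from, 4096) < 0)
		return -1;
	return evbuffer_write(buf, to);
}
#endif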
/**
Search for a string within an evbuffer.
@param buffer the evbuffer to be searched
@param what the string to be searched for
@param len the length of the search string
@param start NULL or a pointer to a valid struct evbuffer_ptr.
@return a struct evbuffer_ptr whose 'pos' field has the offset of the
first occurrence of the string in the buffer after 'start'. The 'pos'
field of the result is -1 if the string was not found.
*/
struct evbuffer_ptr evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start);
/**
Search for a string within part of an evbuffer.
@param buffer the evbuffer to be searched
@param what the string to be searched for
@param len the length of the search string
@param start NULL or a pointer to a valid struct evbuffer_ptr that
indicates where we should start searching.
@param end NULL or a pointer to a valid struct evbuffer_ptr that
indicates where we should stop searching.
@return a struct evbuffer_ptr whose 'pos' field has the offset of the
first occurrence of the string in the buffer after 'start'. The 'pos'
field of the result is -1 if the string was not found.
*/
struct evbuffer_ptr evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end);
enum evbuffer_ptr_how {
/** Sets the pointer to the position; can be called on an
uninitialized evbuffer_ptr. */
EVBUFFER_PTR_SET,
/** Advances the pointer by adding to the current position. */
EVBUFFER_PTR_ADD
};
/**
Sets the search pointer in the buffer to position.
If the evbuffer_ptr is not yet initialized, this function can only be
called with EVBUFFER_PTR_SET.
@param buffer the evbuffer to be searched
@param ptr a pointer to a struct evbuffer_ptr
@param position the position at which to start the next search
@param how determines how the pointer should be manipulated.
@returns 0 on success or -1 otherwise
*/
int
evbuffer_ptr_set(struct evbuffer *buffer, struct evbuffer_ptr *pos,
size_t position, enum evbuffer_ptr_how how);
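/* Illustrative sketch, kept under "#if 0": count occurrences of a string by
   combining evbuffer_search() with evbuffer_ptr_set(), advancing the search
   pointer one byte past each hit with EVBUFFER_PTR_ADD. */
#if 0
static int count_occurrences(struct evbuffer *buf, const char *what, size_t len)
{
	struct evbuffer_ptr p;
	int count = 0;
	if (evbuffer_ptr_set(buf, &p, 0, EVBUFFER_PTR_SET) < 0)
		return 0;
	for (;;) {
		p = evbuffer_search(buf, what, len, &p);
		if (p.pos < 0)
			break;
		++count;
		/* Step forward so the next search starts past this hit. */
		if (evbuffer_ptr_set(buf, &p, 1, EVBUFFER_PTR_ADD) < 0)
			break;
	}
	return count;
}
#endif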
/**
Search for an end-of-line string within an evbuffer.
@param buffer the evbuffer to be searched
@param start NULL or a pointer to a valid struct evbuffer_ptr to start
searching at.
@param eol_len_out If non-NULL, the pointed-to value will be set to
the length of the end-of-line string.
@param eol_style The kind of EOL to look for; see evbuffer_readln() for
more information
@return a struct evbuffer_ptr whose 'pos' field has the offset of the
first occurrence of the EOL in the buffer after 'start'. The 'pos'
field of the result is -1 if the string was not found.
*/
struct evbuffer_ptr evbuffer_search_eol(struct evbuffer *buffer,
struct evbuffer_ptr *start, size_t *eol_len_out,
enum evbuffer_eol_style eol_style);
/** Structure passed to an evbuffer callback */
struct evbuffer_cb_info {
	/** The number of bytes in this evbuffer when callbacks were last
	    invoked. */
	size_t orig_size;
	/** The number of bytes added since callbacks were last invoked. */
	size_t n_added;
	/** The number of bytes removed since callbacks were last invoked. */
	size_t n_deleted;
};
/** Function to peek at data inside an evbuffer without removing it or
copying it out.
Pointers to the data are returned by filling the 'vec_out' array
with pointers to one or more extents of data inside the buffer.
The total data in the extents that you get back may be more than
you requested (if there is more data in the last extent than you asked
for), or less (if you do not provide enough evbuffer_iovecs, or if
the buffer does not have as much data as you asked to see).
@param buffer the evbuffer to peek into,
@param len the number of bytes to try to peek. If negative, we
will try to fill as much of vec_out as we can.
@param start_at an evbuffer_ptr indicating the point at which we
should start looking for data. NULL means, "At the start of the
buffer."
@param vec_out an array of evbuffer_iovec
@param n_vec the length of vec_out. If 0, we only count how many
extents would be necessary to point to the requested amount of
data.
@return The number of extents needed. This may be less than n_vec
if we didn't need all the evbuffer_iovecs we were given, or more
than n_vec if we would need more to return all the data that was
requested.
*/
int evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
struct evbuffer_ptr *start_at,
struct evbuffer_iovec *vec_out, int n_vec);
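/* Illustrative sketch, kept under "#if 0": inspect buffered data in place
   with evbuffer_peek() using a small fixed array of extents; the return
   value may exceed the array length, so clamp it. process_extent() is a
   hypothetical consumer. */
#if 0
static void inspect_first_bytes(struct evbuffer *buf, ev_ssize_t want)
{
	struct evbuffer_iovec vec[4];
	int i, n;
	n = evbuffer_peek(buf, want, NULL, vec, 4);
	if (n > 4)
		n = 4;	/* only four extents were provided */
	for (i = 0; i < n; ++i)
		process_extent(vec[i].iov_base, vec[i].iov_len);
}
#endif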
/** Type definition for a callback that is invoked whenever data is added or
removed from an evbuffer.
An evbuffer may have one or more callbacks set at a time. The order
in which they are executed is undefined.
A callback function may add more callbacks, or remove itself from the
list of callbacks, or add or remove data from the buffer. It may not
remove another callback from the list.
If a callback adds or removes data from the buffer or from another
buffer, this can cause a recursive invocation of your callback or
other callbacks. If you ask for an infinite loop, you might just get
one: watch out!
@param buffer the buffer whose size has changed
@param info a structure describing how the buffer changed.
@param arg a pointer to user data
*/
typedef void (*evbuffer_cb_func)(struct evbuffer *buffer, const struct evbuffer_cb_info *info, void *arg);
struct evbuffer_cb_entry;
/** Add a new callback to an evbuffer.
Subsequent calls to evbuffer_add_cb() add new callbacks. To remove this
callback, call evbuffer_remove_cb or evbuffer_remove_cb_entry.
@param buffer the evbuffer to be monitored
@param cb the callback function to invoke when the evbuffer is modified,
or NULL to remove all callbacks.
@param cbarg an argument to be provided to the callback function
@return a handle to the callback on success, or NULL on failure.
*/
struct evbuffer_cb_entry *evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg);
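/* Illustrative sketch, kept under "#if 0": a callback that reports how the
   buffer changed since the callbacks last ran, registered with
   evbuffer_add_cb(). Assumes <stdio.h> for printf(). */
#if 0
static void report_change(struct evbuffer *buffer,
    const struct evbuffer_cb_info *info, void *arg)
{
	(void)buffer; (void)arg;
	printf("evbuffer grew by %zu, shrank by %zu (was %zu bytes)\n",
	    info->n_added, info->n_deleted, info->orig_size);
}
/* ... later: evbuffer_add_cb(buf, report_change, NULL); */
#endif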
/** Remove a callback from an evbuffer, given a handle returned from
evbuffer_add_cb.
Calling this function invalidates the handle.
@return 0 if a callback was removed, or -1 if no matching callback was
found.
*/
int evbuffer_remove_cb_entry(struct evbuffer *buffer,
struct evbuffer_cb_entry *ent);
/** Remove a callback from an evbuffer, given the function and argument
used to add it.
@return 0 if a callback was removed, or -1 if no matching callback was
found.
*/
int evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg);
/** If this flag is not set, then a callback is temporarily disabled, and
* should not be invoked. */
#define EVBUFFER_CB_ENABLED 1
/** Change the flags that are set for a callback on a buffer by adding more.
@param buffer the evbuffer that the callback is watching.
@param cb the callback whose status we want to change.
@param flags EVBUFFER_CB_ENABLED to re-enable the callback.
@return 0 on success, -1 on failure.
*/
int evbuffer_cb_set_flags(struct evbuffer *buffer,
struct evbuffer_cb_entry *cb, ev_uint32_t flags);
/** Change the flags that are set for a callback on a buffer by removing some
@param buffer the evbuffer that the callback is watching.
@param cb the callback whose status we want to change.
@param flags EVBUFFER_CB_ENABLED to disable the callback.
@return 0 on success, -1 on failure.
*/
int evbuffer_cb_clear_flags(struct evbuffer *buffer,
struct evbuffer_cb_entry *cb, ev_uint32_t flags);
#if 0
/** Postpone calling a given callback until unsuspend is called later.
This is different from disabling the callback, since the callback will get
invoked later if the buffer size changes between now and when we unsuspend
it.
@param buffer the buffer that the callback is watching.
@param cb the callback we want to suspend.
*/
void evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb);
/** Stop postponing a callback that we postponed with evbuffer_cb_suspend.
If data was added to or removed from the buffer while the callback was
suspended, the callback will get called once now.
@param buffer the buffer that the callback is watching.
@param cb the callback we want to stop suspending.
*/
void evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb);
#endif
/**
Makes the data at the beginning of an evbuffer contiguous.
@param buf the evbuffer to make contiguous
@param size the number of bytes to make contiguous, or -1 to make the
entire buffer contiguous.
@return a pointer to the contiguous memory array
*/
unsigned char *evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size);
/**
Prepends data to the beginning of the evbuffer
@param buf the evbuffer to which to prepend data
@param data a pointer to the memory to prepend
@param size the number of bytes to prepend
@return 0 if successful, or -1 otherwise
*/
int evbuffer_prepend(struct evbuffer *buf, const void *data, size_t size);
/**
Prepends all data from the src evbuffer to the beginning of the dst
evbuffer.
@param dst the evbuffer to which to prepend data
@param src the evbuffer to prepend; it will be emptied as a result
@return 0 if successful, or -1 otherwise
*/
int evbuffer_prepend_buffer(struct evbuffer *dst, struct evbuffer* src);
/**
Prevent calls that modify an evbuffer from succeeding. A buffer may be
frozen at the front, at the back, or at both the front and the back.
If the front of a buffer is frozen, operations that drain data from
the front of the buffer, or that prepend data to the buffer, will
fail until it is unfrozen. If the back of a buffer is frozen, operations
that append data to the buffer will fail until it is unfrozen.
@param buf The buffer to freeze
@param at_front If true, we freeze the front of the buffer. If false,
we freeze the back.
@return 0 on success, -1 on failure.
*/
int evbuffer_freeze(struct evbuffer *buf, int at_front);
/**
Re-enable calls that modify an evbuffer.
@param buf The buffer to un-freeze
@param at_front If true, we unfreeze the front of the buffer. If false,
we unfreeze the back.
@return 0 on success, -1 on failure.
*/
int evbuffer_unfreeze(struct evbuffer *buf, int at_front);
struct event_base;
/**
Force all the callbacks on an evbuffer to be run, not immediately after
the evbuffer is altered, but instead from inside the event loop.
This can be used to serialize all the callbacks to a single thread
of execution.
*/
int evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_BUFFER_H_ */

Просмотреть файл

@ -1,110 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFER_COMPAT_H_
#define _EVENT2_BUFFER_COMPAT_H_
/** @file buffer_compat.h
Obsolete and deprecated versions of the functions in buffer.h: provided
only for backward compatibility.
*/
/**
Obsolete alias for evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY).
@deprecated This function is deprecated because its behavior is not correct
for almost any protocol, and also because it's wholly subsumed by
evbuffer_readln().
@param buffer the evbuffer to read from
@return pointer to a single line, or NULL if an error occurred
*/
char *evbuffer_readline(struct evbuffer *buffer);
/** Type definition for a callback that is invoked whenever data is added or
removed from an evbuffer.
An evbuffer may have one or more callbacks set at a time. The order
in which they are executed is undefined.
A callback function may add more callbacks, or remove itself from the
list of callbacks, or add or remove data from the buffer. It may not
remove another callback from the list.
If a callback adds or removes data from the buffer or from another
buffer, this can cause a recursive invocation of your callback or
other callbacks. If you ask for an infinite loop, you might just get
one: watch out!
@param buffer the buffer whose size has changed
@param old_len the previous length of the buffer
@param new_len the current length of the buffer
@param arg a pointer to user data
*/
typedef void (*evbuffer_cb)(struct evbuffer *buffer, size_t old_len, size_t new_len, void *arg);
/**
Replace all callbacks on an evbuffer with a single new callback, or
remove them.
Subsequent calls to evbuffer_setcb() replace callbacks set by previous
calls. Setting the callback to NULL removes any previously set callback.
@deprecated This function is deprecated because it clears all previous
callbacks set on the evbuffer, which can cause confusing behavior if
multiple parts of the code all want to add their own callbacks on a
buffer. Instead, use evbuffer_add(), evbuffer_del(), and
evbuffer_setflags() to manage your own evbuffer callbacks without
interfering with callbacks set by others.
@param buffer the evbuffer to be monitored
@param cb the callback function to invoke when the evbuffer is modified,
or NULL to remove all callbacks.
@param cbarg an argument to be provided to the callback function
*/
void evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg);
/**
Find a string within an evbuffer.
@param buffer the evbuffer to be searched
@param what the string to be searched for
@param len the length of the search string
@return a pointer to the beginning of the search string, or NULL if the search failed.
*/
unsigned char *evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len);
/** deprecated in favor of calling the functions directly */
#define EVBUFFER_LENGTH(x) evbuffer_get_length(x)
/** deprecated in favor of calling the functions directly */
#define EVBUFFER_DATA(x) evbuffer_pullup((x), -1)
#endif

Просмотреть файл

@ -1,747 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFEREVENT_H_
#define _EVENT2_BUFFEREVENT_H_
/** @file bufferevent.h
Functions for buffering data for network sending or receiving. Bufferevents
are higher level than evbuffers: each has an underlying evbuffer for reading
and one for writing, and callbacks that are invoked under certain
circumstances.
Libevent provides an abstraction on top of the regular event callbacks.
This abstraction is called a buffered event. A buffered event provides
input and output buffers that get filled and drained automatically. The
user of a buffered event no longer deals directly with the I/O, but
instead is reading from input and writing to output buffers.
Once initialized, the bufferevent structure can be used repeatedly with
bufferevent_enable() and bufferevent_disable().
When read enabled the bufferevent will try to read from the file descriptor
and call the read callback. The write callback is executed whenever the
output buffer is drained below the write low watermark, which is 0 by
default.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/* Just for error reporting - use other constants otherwise */
#define BEV_EVENT_READING 0x01 /**< error encountered while reading */
#define BEV_EVENT_WRITING 0x02 /**< error encountered while writing */
#define BEV_EVENT_EOF 0x10 /**< end of file reached */
#define BEV_EVENT_ERROR 0x20 /**< unrecoverable error encountered */
#define BEV_EVENT_TIMEOUT 0x40 /**< user specified timeout reached */
#define BEV_EVENT_CONNECTED 0x80 /**< connect operation finished. */
struct bufferevent;
struct event_base;
struct evbuffer;
struct sockaddr;
/**
type definition for the read or write callback.
The read callback is triggered when new data arrives in the input
buffer and the amount of readable data exceeds the low watermark,
which is 0 by default.
The write callback is triggered if the write buffer has been
exhausted or falls below its low watermark.
@param bev the bufferevent that triggered the callback
@param ctx the user specified context for this bufferevent
*/
typedef void (*bufferevent_data_cb)(struct bufferevent *bev, void *ctx);
/**
type definition for the error callback of a bufferevent.
The error callback is triggered if either an EOF condition or another
unrecoverable error was encountered.
@param bev the bufferevent for which the error condition was reached
@param what a conjunction of flags: BEV_EVENT_READING or BEV_EVENT_WRITING
to indicate if the error was encountered on the read or write path,
and one of the following flags: BEV_EVENT_EOF, BEV_EVENT_ERROR,
BEV_EVENT_TIMEOUT, BEV_EVENT_CONNECTED.
@param ctx the user specified context for this bufferevent
*/
typedef void (*bufferevent_event_cb)(struct bufferevent *bev, short what, void *ctx);
/** Options that can be specified when creating a bufferevent */
enum bufferevent_options {
/** If set, we close the underlying file
* descriptor/bufferevent/whatever when this bufferevent is freed. */
BEV_OPT_CLOSE_ON_FREE = (1<<0),
/** If set, and threading is enabled, operations on this bufferevent
* are protected by a lock */
BEV_OPT_THREADSAFE = (1<<1),
/** If set, callbacks are run deferred in the event loop. */
BEV_OPT_DEFER_CALLBACKS = (1<<2),
/** If set, callbacks are executed without locks being held on the
* bufferevent. This option currently requires that
* BEV_OPT_DEFER_CALLBACKS also be set; a future version of Libevent
* might remove the requirement.*/
BEV_OPT_UNLOCK_CALLBACKS = (1<<3)
};
/**
Create a new socket bufferevent over an existing socket.
@param base the event base to associate with the new bufferevent.
@param fd the file descriptor from which data is read and written to.
This file descriptor is not allowed to be a pipe(2).
It is safe to set the fd to -1, so long as you later
set it with bufferevent_setfd or bufferevent_socket_connect().
@return a pointer to a newly allocated bufferevent struct, or NULL if an
error occurred
@see bufferevent_free()
*/
struct bufferevent *bufferevent_socket_new(struct event_base *base, evutil_socket_t fd, int options);
/**
Launch a connect() attempt with a socket. When the connect succeeds,
the eventcb will be invoked with BEV_EVENT_CONNECTED set.
If the bufferevent does not already have a socket set, we allocate a new
socket here and make it nonblocking before we begin.
If no address is provided, we assume that the socket is already connecting,
and configure the bufferevent so that a BEV_EVENT_CONNECTED event will be
yielded when it is done connecting.
@param bufev an existing bufferevent allocated with
bufferevent_socket_new().
@param addr the address we should connect to
@param socklen The length of the address
@return 0 on success, -1 on failure.
*/
int bufferevent_socket_connect(struct bufferevent *, struct sockaddr *, int);
struct evdns_base;
/**
Resolve the hostname 'hostname' and connect to it as with
bufferevent_socket_connect().
@param bufev An existing bufferevent allocated with bufferevent_socket_new()
@param evdns_base Optionally, an evdns_base to use for resolving hostnames
asynchronously. May be set to NULL for a blocking resolve.
@param family A preferred address family to resolve addresses to, or
AF_UNSPEC for no preference. Only AF_INET, AF_INET6, and AF_UNSPEC are
supported.
@param hostname The hostname to resolve; see below for notes on recognized
formats
@param port The port to connect to on the resolved address.
@return 0 if successful, -1 on failure.
Recognized hostname formats are:
www.example.com (hostname)
1.2.3.4 (ipv4address)
::1 (ipv6address)
[::1] ([ipv6address])
Performance note: If you do not provide an evdns_base, this function
may block while it waits for a DNS response. This is probably not
what you want.
*/
int bufferevent_socket_connect_hostname(struct bufferevent *b,
struct evdns_base *, int, const char *, int);
/**
Return the error code for the last failed DNS lookup attempt made by
bufferevent_socket_connect_hostname().
@param bev The bufferevent object.
@return DNS error code.
@see evutil_gai_strerror()
*/
int bufferevent_socket_get_dns_error(struct bufferevent *bev);
/**
Assign a bufferevent to a specific event_base.
@param base an event_base returned by event_init()
@param bufev a bufferevent struct returned by bufferevent_new()
@return 0 if successful, or -1 if an error occurred
@see bufferevent_new()
*/
int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev);
/**
Assign a priority to a bufferevent.
@param bufev a bufferevent struct
@param pri the priority to be assigned
@return 0 if successful, or -1 if an error occurred
*/
int bufferevent_priority_set(struct bufferevent *bufev, int pri);
/**
Deallocate the storage associated with a bufferevent structure.
@param bufev the bufferevent structure to be freed.
*/
void bufferevent_free(struct bufferevent *bufev);
/**
Changes the callbacks for a bufferevent.
@param bufev the bufferevent object for which to change callbacks
@param readcb callback to invoke when there is data to be read, or NULL if
no callback is desired
@param writecb callback to invoke when the file descriptor is ready for
writing, or NULL if no callback is desired
@param eventcb callback to invoke when there is an event on the file
descriptor
@param cbarg an argument that will be supplied to each of the callbacks
(readcb, writecb, and eventcb)
@see bufferevent_new()
*/
void bufferevent_setcb(struct bufferevent *bufev,
bufferevent_data_cb readcb, bufferevent_data_cb writecb,
bufferevent_event_cb eventcb, void *cbarg);
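/* Illustrative sketch, kept under "#if 0", combining bufferevent_socket_new(),
   bufferevent_setcb(), bufferevent_enable() and
   bufferevent_socket_connect_hostname() into a minimal echo client. EV_READ
   and EV_WRITE come from event2/event.h and AF_UNSPEC from <sys/socket.h>;
   error handling is abbreviated. */
#if 0
static void echo_read_cb(struct bufferevent *bev, void *ctx)
{
	(void)ctx;
	/* Move everything from the input buffer over to the output buffer. */
	bufferevent_write_buffer(bev, bufferevent_get_input(bev));
}
static void echo_event_cb(struct bufferevent *bev, short what, void *ctx)
{
	(void)ctx;
	if (what & (BEV_EVENT_EOF | BEV_EVENT_ERROR))
		bufferevent_free(bev);
}
static struct bufferevent *open_echo_client(struct event_base *base,
    const char *host, int port)
{
	struct bufferevent *bev;
	bev = bufferevent_socket_new(base, -1, BEV_OPT_CLOSE_ON_FREE);
	if (!bev)
		return NULL;
	bufferevent_setcb(bev, echo_read_cb, NULL, echo_event_cb, NULL);
	bufferevent_enable(bev, EV_READ | EV_WRITE);
	if (bufferevent_socket_connect_hostname(bev, NULL, AF_UNSPEC,
		host, port) < 0) {
		bufferevent_free(bev);
		return NULL;
	}
	return bev;
}
#endif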
/**
Changes the file descriptor on which the bufferevent operates.
Not supported for all bufferevent types.
@param bufev the bufferevent object for which to change the file descriptor
@param fd the file descriptor to operate on
*/
int bufferevent_setfd(struct bufferevent *bufev, evutil_socket_t fd);
/**
Returns the file descriptor associated with a bufferevent, or -1 if
no file descriptor is associated with the bufferevent.
*/
evutil_socket_t bufferevent_getfd(struct bufferevent *bufev);
/**
Returns the underlying bufferevent associated with a bufferevent (if
the bufferevent is a wrapper), or NULL if there is no underlying bufferevent.
*/
struct bufferevent *bufferevent_get_underlying(struct bufferevent *bufev);
/**
Write data to a bufferevent buffer.
The bufferevent_write() function can be used to write data to the file
descriptor. The data is appended to the output buffer and written to the
descriptor automatically as it becomes available for writing.
@param bufev the bufferevent to be written to
@param data a pointer to the data to be written
@param size the length of the data, in bytes
@return 0 if successful, or -1 if an error occurred
@see bufferevent_write_buffer()
*/
int bufferevent_write(struct bufferevent *bufev,
const void *data, size_t size);
/**
Write data from an evbuffer to a bufferevent buffer. The evbuffer is
being drained as a result.
@param bufev the bufferevent to be written to
@param buf the evbuffer to be written
@return 0 if successful, or -1 if an error occurred
@see bufferevent_write()
*/
int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf);
/**
Read data from a bufferevent buffer.
The bufferevent_read() function is used to read data from the input buffer.
@param bufev the bufferevent to be read from
@param data pointer to a buffer that will store the data
@param size the size of the data buffer, in bytes
@return the amount of data read, in bytes.
*/
size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
/**
Read data from a bufferevent buffer into an evbuffer. This avoids
memory copies.
@param bufev the bufferevent to be read from
@param buf the evbuffer to which to add data
@return 0 if successful, or -1 if an error occurred.
*/
int bufferevent_read_buffer(struct bufferevent *bufev, struct evbuffer *buf);
/**
Returns the input buffer.
The user MUST NOT set the callback on this buffer.
@param bufev the bufferevent from which to get the evbuffer
@return the evbuffer object for the input buffer
*/
struct evbuffer *bufferevent_get_input(struct bufferevent *bufev);
/**
Returns the output buffer.
The user MUST NOT set the callback on this buffer.
When filters are being used, the filters need to be manually
triggered if the output buffer was manipulated.
@param bufev the bufferevent from which to get the evbuffer
@return the evbuffer object for the output buffer
*/
struct evbuffer *bufferevent_get_output(struct bufferevent *bufev);
/**
Enable a bufferevent.
@param bufev the bufferevent to be enabled
@param event any combination of EV_READ | EV_WRITE.
@return 0 if successful, or -1 if an error occurred
@see bufferevent_disable()
*/
int bufferevent_enable(struct bufferevent *bufev, short event);
/**
Disable a bufferevent.
@param bufev the bufferevent to be disabled
@param event any combination of EV_READ | EV_WRITE.
@return 0 if successful, or -1 if an error occurred
@see bufferevent_enable()
*/
int bufferevent_disable(struct bufferevent *bufev, short event);
/**
Return the events that are enabled on a given bufferevent.
@param bufev the bufferevent to inspect
@return A combination of EV_READ | EV_WRITE
*/
short bufferevent_get_enabled(struct bufferevent *bufev);
/**
Set the read and write timeout for a buffered event.
A bufferevent's timeout will fire the first time that the indicated
amount of time has elapsed since a successful read or write operation,
during which the bufferevent was trying to read or write.
(In other words, if reading or writing is disabled, or if the
bufferevent's read or write operation has been suspended because
there's no data to write, or not enough bandwidth, or so on, the
timeout isn't active. The timeout only becomes active when we're
willing to actually read or write.)
Calling bufferevent_enable or setting a timeout for a bufferevent
whose timeout is already pending resets its timeout.
If the timeout elapses, the corresponding operation (EV_READ or
EV_WRITE) becomes disabled until you re-enable it again. The
bufferevent's event callback is called with
BEV_EVENT_TIMEOUT|BEV_EVENT_READING or
BEV_EVENT_TIMEOUT|BEV_EVENT_WRITING set.
@param bufev the bufferevent to be modified
@param timeout_read the read timeout, or NULL
@param timeout_write the write timeout, or NULL
*/
int bufferevent_set_timeouts(struct bufferevent *bufev,
const struct timeval *timeout_read, const struct timeval *timeout_write);
/**
Sets the watermarks for read and write events.
On input, a bufferevent does not invoke the user read callback unless
there is at least low watermark data in the buffer. If the read buffer
is beyond the high watermark, the bufferevent stops reading from the network.
On output, the user write callback is invoked whenever the buffered data
falls below the low watermark. Filters that write to this bufev will try
not to write more bytes to this buffer than the high watermark would allow,
except when flushing.
@param bufev the bufferevent to be modified
@param events EV_READ, EV_WRITE or both
@param lowmark the lower watermark to set
@param highmark the high watermark to set
*/
void bufferevent_setwatermark(struct bufferevent *bufev, short events,
size_t lowmark, size_t highmark);
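/* Illustrative sketch, kept under "#if 0": deliver reads only once at least a
   4-byte header is buffered, and stop reading from the transport when more
   than 1 MB is queued. EV_READ comes from event2/event.h. */
#if 0
static void limit_input(struct bufferevent *bev)
{
	bufferevent_setwatermark(bev, EV_READ, 4, 1024 * 1024);
}
#endif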
/**
Acquire the lock on a bufferevent. Has no effect if locking was not
enabled with BEV_OPT_THREADSAFE.
*/
void bufferevent_lock(struct bufferevent *bufev);
/**
Release the lock on a bufferevent. Has no effect if locking was not
enabled with BEV_OPT_THREADSAFE.
*/
void bufferevent_unlock(struct bufferevent *bufev);
/**
Flags that can be passed into filters to let them know how to
deal with the incoming data.
*/
enum bufferevent_flush_mode {
/** usually set when processing data */
BEV_NORMAL = 0,
/** want to checkpoint all data sent. */
BEV_FLUSH = 1,
/** encountered EOF on read or done sending data */
BEV_FINISHED = 2
};
/**
Triggers the bufferevent to produce more
data if possible.
@param bufev the bufferevent object
@param iotype either EV_READ or EV_WRITE or both.
@param mode either BEV_NORMAL or BEV_FLUSH or BEV_FINISHED
@return -1 on failure, 0 if no data was produced, 1 if data was produced
*/
int bufferevent_flush(struct bufferevent *bufev,
short iotype,
enum bufferevent_flush_mode mode);
/**
Support for filtering input and output of bufferevents.
*/
/**
Values that filters can return.
*/
enum bufferevent_filter_result {
/** everything is okay */
BEV_OK = 0,
/** the filter needs to read more data before output */
BEV_NEED_MORE = 1,
/** the filter encountered a critical error, no further data
can be processed. */
BEV_ERROR = 2
};
/** A callback function to implement a filter for a bufferevent.
@param src An evbuffer to drain data from.
@param dst An evbuffer to add data to.
@param limit A suggested upper bound of bytes to write to dst.
The filter may ignore this value, but doing so means that
it will overflow the high-water mark associated with dst.
-1 means "no limit".
@param mode Whether we should write data as may be convenient
(BEV_NORMAL), or flush as much data as we can (BEV_FLUSH),
or flush as much as we can, possibly including an end-of-stream
marker (BEV_FINISHED).
@param ctx A user-supplied pointer.
@return BEV_OK if we wrote some data; BEV_NEED_MORE if we can't
produce any more output until we get some input; and BEV_ERROR
on an error.
*/
typedef enum bufferevent_filter_result (*bufferevent_filter_cb)(
struct evbuffer *src, struct evbuffer *dst, ev_ssize_t dst_limit,
enum bufferevent_flush_mode mode, void *ctx);
/**
Allocate a new filtering bufferevent on top of an existing bufferevent.
@param underlying the underlying bufferevent.
@param input_filter The filter to apply to data we read from the underlying
bufferevent
@param output_filter The filter to apply to data we write to the underlying
bufferevent
@param options A bitfield of bufferevent options.
@param free_context A function to use to free the filter context when
this bufferevent is freed.
@param ctx A context pointer to pass to the filter functions.
*/
struct bufferevent *
bufferevent_filter_new(struct bufferevent *underlying,
bufferevent_filter_cb input_filter,
bufferevent_filter_cb output_filter,
int options,
void (*free_context)(void *),
void *ctx);
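/* Illustrative sketch, kept under "#if 0": a trivial pass-through filter that
   moves at most dst_limit bytes from src to dst unchanged -- the minimal
   shape a real framing or (de)compression filter would build on. */
#if 0
static enum bufferevent_filter_result
passthrough_filter(struct evbuffer *src, struct evbuffer *dst,
    ev_ssize_t dst_limit, enum bufferevent_flush_mode mode, void *ctx)
{
	size_t len = evbuffer_get_length(src);
	(void)mode; (void)ctx;
	if (len == 0)
		return BEV_NEED_MORE;
	if (dst_limit >= 0 && (size_t)dst_limit < len)
		len = (size_t)dst_limit;
	if (evbuffer_remove_buffer(src, dst, len) < 0)
		return BEV_ERROR;
	return BEV_OK;
}
/* ... bufferevent_filter_new(underlying, passthrough_filter,
   passthrough_filter, BEV_OPT_CLOSE_ON_FREE, NULL, NULL); */
#endif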
/**
Allocate a pair of linked bufferevents. The bufferevents behave as would
two bufferevent_sock instances connected to opposite ends of a
socketpair(), except that no internal socketpair is allocated.
@param base The event base to associate with the socketpair.
@param options A set of options for this bufferevent
@param pair A pointer to an array to hold the two new bufferevent objects.
@return 0 on success, -1 on failure.
*/
int bufferevent_pair_new(struct event_base *base, int options,
struct bufferevent *pair[2]);
/**
Given one bufferevent returned by bufferevent_pair_new(), returns the
other one if it still exists. Otherwise returns NULL.
*/
struct bufferevent *bufferevent_pair_get_partner(struct bufferevent *bev);
/**
Abstract type used to configure rate-limiting on a bufferevent or a group
of bufferevents.
*/
struct ev_token_bucket_cfg;
/**
A group of bufferevents which are configured to respect the same rate
limit.
*/
struct bufferevent_rate_limit_group;
/**
Initialize and return a new object to configure the rate-limiting behavior
of bufferevents.
@param read_rate The maximum number of bytes to read per tick on
average.
@param read_burst The maximum number of bytes to read in any single tick.
@param write_rate The maximum number of bytes to write per tick on
average.
@param write_burst The maximum number of bytes to write in any single tick.
@param tick_len The length of a single tick. Defaults to one second.
Any fractions of a millisecond are ignored.
Note that all rate-limits here are currently best-effort: future versions
of Libevent may implement them more tightly.
*/
struct ev_token_bucket_cfg *ev_token_bucket_cfg_new(
ev_uint32_t read_rate, ev_uint32_t read_burst,
ev_uint32_t write_rate, ev_uint32_t write_burst,
const struct timeval *tick_len);
/** Free all storage held in 'cfg'.
Note: 'cfg' is not currently reference-counted; it is not safe to free it
until no bufferevent is using it.
*/
void ev_token_bucket_cfg_free(struct ev_token_bucket_cfg *cfg);
/**
Set the rate-limit of the bufferevent 'bev' to the one specified in
'cfg'. If 'cfg' is NULL, disable any per-bufferevent rate-limiting on
'bev'.
Note that only some bufferevent types currently respect rate-limiting.
They are: socket-based bufferevents (normal and IOCP-based), and SSL-based
bufferevents.
Return 0 on success, -1 on failure.
*/
int bufferevent_set_rate_limit(struct bufferevent *bev,
struct ev_token_bucket_cfg *cfg);
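/* Illustrative sketch, kept under "#if 0": throttle a single bufferevent to
   roughly 64 KB/s average with 128 KB bursts, passing a NULL tick_len for the
   default one-second tick. The cfg must stay alive while any bufferevent
   uses it. */
#if 0
static struct ev_token_bucket_cfg *throttle(struct bufferevent *bev)
{
	struct ev_token_bucket_cfg *cfg;
	cfg = ev_token_bucket_cfg_new(64 * 1024, 128 * 1024,
	    64 * 1024, 128 * 1024, NULL);
	if (cfg && bufferevent_set_rate_limit(bev, cfg) < 0) {
		ev_token_bucket_cfg_free(cfg);
		return NULL;
	}
	return cfg;
}
#endif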
/**
Create a new rate-limit group for bufferevents. A rate-limit group
constrains the maximum number of bytes sent and received, in toto,
by all of its bufferevents.
@param base An event_base to run any necessary timeouts for the group.
Note that all bufferevents in the group do not necessarily need to share
this event_base.
@param cfg The rate-limit for this group.
Note that all rate-limits here are currently best-effort: future versions
of Libevent may implement them more tightly.
Note also that only some bufferevent types currently respect rate-limiting.
They are: socket-based bufferevents (normal and IOCP-based), and SSL-based
bufferevents.
*/
struct bufferevent_rate_limit_group *bufferevent_rate_limit_group_new(
struct event_base *base,
const struct ev_token_bucket_cfg *cfg);
/**
Change the rate-limiting settings for a given rate-limiting group.
Return 0 on success, -1 on failure.
*/
int bufferevent_rate_limit_group_set_cfg(
struct bufferevent_rate_limit_group *,
const struct ev_token_bucket_cfg *);
/**
Change the smallest quantum we're willing to allocate to any single
bufferevent in a group for reading or writing at a time.
The rationale is that, because of TCP/IP protocol overheads and kernel
behavior, if a rate-limiting group is so tight on bandwidth that you're
only willing to send 1 byte per tick per bufferevent, you might instead
want to batch up the reads and writes so that you send N bytes per
1/N of the bufferevents (chosen at random) each tick, so you still wind
up send 1 byte per tick per bufferevent on average, but you don't send
so many tiny packets.
The default min-share is currently 64 bytes.
Returns 0 on success, -1 on failure.
*/
int bufferevent_rate_limit_group_set_min_share(
struct bufferevent_rate_limit_group *, size_t);
/**
Free a rate-limiting group. The group must have no members when
this function is called.
*/
void bufferevent_rate_limit_group_free(struct bufferevent_rate_limit_group *);
/**
Add 'bev' to the list of bufferevents whose aggregate reading and writing
is restricted by 'g'. If 'g' is NULL, remove 'bev' from its current group.
A bufferevent may belong to no more than one rate-limit group at a time.
If 'bev' is already a member of a group, it will be removed from its old
group before being added to 'g'.
Return 0 on success and -1 on failure.
*/
int bufferevent_add_to_rate_limit_group(struct bufferevent *bev,
struct bufferevent_rate_limit_group *g);
/** Remove 'bev' from its current rate-limit group (if any). */
int bufferevent_remove_from_rate_limit_group(struct bufferevent *bev);
/*@{*/
/**
Return the current read or write bucket size for a bufferevent.
If it is not configured with a per-bufferevent ratelimit, return
EV_SSIZE_MAX. This function does not inspect the group limit, if any.
Note that it can return a negative value if the bufferevent has been
made to read or write more than its limit.
*/
ev_ssize_t bufferevent_get_read_limit(struct bufferevent *bev);
ev_ssize_t bufferevent_get_write_limit(struct bufferevent *bev);
/*@}*/
ev_ssize_t bufferevent_get_max_to_read(struct bufferevent *bev);
ev_ssize_t bufferevent_get_max_to_write(struct bufferevent *bev);
/*@{*/
/**
Return the read or write bucket size for a bufferevent rate limit
group. Note that it can return a negative value if bufferevents in
the group have been made to read or write more than their limits.
*/
ev_ssize_t bufferevent_rate_limit_group_get_read_limit(
struct bufferevent_rate_limit_group *);
ev_ssize_t bufferevent_rate_limit_group_get_write_limit(
struct bufferevent_rate_limit_group *);
/*@}*/
/*@{*/
/**
Subtract a number of bytes from a bufferevent's read or write bucket.
The decrement value can be negative, if you want to manually refill
the bucket. If the change puts the bucket above or below zero, the
bufferevent will resume or suspend reading or writing as appropriate.
These functions make no change in the buckets for the bufferevent's
group, if any.
Returns 0 on success, -1 on internal error.
*/
int bufferevent_decrement_read_limit(struct bufferevent *bev, ev_ssize_t decr);
int bufferevent_decrement_write_limit(struct bufferevent *bev, ev_ssize_t decr);
/*@}*/
/*@{*/
/**
Subtract a number of bytes from a bufferevent rate-limiting group's
read or write bucket. The decrement value can be negative, if you
want to manually refill the bucket. If the change puts the bucket
above or below zero, the bufferevents in the group will resume or
suspend reading or writing as appropriate.
Returns 0 on success, -1 on internal error.
*/
int bufferevent_rate_limit_group_decrement_read(
struct bufferevent_rate_limit_group *, ev_ssize_t);
int bufferevent_rate_limit_group_decrement_write(
struct bufferevent_rate_limit_group *, ev_ssize_t);
/*@}*/
/** Set the variable pointed to by total_read_out to the total number of bytes
* ever read on grp, and the variable pointed to by total_written_out to the
* total number of bytes ever written on grp. */
void bufferevent_rate_limit_group_get_totals(
struct bufferevent_rate_limit_group *grp,
ev_uint64_t *total_read_out, ev_uint64_t *total_written_out);
/** Reset the number of bytes read or written on grp as reported by
 * bufferevent_rate_limit_group_get_totals(). */
void
bufferevent_rate_limit_group_reset_totals(
struct bufferevent_rate_limit_group *grp);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_BUFFEREVENT_H_ */

Просмотреть файл

@ -1,96 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos, Nick Mathewson
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFEREVENT_COMPAT_H_
#define _EVENT2_BUFFEREVENT_COMPAT_H_
#define evbuffercb bufferevent_data_cb
#define everrorcb bufferevent_event_cb
/**
Create a new bufferevent for an fd.
This function is deprecated. Use bufferevent_socket_new() and
bufferevent_setcb() instead.
Libevent provides an abstraction on top of the regular event callbacks.
This abstraction is called a buffered event. A buffered event provides
input and output buffers that get filled and drained automatically. The
user of a buffered event no longer deals directly with the I/O, but
instead is reading from input and writing to output buffers.
Once initialized, the bufferevent structure can be used repeatedly with
bufferevent_enable() and bufferevent_disable().
When read enabled the bufferevent will try to read from the file descriptor
and call the read callback. The write callback is executed whenever the
output buffer is drained below the write low watermark, which is 0 by
default.
If multiple bases are in use, bufferevent_base_set() must be called before
enabling the bufferevent for the first time.
@param fd the file descriptor from which data is read and written to.
This file descriptor is not allowed to be a pipe(2).
@param readcb callback to invoke when there is data to be read, or NULL if
no callback is desired
@param writecb callback to invoke when the file descriptor is ready for
writing, or NULL if no callback is desired
@param errorcb callback to invoke when there is an error on the file
descriptor
@param cbarg an argument that will be supplied to each of the callbacks
(readcb, writecb, and errorcb)
@return a pointer to a newly allocated bufferevent struct, or NULL if an
error occurred
@see bufferevent_base_set(), bufferevent_free()
*/
struct bufferevent *bufferevent_new(evutil_socket_t fd,
evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
/**
Set the read and write timeout for a buffered event.
@param bufev the bufferevent to be modified
@param timeout_read the read timeout
@param timeout_write the write timeout
*/
void bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write);
#define EVBUFFER_READ BEV_EVENT_READING
#define EVBUFFER_WRITE BEV_EVENT_WRITING
#define EVBUFFER_EOF BEV_EVENT_EOF
#define EVBUFFER_ERROR BEV_EVENT_ERROR
#define EVBUFFER_TIMEOUT BEV_EVENT_TIMEOUT
/** macro for getting access to the input buffer of a bufferevent */
#define EVBUFFER_INPUT(x) bufferevent_get_input(x)
/** macro for getting access to the output buffer of a bufferevent */
#define EVBUFFER_OUTPUT(x) bufferevent_get_output(x)
#endif

Просмотреть файл

@ -1,78 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFEREVENT_SSL_H_
#define _EVENT2_BUFFEREVENT_SSL_H_
/** @file bufferevent_ssl.h
OpenSSL support for bufferevents.
*/
#include <event2/event-config.h>
#include <event2/bufferevent.h>
#include <event2/util.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ssl_st;
enum bufferevent_ssl_state {
BUFFEREVENT_SSL_OPEN = 0,
BUFFEREVENT_SSL_CONNECTING = 1,
BUFFEREVENT_SSL_ACCEPTING = 2
};
#ifdef _EVENT_HAVE_OPENSSL
struct bufferevent *
bufferevent_openssl_filter_new(struct event_base *base,
struct bufferevent *underlying,
struct ssl_st *ssl,
enum bufferevent_ssl_state state,
int options);
struct bufferevent *
bufferevent_openssl_socket_new(struct event_base *base,
evutil_socket_t fd,
struct ssl_st *ssl,
enum bufferevent_ssl_state state,
int options);
struct ssl_st *
bufferevent_openssl_get_ssl(struct bufferevent *bufev);
int bufferevent_ssl_renegotiate(struct bufferevent *bev);
unsigned long bufferevent_get_openssl_error(struct bufferevent *bev);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_BUFFEREVENT_SSL_H_ */

Просмотреть файл

@ -1,113 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_BUFFEREVENT_STRUCT_H_
#define _EVENT2_BUFFEREVENT_STRUCT_H_
/** @file bufferevent_struct.h
Data structures for bufferevents. Using these structures may hurt forward
compatibility with later versions of Libevent: be careful!
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/* For struct event */
#include <event2/event_struct.h>
struct event_watermark {
size_t low;
size_t high;
};
/**
Shared implementation of a bufferevent.
This type is exposed only because it was exposed in previous versions,
and some people's code may rely on manipulating it. Otherwise, you
should really not rely on the layout, size, or contents of this structure:
it is fairly volatile, and WILL change in future versions of the code.
**/
struct bufferevent {
/** Event base for which this bufferevent was created. */
struct event_base *ev_base;
/** Pointer to a table of function pointers to set up how this
bufferevent behaves. */
const struct bufferevent_ops *be_ops;
/** A read event that triggers when a timeout has happened or a socket
is ready to read data. Only used by some subtypes of
bufferevent. */
struct event ev_read;
/** A write event that triggers when a timeout has happened or a socket
is ready to write data. Only used by some subtypes of
bufferevent. */
struct event ev_write;
/** An input buffer. Only the bufferevent is allowed to add data to
this buffer, though the user is allowed to drain it. */
struct evbuffer *input;
/** An output buffer. Only the bufferevent is allowed to drain data
from this buffer, though the user is allowed to add to it. */
struct evbuffer *output;
struct event_watermark wm_read;
struct event_watermark wm_write;
bufferevent_data_cb readcb;
bufferevent_data_cb writecb;
/* This should be called 'eventcb', but renaming it would break
* backward compatibility */
bufferevent_event_cb errorcb;
void *cbarg;
struct timeval timeout_read;
struct timeval timeout_write;
/** Events that are currently enabled: currently EV_READ and EV_WRITE
are supported. */
short enabled;
};
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_BUFFEREVENT_STRUCT_H_ */
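
As the comment above warns, poking at struct bufferevent directly is fragile; the accessor functions cover the same fields. A small, hedged sketch using accessors instead of the wm_read/timeout_* members ('bev' is assumed to be an existing bufferevent):

#include <sys/time.h>
#include <event2/event.h>
#include <event2/bufferevent.h>

static void configure_bev(struct bufferevent *bev)
{
    struct timeval rto = { 10, 0 }, wto = { 5, 0 };

    bufferevent_set_timeouts(bev, &rto, &wto);         /* instead of timeout_read/_write */
    bufferevent_setwatermark(bev, EV_READ, 16, 4096);  /* instead of wm_read */
}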


@@ -1,639 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* The original DNS code is due to Adam Langley with heavy
* modifications by Nick Mathewson. Adam put his DNS software in the
* public domain. You can find his original copyright below. Please be
* aware that the code as part of Libevent is governed by the 3-clause
* BSD license above.
*
* This software is Public Domain. To view a copy of the public domain dedication,
* visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
* Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
*
* I ask and expect, but do not require, that all derivative works contain an
* attribution similar to:
* Parts developed by Adam Langley <agl@imperialviolet.org>
*
* You may wish to replace the word "Parts" with something else depending on
* the amount of original code.
*
* (Derivative works does not include programs which link against, run or include
* the source verbatim in their source distributions)
*/
/** @file dns.h
*
* Welcome, gentle reader
*
* Async DNS lookups are really a whole lot harder than they should be,
* mostly stemming from the fact that the libc resolver has never been
* very good at them. Before you use this library you should see if libc
* can do the job for you with the modern async call getaddrinfo_a
* (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
* please continue.
*
* The library keeps track of the state of nameservers and will avoid
* them when they go down. Otherwise it will round robin between them.
*
* Quick start guide:
* #include "evdns.h"
* void callback(int result, char type, int count, int ttl,
* void *addresses, void *arg);
* evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
* evdns_resolve("www.hostname.com", 0, callback, NULL);
*
* When the lookup is complete the callback function is called. The
* first argument will be one of the DNS_ERR_* defines in evdns.h.
* Hopefully it will be DNS_ERR_NONE, in which case type will be
* DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
* which the data can be cached for (in seconds), addresses will point
* to an array of uint32_t's and arg will be whatever you passed to
* evdns_resolve.
*
* Searching:
*
* In order for this library to be a good replacement for glibc's resolver it
* supports searching. This involves setting a list of default domains, in
* which names will be queried for. The number of dots in the query name
* determines the order in which this list is used.
*
* Searching appears to be a single lookup from the point of view of the API,
* although many DNS queries may be generated from a single call to
* evdns_resolve. Searching can also drastically slow down the resolution
* of names.
*
* To disable searching:
* 1. Never set it up. If you never call evdns_resolv_conf_parse or
* evdns_search_add then no searching will occur.
*
* 2. If you do call evdns_resolv_conf_parse then don't pass
* DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
*
* 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
*
* The order of searches depends on the number of dots in the name. If the
* number is greater than the ndots setting then the name is first tried
* globally. Otherwise each search domain is appended in turn.
*
* The ndots setting can either be set from a resolv.conf, or by calling
* evdns_search_ndots_set.
*
* For example, with ndots set to 1 (the default) and a search domain list of
* ["myhome.net"]:
* Query: www
* Order: www.myhome.net, www.
*
* Query: www.abc
* Order: www.abc., www.abc.myhome.net
*
* Internals:
*
* Requests are kept in two queues. The first is the inflight queue. In
* this queue requests have an allocated transaction id and nameserver.
* They will soon be transmitted if they haven't already been.
*
* The second is the waiting queue. The size of the inflight ring is
* limited and all other requests wait in waiting queue for space. This
* bounds the number of concurrent requests so that we don't flood the
* nameserver. Several algorithms require a full walk of the inflight
* queue and so bounding its size keeps things going nicely under huge
* (many thousands of requests) loads.
*
* If a nameserver loses too many requests it is considered down and we
* try not to use it. After a while we send a probe to that nameserver
* (a lookup for google.com) and, if it replies, we consider it working
* again. If the nameserver fails a probe we wait longer to try again
* with the next probe.
*/
#ifndef _EVENT2_DNS_H_
#define _EVENT2_DNS_H_
#ifdef __cplusplus
extern "C" {
#endif
/* For integer types. */
#include <event2/util.h>
/** Error codes 0-5 are as described in RFC 1035. */
#define DNS_ERR_NONE 0
/** The name server was unable to interpret the query */
#define DNS_ERR_FORMAT 1
/** The name server was unable to process this query due to a problem with the
* name server */
#define DNS_ERR_SERVERFAILED 2
/** The domain name does not exist */
#define DNS_ERR_NOTEXIST 3
/** The name server does not support the requested kind of query */
#define DNS_ERR_NOTIMPL 4
/** The name server refuses to perform the specified operation for policy
* reasons */
#define DNS_ERR_REFUSED 5
/** The reply was truncated or ill-formatted */
#define DNS_ERR_TRUNCATED 65
/** An unknown error occurred */
#define DNS_ERR_UNKNOWN 66
/** Communication with the server timed out */
#define DNS_ERR_TIMEOUT 67
/** The request was canceled because the DNS subsystem was shut down. */
#define DNS_ERR_SHUTDOWN 68
/** The request was canceled via a call to evdns_cancel_request */
#define DNS_ERR_CANCEL 69
#define DNS_IPv4_A 1
#define DNS_PTR 2
#define DNS_IPv6_AAAA 3
#define DNS_QUERY_NO_SEARCH 1
#define DNS_OPTION_SEARCH 1
#define DNS_OPTION_NAMESERVERS 2
#define DNS_OPTION_MISC 4
#define DNS_OPTION_HOSTSFILE 8
#define DNS_OPTIONS_ALL 15
/* Obsolete name for DNS_QUERY_NO_SEARCH */
#define DNS_NO_SEARCH DNS_QUERY_NO_SEARCH
/**
* The callback that contains the results from a lookup.
* - result is one of the DNS_ERR_* values (DNS_ERR_NONE for success)
* - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
* - count contains the number of addresses of form type
* - ttl is the number of seconds the resolution may be cached for.
* - addresses needs to be cast according to type. It will be an array of
* 4-byte sequences for ipv4, or an array of 16-byte sequences for ipv6,
* or a nul-terminated string for PTR.
*/
typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
struct evdns_base;
struct event_base;
/**
Initialize the asynchronous DNS library.
This function initializes support for non-blocking name resolution by
calling evdns_resolv_conf_parse() on UNIX and
evdns_config_windows_nameservers() on Windows.
@param event_base the event base to associate the dns client with
@param initialize_nameservers 1 if resolv.conf processing should occur
@return 0 if successful, or -1 if an error occurred
@see evdns_base_free()
*/
struct evdns_base * evdns_base_new(struct event_base *event_base, int initialize_nameservers);
/**
Shut down the asynchronous DNS resolver and terminate all active requests.
If the 'fail_requests' option is enabled, all active requests will return
an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
the requests will be silently discarded.
@param evdns_base the evdns base to free
@param fail_requests if zero, active requests will be aborted; if non-zero,
active requests will return DNS_ERR_SHUTDOWN.
@see evdns_base_new()
*/
void evdns_base_free(struct evdns_base *base, int fail_requests);
/**
Convert a DNS error code to a string.
@param err the DNS error code
@return a string containing an explanation of the error code
*/
const char *evdns_err_to_string(int err);
/**
Add a nameserver.
The address should be an IPv4 address in network byte order.
The type of address is chosen so that it matches in_addr.s_addr.
@param base the evdns_base to which to add the name server
@param address an IP address in network byte order
@return 0 if successful, or -1 if an error occurred
@see evdns_base_nameserver_ip_add()
*/
int evdns_base_nameserver_add(struct evdns_base *base,
unsigned long int address);
/**
Get the number of configured nameservers.
This returns the number of configured nameservers (not necessarily the
number of running nameservers). This is useful for double-checking
whether our calls to the various nameserver configuration functions
have been successful.
@param base the evdns_base to which to apply this operation
@return the number of configured nameservers
@see evdns_base_nameserver_add()
*/
int evdns_base_count_nameservers(struct evdns_base *base);
/**
Remove all configured nameservers, and suspend all pending resolves.
Resolves will not necessarily be re-attempted until evdns_resume() is called.
@param base the evdns_base to which to apply this operation
@return 0 if successful, or -1 if an error occurred
@see evdns_base_resume()
*/
int evdns_base_clear_nameservers_and_suspend(struct evdns_base *base);
/**
Resume normal operation and continue any suspended resolve requests.
Re-attempt resolves left in limbo after an earlier call to
evdns_clear_nameservers_and_suspend().
@param base the evdns_base to which to apply this operation
@return 0 if successful, or -1 if an error occurred
@see evdns_base_clear_nameservers_and_suspend()
*/
int evdns_base_resume(struct evdns_base *base);
/**
Add a nameserver by string address.
This function parses an IPv4 or IPv6 address from a string and adds it as a
nameserver. It supports the following formats:
- [IPv6Address]:port
- [IPv6Address]
- IPv6Address
- IPv4Address:port
- IPv4Address
If no port is specified, it defaults to 53.
@param base the evdns_base to which to apply this operation
@return 0 if successful, or -1 if an error occurred
@see evdns_base_nameserver_add()
*/
int evdns_base_nameserver_ip_add(struct evdns_base *base,
const char *ip_as_string);
/**
Add a nameserver by sockaddr.
**/
int
evdns_base_nameserver_sockaddr_add(struct evdns_base *base,
const struct sockaddr *sa, ev_socklen_t len, unsigned flags);
struct evdns_request;
/**
Lookup an A record for a given name.
@param base the evdns_base to which to apply this operation
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return an evdns_request object if successful, or NULL if an error occurred.
@see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6(), evdns_cancel_request()
*/
struct evdns_request *evdns_base_resolve_ipv4(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr);
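/*
 * Illustrative sketch (not part of the original header): typical use of the
 * resolver API above.  The hostname and the printed output are examples only.
 */
#include <event2/event.h>
#include <event2/dns.h>
#include <stdio.h>

static void on_resolved(int result, char type, int count, int ttl,
                        void *addresses, void *arg)
{
    if (result != DNS_ERR_NONE) {
        printf("lookup failed: %s\n", evdns_err_to_string(result));
        return;
    }
    /* For DNS_IPv4_A, 'addresses' points to an array of 4-byte addresses. */
    printf("type=%d count=%d ttl=%d\n", (int)type, count, ttl);
}

static void start_lookup(struct event_base *base)
{
    struct evdns_base *dns = evdns_base_new(base, 1 /* parse resolv.conf */);
    evdns_base_resolve_ipv4(dns, "www.example.com", 0, on_resolved, NULL);
}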
/**
Lookup an AAAA record for a given name.
@param base the evdns_base to which to apply this operation
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return an evdns_request object if successful, or NULL if an error occurred.
@see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6(), evdns_cancel_request()
*/
struct evdns_request *evdns_base_resolve_ipv6(struct evdns_base *base, const char *name, int flags, evdns_callback_type callback, void *ptr);
struct in_addr;
struct in6_addr;
/**
Lookup a PTR record for a given IP address.
@param base the evdns_base to which to apply this operation
@param in an IPv4 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return an evdns_request object if successful, or NULL if an error occurred.
@see evdns_resolve_reverse_ipv6(), evdns_cancel_request()
*/
struct evdns_request *evdns_base_resolve_reverse(struct evdns_base *base, const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup a PTR record for a given IPv6 address.
@param base the evdns_base to which to apply this operation
@param in an IPv6 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return an evdns_request object if successful, or NULL if an error occurred.
@see evdns_resolve_reverse_ipv6(), evdns_cancel_request()
*/
struct evdns_request *evdns_base_resolve_reverse_ipv6(struct evdns_base *base, const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Cancels a pending DNS resolution request.
@param base the evdns_base that was used to make the request
@param req the evdns_request that was returned by calling a resolve function
@see evdns_base_resolve_ipv4(), evdns_base_resolve_ipv6(), evdns_base_resolve_reverse()
*/
void evdns_cancel_request(struct evdns_base *base, struct evdns_request *req);
/**
Set the value of a configuration option.
The currently available configuration options are:
ndots, timeout, max-timeouts, max-inflight, attempts, randomize-case,
bind-to, initial-probe-timeout, getaddrinfo-allow-skew.
In versions before Libevent 2.0.3-alpha, the option name needed to end with
a colon.
@param base the evdns_base to which to apply this operation
@param option the name of the configuration option to be modified
@param val the value to be set
@return 0 if successful, or -1 if an error occurred
*/
int evdns_base_set_option(struct evdns_base *base, const char *option, const char *val);
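/*
 * Illustrative sketch (not part of the original header): adjusting resolver
 * behaviour via evdns_base_set_option().  The chosen values are examples.
 */
#include <event2/dns.h>

static void tune_resolver(struct evdns_base *dns)
{
    evdns_base_set_option(dns, "timeout", "3");        /* seconds per attempt */
    evdns_base_set_option(dns, "attempts", "2");       /* lookup attempts before giving up */
    evdns_base_set_option(dns, "max-inflight", "64");  /* concurrent requests */
}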
/**
Parse a resolv.conf file.
The 'flags' parameter determines what information is parsed from the
resolv.conf file. See the man page for resolv.conf for the format of this
file.
The following directives are not parsed from the file: sortlist, rotate,
no-check-names, inet6, debug.
If this function encounters an error, the possible return values are: 1 =
failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
memory, 5 = short read from file, 6 = no nameservers listed in the file
@param base the evdns_base to which to apply this operation
@param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
DNS_OPTION_HOSTSFILE|DNS_OPTIONS_ALL
@param filename the path to the resolv.conf file
@return 0 if successful, or various positive error codes if an error
occurred (see above)
@see resolv.conf(3), evdns_config_windows_nameservers()
*/
int evdns_base_resolv_conf_parse(struct evdns_base *base, int flags, const char *const filename);
/**
Load an /etc/hosts-style file from 'hosts_fname' into 'base'.
If hosts_fname is NULL, add minimal entries for localhost, and nothing
else.
Note that only evdns_getaddrinfo uses the /etc/hosts entries.
Return 0 on success, negative on failure.
*/
int evdns_base_load_hosts(struct evdns_base *base, const char *hosts_fname);
/**
Obtain nameserver information using the Windows API.
Attempt to configure a set of nameservers based on platform settings on
a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
looks in the registry.
@return 0 if successful, or -1 if an error occurred
@see evdns_resolv_conf_parse()
*/
#ifdef WIN32
int evdns_base_config_windows_nameservers(struct evdns_base *);
#define EVDNS_BASE_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED
#endif
/**
Clear the list of search domains.
*/
void evdns_base_search_clear(struct evdns_base *base);
/**
Add a domain to the list of search domains
@param domain the domain to be added to the search list
*/
void evdns_base_search_add(struct evdns_base *base, const char *domain);
/**
Set the 'ndots' parameter for searches.
Sets the number of dots which, when found in a name, causes
the first query to be without any search domain.
@param ndots the new ndots parameter
*/
void evdns_base_search_ndots_set(struct evdns_base *base, const int ndots);
/**
A callback that is invoked when a log message is generated
@param is_warning indicates if the log message is a 'warning'
@param msg the content of the log message
*/
typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
/**
Set the callback function to handle DNS log messages. If this
callback is not set, evdns log messages are handled with the regular
Libevent logging system.
@param fn the callback to be invoked when a log message is generated
*/
void evdns_set_log_fn(evdns_debug_log_fn_type fn);
/**
Set a callback that will be invoked to generate transaction IDs. By
default, we pick transaction IDs based on the current clock time, which
is bad for security.
@param fn the new callback, or NULL to use the default.
NOTE: This function has no effect in Libevent 2.0.4-alpha and later,
since Libevent now provides its own secure RNG.
*/
void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
/**
Set a callback used to generate random bytes. By default, we use
the same function as passed to evdns_set_transaction_id_fn to generate
bytes two at a time. If a function is provided here, it's also used
to generate transaction IDs.
NOTE: This function has no effect in Libevent 2.0.4-alpha and later,
since Libevent now provides its own secure RNG.
*/
void evdns_set_random_bytes_fn(void (*fn)(char *, size_t));
/*
* Functions used to implement a DNS server.
*/
struct evdns_server_request;
struct evdns_server_question;
/**
A callback to implement a DNS server. The callback function receives a DNS
request. It should then optionally add a number of answers to the reply
using the evdns_server_request_add_*_reply functions, before calling either
evdns_server_request_respond to send the reply back, or
evdns_server_request_drop to decline to answer the request.
@param req A newly received request
@param user_data A pointer that was passed to
evdns_add_server_port_with_base().
*/
typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
#define EVDNS_ANSWER_SECTION 0
#define EVDNS_AUTHORITY_SECTION 1
#define EVDNS_ADDITIONAL_SECTION 2
#define EVDNS_TYPE_A 1
#define EVDNS_TYPE_NS 2
#define EVDNS_TYPE_CNAME 5
#define EVDNS_TYPE_SOA 6
#define EVDNS_TYPE_PTR 12
#define EVDNS_TYPE_MX 15
#define EVDNS_TYPE_TXT 16
#define EVDNS_TYPE_AAAA 28
#define EVDNS_QTYPE_AXFR 252
#define EVDNS_QTYPE_ALL 255
#define EVDNS_CLASS_INET 1
/* flags that can be set in answers; as part of the err parameter */
#define EVDNS_FLAGS_AA 0x400
#define EVDNS_FLAGS_RD 0x080
/** Create a new DNS server port.
@param base The event base to handle events for the server port.
@param socket A UDP socket to accept DNS requests.
@param flags Always 0 for now.
@param callback A function to invoke whenever we get a DNS request
on the socket.
@param user_data Data to pass to the callback.
@return an evdns_server_port structure for this server port.
*/
struct evdns_server_port *evdns_add_server_port_with_base(struct event_base *base, evutil_socket_t socket, int flags, evdns_request_callback_fn_type callback, void *user_data);
/** Close down a DNS server port, and free associated structures. */
void evdns_close_server_port(struct evdns_server_port *port);
/** Sets some flags in a reply we're building.
Allows setting of the AA or RD flags
*/
void evdns_server_request_set_flags(struct evdns_server_request *req, int flags);
/* Functions to add an answer to an in-progress DNS reply.
*/
int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl);
int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, const void *addrs, int ttl);
int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
/**
Send back a response to a DNS request, and free the request structure.
*/
int evdns_server_request_respond(struct evdns_server_request *req, int err);
/**
Free a DNS request without sending back a reply.
*/
int evdns_server_request_drop(struct evdns_server_request *req);
struct sockaddr;
/**
Get the address that made a DNS request.
*/
int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len);
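/*
 * Illustrative sketch (not part of the original header) of the server-side
 * API above: answer every A query with 127.0.0.1 and refuse everything else.
 * Creating and binding the UDP socket is assumed to happen elsewhere.
 */
#include <arpa/inet.h>
#include <event2/dns.h>
#include <event2/dns_struct.h>

static void dns_server_cb(struct evdns_server_request *req, void *user_data)
{
    int i, error = DNS_ERR_NONE;

    for (i = 0; i < req->nquestions; ++i) {
        const struct evdns_server_question *q = req->questions[i];
        if (q->type == EVDNS_TYPE_A) {
            ev_uint32_t addr = htonl(0x7f000001);  /* 127.0.0.1, network order */
            evdns_server_request_add_a_reply(req, q->name, 1, &addr, 60);
        } else {
            error = DNS_ERR_NOTIMPL;
        }
    }
    evdns_server_request_respond(req, error);
}

/* Registration, given an event_base 'base' and a bound UDP socket 'sock':
 *   evdns_add_server_port_with_base(base, sock, 0, dns_server_cb, NULL);
 */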
/** Callback for evdns_getaddrinfo. */
typedef void (*evdns_getaddrinfo_cb)(int result, struct evutil_addrinfo *res, void *arg);
struct evdns_base;
struct evdns_getaddrinfo_request;
/** Make a non-blocking getaddrinfo request using the dns_base in 'dns_base'.
*
* If we can answer the request immediately (with an error or not!), then we
* invoke cb immediately and return NULL. Otherwise we return
* an evdns_getaddrinfo_request and invoke cb later.
*
* When the callback is invoked, we pass as its first argument the error code
* that getaddrinfo would return (or 0 for no error). As its second argument,
* we pass the evutil_addrinfo structures we found (or NULL on error). We
* pass 'arg' as the third argument.
*
* Limitations:
*
* - The AI_V4MAPPED and AI_ALL flags are not currently implemented.
* - For ai_socktype, we only handle SOCKTYPE_STREAM, SOCKTYPE_UDP, and 0.
* - For ai_protocol, we only handle IPPROTO_TCP, IPPROTO_UDP, and 0.
*/
struct evdns_getaddrinfo_request *evdns_getaddrinfo(
struct evdns_base *dns_base,
const char *nodename, const char *servname,
const struct evutil_addrinfo *hints_in,
evdns_getaddrinfo_cb cb, void *arg);
/* Cancel an in-progress evdns_getaddrinfo. This MUST NOT be called after the
* getaddrinfo's callback has been invoked. The resolves will be cancelled,
* and the callback will be invoked with the error EVUTIL_EAI_CANCEL. */
void evdns_getaddrinfo_cancel(struct evdns_getaddrinfo_request *req);
#ifdef __cplusplus
}
#endif
#endif /* !_EVENT2_DNS_H_ */
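
For code that wants getaddrinfo() semantics rather than raw record lookups, evdns_getaddrinfo() above is the non-blocking equivalent. A hedged sketch (the hostname and port are illustrative only):

#include <string.h>
#include <stdio.h>
#include <sys/socket.h>
#include <event2/dns.h>
#include <event2/util.h>

static void gai_cb(int result, struct evutil_addrinfo *res, void *arg)
{
    struct evutil_addrinfo *ai;

    if (result) {
        printf("getaddrinfo failed: %s\n", evutil_gai_strerror(result));
        return;
    }
    for (ai = res; ai; ai = ai->ai_next)
        printf("family %d, socktype %d\n", ai->ai_family, ai->ai_socktype);
    evutil_freeaddrinfo(res);
}

static void lookup(struct evdns_base *dns)
{
    struct evutil_addrinfo hints;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    evdns_getaddrinfo(dns, "www.example.com", "80", &hints, gai_cb, NULL);
}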


@@ -1,335 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_DNS_COMPAT_H_
#define _EVENT2_DNS_COMPAT_H_
/** @file dns_compat.h
Potentially non-threadsafe versions of the functions in dns.h: provided
only for backwards compatibility.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/**
Initialize the asynchronous DNS library.
This function initializes support for non-blocking name resolution by
calling evdns_resolv_conf_parse() on UNIX and
evdns_config_windows_nameservers() on Windows.
@deprecated This function is deprecated because it always uses the current
event base, and is easily confused by multiple calls to event_init(), and
so is not safe for multithreaded use. Additionally, it allocates a global
structure that only one thread can use. The replacement is
evdns_base_new().
@return 0 if successful, or -1 if an error occurred
@see evdns_shutdown()
*/
int evdns_init(void);
struct evdns_base;
/**
Return the global evdns_base created by event_init() and used by the other
deprecated functions.
@deprecated This function is deprecated because use of the global
evdns_base is error-prone.
*/
struct evdns_base *evdns_get_global_base(void);
/**
Shut down the asynchronous DNS resolver and terminate all active requests.
If the 'fail_requests' option is enabled, all active requests will return
an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
the requests will be silently discarded.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_free().
@param fail_requests if zero, active requests will be aborted; if non-zero,
active requests will return DNS_ERR_SHUTDOWN.
@see evdns_init()
*/
void evdns_shutdown(int fail_requests);
/**
Add a nameserver.
The address should be an IPv4 address in network byte order.
The type of address is chosen so that it matches in_addr.s_addr.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_nameserver_add().
@param address an IP address in network byte order
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_ip_add()
*/
int evdns_nameserver_add(unsigned long int address);
/**
Get the number of configured nameservers.
This returns the number of configured nameservers (not necessarily the
number of running nameservers). This is useful for double-checking
whether our calls to the various nameserver configuration functions
have been successful.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_count_nameservers().
@return the number of configured nameservers
@see evdns_nameserver_add()
*/
int evdns_count_nameservers(void);
/**
Remove all configured nameservers, and suspend all pending resolves.
Resolves will not necessarily be re-attempted until evdns_resume() is called.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_clear_nameservers_and_suspend().
@return 0 if successful, or -1 if an error occurred
@see evdns_resume()
*/
int evdns_clear_nameservers_and_suspend(void);
/**
Resume normal operation and continue any suspended resolve requests.
Re-attempt resolves left in limbo after an earlier call to
evdns_clear_nameservers_and_suspend().
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_resume().
@return 0 if successful, or -1 if an error occurred
@see evdns_clear_nameservers_and_suspend()
*/
int evdns_resume(void);
/**
Add a nameserver.
This wraps the evdns_nameserver_add() function by parsing a string as an IP
address and adding it as a nameserver.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_nameserver_ip_add().
@return 0 if successful, or -1 if an error occurred
@see evdns_nameserver_add()
*/
int evdns_nameserver_ip_add(const char *ip_as_string);
/**
Lookup an A record for a given name.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_resolve_ipv4().
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup an AAAA record for a given name.
@param name a DNS hostname
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
struct in_addr;
struct in6_addr;
/**
Lookup a PTR record for a given IP address.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_resolve_reverse().
@param in an IPv4 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Lookup a PTR record for a given IPv6 address.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_resolve_reverse_ipv6().
@param in an IPv6 address
@param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
@param callback a callback function to invoke when the request is completed
@param ptr an argument to pass to the callback function
@return 0 if successful, or -1 if an error occurred
@see evdns_resolve_reverse_ipv6()
*/
int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
/**
Set the value of a configuration option.
The currently available configuration options are:
ndots, timeout, max-timeouts, max-inflight, and attempts
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_set_option().
@param option the name of the configuration option to be modified
@param val the value to be set
@param flags Ignored.
@return 0 if successful, or -1 if an error occurred
*/
int evdns_set_option(const char *option, const char *val, int flags);
/**
Parse a resolv.conf file.
The 'flags' parameter determines what information is parsed from the
resolv.conf file. See the man page for resolv.conf for the format of this
file.
The following directives are not parsed from the file: sortlist, rotate,
no-check-names, inet6, debug.
If this function encounters an error, the possible return values are: 1 =
failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
memory, 5 = short read from file, 6 = no nameservers listed in the file
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_resolv_conf_parse().
@param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
DNS_OPTIONS_ALL
@param filename the path to the resolv.conf file
@return 0 if successful, or various positive error codes if an error
occurred (see above)
@see resolv.conf(3), evdns_config_windows_nameservers()
*/
int evdns_resolv_conf_parse(int flags, const char *const filename);
/**
Clear the list of search domains.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_search_clear().
*/
void evdns_search_clear(void);
/**
Add a domain to the list of search domains
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_search_add().
@param domain the domain to be added to the search list
*/
void evdns_search_add(const char *domain);
/**
Set the 'ndots' parameter for searches.
Sets the number of dots which, when found in a name, causes
the first query to be without any search domain.
@deprecated This function is deprecated because it does not allow the
caller to specify which evdns_base it applies to. The recommended
function is evdns_base_search_ndots_set().
@param ndots the new ndots parameter
*/
void evdns_search_ndots_set(const int ndots);
/**
As evdns_add_server_port_with_base().
@deprecated This function is deprecated because it does not allow the
caller to specify which event_base it uses. The recommended
function is evdns_add_server_port_with_base().
*/
struct evdns_server_port *evdns_add_server_port(evutil_socket_t socket, int flags, evdns_request_callback_fn_type callback, void *user_data);
#ifdef WIN32
int evdns_config_windows_nameservers(void);
#define EVDNS_CONFIG_WINDOWS_NAMESERVERS_IMPLEMENTED
#endif
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_DNS_COMPAT_H_ */


@@ -1,80 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_DNS_STRUCT_H_
#define _EVENT2_DNS_STRUCT_H_
/** @file dns_struct.h
Data structures for dns. Using these structures may hurt forward
compatibility with later versions of Libevent: be careful!
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/*
* Structures used to implement a DNS server.
*/
struct evdns_server_request {
int flags;
int nquestions;
struct evdns_server_question **questions;
};
struct evdns_server_question {
int type;
#ifdef __cplusplus
int dns_question_class;
#else
/* You should refer to this field as "dns_question_class". The
* name "class" works in C for backward compatibility, and will be
* removed in a future version. (1.5 or later). */
int class;
#define dns_question_class class
#endif
char name[1];
};
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_DNS_STRUCT_H_ */


@@ -1,753 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_EVENT_H_
#define _EVENT2_EVENT_H_
/** @file event2/event.h
Core functions for waiting for and receiving events, and using event bases.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "opal_rename.h"
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <stdio.h>
/* For int types. */
#include <event2/util.h>
struct event_base;
struct event;
struct event_config;
/** Enable some relatively expensive debugging checks in Libevent that would
* normally be turned off. Generally, these cause code that would otherwise
* crash mysteriously to fail earlier with an assertion failure. Note that
* this method MUST be called before any events or event_bases have been
* created.
*
* Debug mode can currently catch the following errors:
* An event is re-assigned while it is added
* Any function is called on a non-assigned event
*
* Note that debugging mode uses memory to track every event that has been
* initialized (via event_assign, event_set, or event_new) but not yet
* released (via event_free or event_debug_unassign). If you want to use
* debug mode, and you find yourself running out of memory, you will need
* to use event_debug_unassign to explicitly stop tracking events that
* are no longer considered set-up.
*/
void event_enable_debug_mode(void);
/**** OMPI CHANGE ****/
void event_set_debug_output(int output);
/**
* When debugging mode is enabled, informs Libevent that an event should no
* longer be considered as assigned. When debugging mode is not enabled, does
* nothing.
*
* This function must only be called on a non-added event.
*/
void event_debug_unassign(struct event *);
/**
Initialize the event API.
Use event_base_new() to initialize a new event base.
@see event_base_set(), event_base_free(),
event_base_new_with_config()
*/
struct event_base *event_base_new(void);
/**
Reinitialize the event base after a fork.
Some event mechanisms do not survive across fork. The event base needs
to be reinitialized with the event_reinit() function.
@param base the event base that needs to be re-initialized
@return 0 if successful, or -1 if some events could not be re-added.
@see event_base_new(), event_init()
*/
int event_reinit(struct event_base *base);
/**
Threadsafe event dispatching loop.
@param eb the event_base structure returned by event_init()
@see event_init(), event_dispatch()
*/
int event_base_dispatch(struct event_base *);
/**
Get the kernel event notification mechanism used by Libevent.
@param eb the event_base structure returned by event_base_new()
@return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
*/
const char *event_base_get_method(const struct event_base *);
/**
Gets all event notification mechanisms supported by Libevent.
This function returns the event mechanisms in the order preferred by
Libevent. Note that this list will include all backends that
Libevent has compiled-in support for, and will not necessarily check
your OS to see whether it has the required resources.
@return an array with pointers to the names of supported methods.
The end of the array is indicated by a NULL pointer. If an
error is encountered NULL is returned.
*/
const char **event_get_supported_methods(void);
/**
Allocates a new event configuration object.
The event configuration object can be used to change the behavior of
an event base.
@return an event_config object that can be used to store configuration or
NULL when an error is encountered.
*/
struct event_config *event_config_new(void);
/**
Deallocates all memory associated with an event configuration object
@param cfg the event configuration object to be freed.
*/
void event_config_free(struct event_config *cfg);
/**
Enters an event method that should be avoided into the configuration.
This can be used to avoid event mechanisms that do not support certain
file descriptor types. An application can make use of multiple event
bases to accommodate incompatible file descriptor types.
@param cfg the event configuration object
@param method the event method to avoid
@return 0 on success, -1 on failure.
*/
int event_config_avoid_method(struct event_config *cfg, const char *method);
enum event_method_feature {
/* Require an event method that allows edge-triggered events with EV_ET. */
EV_FEATURE_ET = 0x01,
/* Require an event method where having one event triggered among
* many is [approximately] an O(1) operation. This excludes (for
* example) select and poll, which are approximately O(N) for N
* equal to the total number of possible events. */
EV_FEATURE_O1 = 0x02,
/* Require an event method that allows file descriptors as well as
* sockets. */
EV_FEATURE_FDS = 0x04
};
enum event_base_config_flag {
/** Do not allocate a lock for the event base, even if we have
locking set up. */
EVENT_BASE_FLAG_NOLOCK = 0x01,
/** Do not check the EVENT_NO* environment variables when picking
an event_base. */
EVENT_BASE_FLAG_IGNORE_ENV = 0x02,
/** Windows only: enable the IOCP dispatcher at startup */
EVENT_BASE_FLAG_STARTUP_IOCP = 0x04,
/** Instead of checking the current time every time the event loop is
ready to run timeout callbacks, check after each timeout callback.
*/
EVENT_BASE_FLAG_NO_CACHE_TIME = 0x08
};
/**
Return a bitmask of the features implemented by an event base.
*/
int event_base_get_features(const struct event_base *base);
/**
Enters a required event method feature that the application demands.
Note that not every feature or combination of features is supported
on every platform. Code that requests features should be prepared
to handle the case where event_base_new_with_config() returns NULL, as in:
<pre>
event_config_require_features(cfg, EV_FEATURE_ET);
base = event_base_new_with_config(cfg);
if (base == NULL) {
// We can't get edge-triggered behavior here.
event_config_require_features(cfg, 0);
base = event_base_new_with_config(cfg);
}
</pre>
@param cfg the event configuration object
@param feature a bitfield of one or more event_method_feature values.
Replaces values from previous calls to this function.
@return 0 on success, -1 on failure.
*/
int event_config_require_features(struct event_config *cfg, int feature);
/** Sets one or more flags to configure what parts of the eventual event_base
* will be initialized, and how they'll work. */
int event_config_set_flag(struct event_config *cfg, int flag);
/**
* Records a hint for the number of CPUs in the system. This is used for
* tuning thread pools, etc, for optimal performance.
*
* @param cfg the event configuration object
* @param cpus the number of cpus
* @return 0 on success, -1 on failure.
*/
int event_config_set_num_cpus_hint(struct event_config *cfg, int cpus);
/**
Initialize the event API.
Use event_base_new_with_config() to initialize a new event base, taking
the specified configuration under consideration. The configuration object
can currently be used to avoid certain event notification mechanisms.
@param cfg the event configuration object
@return an initialized event_base that can be used for registering events,
or NULL if no event base can be created with the requested event_config.
@see event_base_new(), event_base_free(), event_init(), event_assign()
*/
struct event_base *event_base_new_with_config(const struct event_config *);
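/*
 * Illustrative sketch (not part of the original header): combining the
 * configuration calls above.  Which backend is avoided or required is an
 * example and depends on the platform.
 */
#include <stdio.h>
#include <event2/event.h>

static struct event_base *make_base(void)
{
    struct event_base *base;
    struct event_config *cfg = event_config_new();

    event_config_avoid_method(cfg, "select");           /* skip an O(N) backend */
    event_config_require_features(cfg, EV_FEATURE_O1);  /* demand O(1) dispatch */
    event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
    base = event_base_new_with_config(cfg);
    event_config_free(cfg);
    if (base)
        printf("using backend: %s\n", event_base_get_method(base));
    return base;   /* NULL if no backend satisfies the constraints */
}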
/**
Deallocate all memory associated with an event_base, and free the base.
Note that this function will not close any fds or free any memory passed
to event_set as the argument to callback.
@param eb an event_base to be freed
*/
void event_base_free(struct event_base *);
#define _EVENT_LOG_DEBUG 0
#define _EVENT_LOG_MSG 1
#define _EVENT_LOG_WARN 2
#define _EVENT_LOG_ERR 3
typedef void (*event_log_cb)(int severity, const char *msg);
/**
Redirect Libevent's log messages.
@param cb a function taking two arguments: an integer severity between
_EVENT_LOG_DEBUG and _EVENT_LOG_ERR, and a string. If cb is NULL,
then the default log is used.
*/
void event_set_log_callback(event_log_cb cb);
/**
Override Libevent's behavior in the event of a fatal internal error.
By default, Libevent will call exit(1) if a programming error makes it
impossible to continue correct operation. This function allows you to supply
another callback instead. Note that if the function is ever invoked,
something is wrong with your program, or with Libevent: any subsequent calls
to Libevent may result in undefined behavior.
Libevent will (almost) always log an _EVENT_LOG_ERR message before calling
this function; look at the last log message to see why Libevent has died.
*/
typedef void (*event_fatal_cb)(int err);
void event_set_fatal_callback(event_fatal_cb cb);
/**
Associate a different event base with an event.
@param eb the event base
@param ev the event
*/
int event_base_set(struct event_base *, struct event *);
/**
event_loop() flags
*/
/*@{*/
#define EVLOOP_ONCE 0x01 /**< Block at most once. */
#define EVLOOP_NONBLOCK 0x02 /**< Do not block. */
/*@}*/
/**
Handle events (threadsafe version).
This is a more flexible version of event_base_dispatch().
@param eb the event_base structure returned by event_init()
@param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
@return 0 if successful, -1 if an error occurred, or 1 if no events were
registered.
@see event_loopexit(), event_base_loop()
*/
int event_base_loop(struct event_base *, int);
/**
Exit the event loop after the specified time (threadsafe variant).
The next event_base_loop() iteration after the given timer expires will
complete normally (handling all queued events) then exit without
blocking for events again.
Subsequent invocations of event_base_loop() will proceed normally.
@param eb the event_base structure returned by event_init()
@param tv the amount of time after which the loop should terminate.
@return 0 if successful, or -1 if an error occurred
@see event_loopexit()
*/
int event_base_loopexit(struct event_base *, const struct timeval *);
/**
Abort the active event_base_loop() immediately.
event_base_loop() will abort the loop after the next event is completed;
event_base_loopbreak() is typically invoked from this event's callback.
This behavior is analogous to the "break;" statement.
Subsequent invocations of event_loop() will proceed normally.
@param eb the event_base structure returned by event_init()
@return 0 if successful, or -1 if an error occurred
@see event_base_loopexit
*/
int event_base_loopbreak(struct event_base *);
/**
Checks if the event loop was told to exit by event_loopexit().
This function will return true for an event_base at every point after
event_loopexit() is called, until the event loop is next entered.
@param eb the event_base structure returned by event_init()
@return true if event_base_loopexit() was called on this event base,
or 0 otherwise
@see event_base_loopexit
@see event_base_got_break
*/
int event_base_got_exit(struct event_base *);
/**
Checks if the event loop was told to abort immediately by event_loopbreak().
This function will return true for an event_base at every point after
event_loopbreak() is called, until the event loop is next entered.
@param eb the event_base structure returned by event_init()
@return true if event_base_loopbreak() was called on this event base,
or 0 otherwise
@see event_base_loopbreak
@see event_base_got_exit
*/
int event_base_got_break(struct event_base *);
/* Flags to pass to event_set(), event_new(), event_assign(),
* event_pending(), and anything else with an argument of the form
* "short events" */
#define EV_TIMEOUT 0x01
#define EV_READ 0x02
#define EV_WRITE 0x04
#define EV_SIGNAL 0x08
/** Persistent event: won't get removed automatically when activated. */
#define EV_PERSIST 0x10
/** Select edge-triggered behavior, if supported by the backend. */
#define EV_ET 0x20
/**
Define a timer event.
@param ev event struct to be modified
@param b an event_base
@param cb callback function
@param arg argument that will be passed to the callback function
*/
#define evtimer_assign(ev, b, cb, arg) \
event_assign((ev), (b), -1, 0, (cb), (arg))
#define evtimer_new(b, cb, arg) event_new((b), -1, 0, (cb), (arg))
/**
Add a timer event.
@param ev the event struct
@param tv timeval struct
*/
#define evtimer_add(ev, tv) event_add((ev), (tv))
/**
* Delete a timer event.
*
* @param ev the event struct to be disabled
*/
#define evtimer_del(ev) event_del(ev)
#define evtimer_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv))
#define evtimer_initialized(ev) _event_initialized((ev), 0)
#define evsignal_add(ev, tv) event_add((ev), (tv))
#define evsignal_assign(ev, b, x, cb, arg) \
event_assign((ev), (b), (x), EV_SIGNAL|EV_PERSIST, cb, (arg))
#define evsignal_new(b, x, cb, arg) \
event_new((b), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
#define evsignal_del(ev) event_del(ev)
#define evsignal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv))
#define evsignal_initialized(ev) _event_initialized((ev), 0)
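/*
 * Illustrative sketch (not part of the original header) of the evtimer_* and
 * evsignal_* convenience macros above: a 5-second one-shot timer plus a
 * SIGINT handler that exits the loop.
 */
#include <signal.h>
#include <stdio.h>
#include <event2/event.h>

static void on_timer(evutil_socket_t fd, short what, void *arg)
{
    printf("timer fired\n");
}

static void on_sigint(evutil_socket_t fd, short what, void *arg)
{
    event_base_loopexit((struct event_base *)arg, NULL);
}

int main(void)
{
    struct event_base *base = event_base_new();
    struct event *t = evtimer_new(base, on_timer, NULL);
    struct event *s = evsignal_new(base, SIGINT, on_sigint, base);
    struct timeval five = { 5, 0 };

    evtimer_add(t, &five);
    evsignal_add(s, NULL);       /* persistent: keeps the loop alive until SIGINT */
    event_base_dispatch(base);
    event_free(t);
    event_free(s);
    event_base_free(base);
    return 0;
}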
typedef void (*event_callback_fn)(evutil_socket_t, short, void *);
/**
Prepare an event structure to be added.
The function event_assign() prepares the event structure ev to be used in
future calls to event_add() and event_del(). The event will be prepared to
call the function specified by the fn argument with an int argument
indicating the file descriptor, a short argument indicating the type of
event, and a void * argument given in the arg argument. The fd indicates
the file descriptor that should be monitored for events. The events can be
either EV_READ, EV_WRITE, or both, indicating that an application can read
or write from the file descriptor, respectively, without blocking.
The function fn will be called with the file descriptor that triggered the
event and the type of event which will be either EV_TIMEOUT, EV_SIGNAL,
EV_READ, or EV_WRITE. The additional flag EV_PERSIST makes an event_add()
persistent until event_del() has been called.
Note that using event_assign() requires that you have already allocated the
event struct. Doing so will often require your code to depend on the size
of the structure, and will create possible incompatibility with future
versions of Libevent. If this seems like a bad idea to you, use event_new()
and event_free() instead.
@param ev an event struct to be modified
@param base the event base to which ev should be attached.
@param fd the file descriptor to be monitored
@param event desired events to monitor; can be EV_READ and/or EV_WRITE
@param fn callback function to be invoked when the event occurs
@param arg an argument to be passed to the callback function
@return 0 if success, or -1 on invalid arguments.
@see event_add(), event_del(), event_once()
*/
int event_assign(struct event *, struct event_base *, evutil_socket_t, short, event_callback_fn, void *);
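/*
  Minimal event_assign() sketch (illustrative, not part of the original
  header); "base", "fd" and "read_cb" are hypothetical, and "ev" is
  caller-allocated storage as the note above requires.

  @code
  static void read_cb(evutil_socket_t fd, short what, void *arg)
  {
      // fd is readable here
  }

  struct event ev;
  event_assign(&ev, base, fd, EV_READ | EV_PERSIST, read_cb, NULL);
  event_add(&ev, NULL);   // no timeout; wait until fd becomes readable
  @endcode
*/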
/**
Create and allocate a new event structure, ready to be added.
Arguments are as for event_assign; returns a newly allocated struct event *
that must later be deallocated with event_free().
*/
struct event *event_new(struct event_base *, evutil_socket_t, short, event_callback_fn, void *);
/**
Deallocate a struct event * returned by event_new().
*/
void event_free(struct event *);
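/*
  Heap-allocated counterpart (illustrative sketch, not part of the original
  header): event_new() avoids depending on sizeof(struct event).

  @code
  struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);
  event_add(ev, NULL);
  // ... later, once the event is no longer needed ...
  event_del(ev);
  event_free(ev);
  @endcode
*/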
/**
Schedule a one-time event
The function event_base_once() is similar to event_set(). However, it
schedules a callback to be called exactly once and does not require the
caller to prepare an event structure.
@param base an event_base returned by event_init()
@param fd a file descriptor to monitor
@param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
EV_WRITE
@param callback callback function to be invoked when the event occurs
@param arg an argument to be passed to the callback function
@param timeout the maximum amount of time to wait for the event, or NULL
to wait forever
@return 0 if successful, or -1 if an error occurred
@see event_once()
*/
int event_base_once(struct event_base *, evutil_socket_t, short, event_callback_fn, void *, const struct timeval *);
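/*
  One-shot timeout sketch using event_base_once() (illustrative, not part of
  the original header); no event structure needs to be managed by the caller,
  and "once_cb" is a hypothetical name.

  @code
  static void once_cb(evutil_socket_t fd, short what, void *arg)
  {
      // called exactly once, roughly five seconds from now
  }

  struct timeval five_sec = { 5, 0 };
  event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &five_sec);
  @endcode
*/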
/**
Add an event to the set of monitored events.
The function event_add() schedules the execution of the ev event when the
event specified in event_set() occurs, or at least after the time specified
in tv has elapsed. If tv is NULL, no timeout occurs and the function will only be
called if a matching event occurs on the file descriptor. The event in the
ev argument must be already initialized by event_set() and may not be used
in calls to event_set() until it has timed out or been removed with
event_del(). If the event in the ev argument already has a scheduled
timeout, the old timeout will be replaced by the new one.
@param ev an event struct initialized via event_set()
@param timeout the maximum amount of time to wait for the event, or NULL
to wait forever
@return 0 if successful, or -1 if an error occurred
@see event_del(), event_set()
*/
int event_add(struct event *, const struct timeval *);
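/*
  Adding an event with a timeout (illustrative sketch, not part of the
  original header): the callback sees EV_READ if data arrived, or EV_TIMEOUT
  if one second elapsed first; "ev" is a previously initialized event.

  @code
  struct timeval one_sec = { 1, 0 };
  event_add(ev, &one_sec);   // replaces any previously scheduled timeout
  @endcode
*/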
/**
Remove an event from the set of monitored events.
The function event_del() will cancel the event in the argument ev. If the
event has already executed or has never been added the call will have no
effect.
@param ev an event struct to be removed from the working set
@return 0 if successful, or -1 if an error occurred
@see event_add()
*/
int event_del(struct event *);
/**
Make an event active.
@param ev an event to make active.
@param res a set of flags to pass to the event's callback.
@param ncalls
**/
void event_active(struct event *, int, short);
/**
Checks if a specific event is pending or scheduled.
@param ev an event struct previously passed to event_add()
@param what the requested event type; any of EV_TIMEOUT|EV_READ|
EV_WRITE|EV_SIGNAL
@param tv if this field is not NULL, and the event has a timeout,
this field is set to hold the time at which the timeout will
expire.
@return true if the event is pending on any of the events in 'what' (that
is to say, it has been added), or 0 if the event is not added.
*/
int event_pending(const struct event *, short, struct timeval *);
/**
Test if an event structure might be initialized.
The event_initialized() macro can be used to check if an event has been
initialized.
Warning: This macro is deprecated because it does not perform a reliable
test: While it can tell a zeroed-out piece of memory from an initialized
event, it can easily be confused by uninitialized memory.
@param ev an event structure to be tested
@return 1 if the structure might be initialized, or 0 if it has not been
initialized
*/
#define event_initialized(ev) _event_initialized((ev), 1)
int _event_initialized(const struct event *, int check_fd);
/**
Get the signal number assigned to an event.
*/
#define event_get_signal(ev) ((int)event_get_fd(ev))
/**
Get the socket assigned to an event.
*/
evutil_socket_t event_get_fd(const struct event *ev);
/**
Get the event_base assigned to an event.
*/
struct event_base *event_get_base(const struct event *ev);
/**
Return the events (EV_READ, EV_WRITE, etc) assigned to an event.
*/
short event_get_events(const struct event *ev);
/**
Return the callback assigned to an event.
*/
event_callback_fn event_get_callback(const struct event *ev);
/**
Return the callback argument assigned to an event.
*/
void *event_get_callback_arg(const struct event *ev);
void event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out);
/**
Return the size of struct event that the Libevent library was compiled
with.
This will be NO GREATER than sizeof(struct event) if you're running with
the same version of Libevent that your application was built with, but
otherwise might not.
Note that it might be SMALLER than sizeof(struct event) if some future
version of Libevent adds extra padding to the end of struct event.
We might do this to help ensure ABI-compatibility between different
versions of Libevent.
*/
size_t event_get_struct_event_size(void);
/**
Get the Libevent version.
Note that this will give you the version of the library that you're
currently linked against, not the version of the headers that you've
compiled against.
@return a string containing the version number of Libevent
*/
const char *event_get_version(void);
/**
Return a numeric representation of Libevent's version.
Note that this will give you the version of the library that you're
currently linked against, not the version of the headers you've used to
compile.
The format uses one byte each for the major, minor, and patchlevel parts of
the version number. The low-order byte is unused. For example, version
2.0.1-alpha has a numeric representation of 0x02000100
*/
ev_uint32_t event_get_version_number(void);
/** As event_get_version, but gives the version of Libevent's headers. */
#define LIBEVENT_VERSION _EVENT_VERSION
/** As event_get_version_number, but gives the version number of Libevent's
* headers. */
#define LIBEVENT_VERSION_NUMBER _EVENT_NUMERIC_VERSION
#define EVENT_MAX_PRIORITIES 256
/**
Set the number of different event priorities (threadsafe variant).
See the description of event_priority_init() for more information.
@param eb the event_base structure returned by event_init()
@param npriorities the maximum number of priorities
@return 0 if successful, or -1 if an error occurred
@see event_priority_init(), event_priority_set()
*/
int event_base_priority_init(struct event_base *, int);
/**
Assign a priority to an event.
@param ev an event struct
@param priority the new priority to be assigned
@return 0 if successful, or -1 if an error occurred
@see event_priority_init()
*/
int event_priority_set(struct event *, int);
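/*
  Priority sketch (illustrative, not part of the original header): two
  priority levels, with a hypothetical "urgent_ev" processed before events
  left at the default (middle) priority.

  @code
  event_base_priority_init(base, 2);   // priorities 0 (highest) and 1
  event_priority_set(urgent_ev, 0);    // must be set before event_add()
  @endcode
*/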
/**
Prepare Libevent to use a large number of timeouts with the same duration.
Libevent's default scheduling algorithm is optimized for having a large
number of timeouts with their durations more or less randomly distributed.
If you have a large number of timeouts that all have the same duration (for
example, if you have a large number of connections that all have a
10-second timeout), then you can improve Libevent's performance by telling
Libevent about it.
To do this, call this function with the common duration. It will return a
pointer to a different, opaque timeout value. (Don't depend on its actual
contents!) When you use this timeout value in event_add(), Libevent will
schedule the event more efficiently.
(This optimization probably will not be worthwhile until you have thousands
or tens of thousands of events with the same timeout.)
*/
const struct timeval *event_base_init_common_timeout(struct event_base *base,
const struct timeval *duration);
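/*
  Common-timeout sketch (illustrative, not part of the original header):
  useful when thousands of events share the same duration; "conn_ev" is a
  hypothetical connection event.

  @code
  struct timeval ten_sec = { 10, 0 };
  const struct timeval *common = event_base_init_common_timeout(base, &ten_sec);
  // use the returned (opaque) value instead of &ten_sec when adding events
  event_add(conn_ev, common);
  @endcode
*/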
#ifndef _EVENT_DISABLE_MM_REPLACEMENT
/**
Override the functions that Libevent uses for memory management.
Usually, Libevent uses the standard libc functions malloc, realloc, and
free to allocate memory. Passing replacements for those functions to
event_set_mem_functions() overrides this behavior. To restore the default
behavior, pass NULLs as the arguments to this function.
Note that all memory returned from Libevent will be allocated by the
replacement functions rather than by malloc() and realloc(). Thus, if you
have replaced those functions, it may not be appropriate to free() memory
that you get from Libevent.
@param malloc_fn A replacement for malloc.
@param realloc_fn A replacement for realloc
@param free_fn A replacement for free.
**/
void event_set_mem_functions(void *(*malloc_fn)(size_t sz),
void *(*realloc_fn)(void *ptr, size_t sz),
void (*free_fn)(void *ptr));
#define EVENT_SET_MEM_FUNCTIONS_IMPLEMENTED
#endif
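/*
  Memory-override sketch for event_set_mem_functions() above (illustrative,
  not part of the original header); "counting_malloc" and friends are
  hypothetical wrappers around the libc allocators.

  @code
  static void *counting_malloc(size_t sz)           { return malloc(sz); }
  static void *counting_realloc(void *p, size_t sz) { return realloc(p, sz); }
  static void  counting_free(void *p)               { free(p); }

  // must be called before any other Libevent call allocates memory
  event_set_mem_functions(counting_malloc, counting_realloc, counting_free);
  @endcode
*/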
void event_base_dump_events(struct event_base *, FILE *);
/** Sets 'tv' to the current time (as returned by gettimeofday()),
looking at the cached value in 'base' if possible, and calling
gettimeofday() or clock_gettime() as appropriate if there is no
cached time.
Generally, this value will only be cached while actually
processing event callbacks, and may be very inaccurate if your
callbacks take a long time to execute.
Returns 0 on success, negative on failure.
*/
int event_base_gettimeofday_cached(struct event_base *base,
struct timeval *tv);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_EVENT_H_ */

View File

@ -1,324 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_EVENT_COMPAT_H_
#define _EVENT2_EVENT_COMPAT_H_
/** @file event_compat.h
Potentially non-threadsafe versions of the functions in event.h: provided
only for backwards compatibility.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/**
Initialize the event API.
The event API needs to be initialized with event_init() before it can be
used. Sets the global current base that gets used for events that have no
base associated with them.
@deprecated This function is deprecated because it replaces the "current"
event_base, and is totally unsafe for multithreaded use. The replacement
is event_base_new().
@see event_base_set(), event_base_new()
*/
struct event_base *event_init(void);
/**
Loop to process events.
In order to process events, an application needs to call
event_dispatch(). This function only returns on error, and should
replace the event core of the application program.
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_dispatch().
@see event_base_dispatch()
*/
int event_dispatch(void);
/**
Handle events.
This is a more flexible version of event_dispatch().
@deprecated This function is deprecated because it uses the event base from
the last call to event_init, and is therefore not safe for multithreaded
use. The replacement is event_base_loop().
@param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
@return 0 if successful, -1 if an error occurred, or 1 if no events were
registered.
@see event_base_loopexit(), event_base_loop()
*/
int event_loop(int);
/**
Exit the event loop after the specified time.
The next event_loop() iteration after the given timer expires will
complete normally (handling all queued events) then exit without
blocking for events again.
Subsequent invocations of event_loop() will proceed normally.
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_loopexit().
@param tv the amount of time after which the loop should terminate.
@return 0 if successful, or -1 if an error occurred
@see event_loop(), event_base_loop(), event_base_loopexit()
*/
int event_loopexit(const struct timeval *);
/**
Abort the active event_loop() immediately.
event_loop() will abort the loop after the next event is completed;
event_loopbreak() is typically invoked from this event's callback.
This behavior is analogous to the "break;" statement.
Subsequent invocations of event_loop() will proceed normally.
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_loopbreak().
@return 0 if successful, or -1 if an error occurred
@see event_base_loopbreak(), event_loopexit()
*/
int event_loopbreak(void);
/**
Schedule a one-time event to occur.
The function event_once() is similar to event_set(). However, it schedules
a callback to be called exactly once and does not require the caller to
prepare an event structure.
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_once().
@param fd a file descriptor to monitor
@param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
EV_WRITE
@param callback callback function to be invoked when the event occurs
@param arg an argument to be passed to the callback function
@param timeout the maximum amount of time to wait for the event, or NULL
to wait forever
@return 0 if successful, or -1 if an error occurred
@see event_set()
*/
int event_once(evutil_socket_t , short,
void (*)(evutil_socket_t, short, void *), void *, const struct timeval *);
/**
Get the kernel event notification mechanism used by Libevent.
@return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_get_method().
*/
const char *event_get_method(void);
/**
Set the number of different event priorities.
By default Libevent schedules all active events with the same priority.
However, it is sometimes desirable to process some events with a higher
priority than others. For that reason, Libevent supports strict priority
queues. Active events with a lower priority are always processed before
events with a higher priority.
The number of different priorities can be set initially with the
event_priority_init() function. This function should be called before the
first call to event_dispatch(). The event_priority_set() function can be
used to assign a priority to an event. By default, Libevent assigns the
middle priority to all events unless their priority is explicitly set.
@deprecated This function is deprecated because it is easily confused by
multiple calls to event_init(), and because it is not safe for
multithreaded use. The replacement is event_base_priority_init().
@param npriorities the maximum number of priorities
@return 0 if successful, or -1 if an error occurred
@see event_base_priority_init(), event_priority_set()
*/
int event_priority_init(int);
/**
Prepare an event structure to be added.
The function event_set() prepares the event structure ev to be used in
future calls to event_add() and event_del(). The event will be prepared to
call the function specified by the fn argument with an int argument
indicating the file descriptor, a short argument indicating the type of
event, and a void * argument given in the arg argument. The fd indicates
the file descriptor that should be monitored for events. The events can be
either EV_READ, EV_WRITE, or both, indicating that an application can read
or write from the file descriptor, respectively, without blocking.
The function fn will be called with the file descriptor that triggered the
event and the type of event which will be either EV_TIMEOUT, EV_SIGNAL,
EV_READ, or EV_WRITE. The additional flag EV_PERSIST makes an event_add()
persistent until event_del() has been called.
For read and write events, edge-triggered behavior can be requested
with the EV_ET flag. Not all backends support edge-triggered
behavior. When an edge-triggered event is activated, the EV_ET flag
is added to its events argument.
@param ev an event struct to be modified
@param fd the file descriptor to be monitored
@param event desired events to monitor; can be EV_READ and/or EV_WRITE
@param fn callback function to be invoked when the event occurs
@param arg an argument to be passed to the callback function
@see event_add(), event_del(), event_once()
@deprecated event_set() is not recommended for new code, because it requires
a subsequent call to event_base_set() to be safe under many circumstances.
Use event_assign() or event_new() instead.
*/
void event_set(struct event *, evutil_socket_t, short, void (*)(evutil_socket_t, short, void *), void *);
#define evtimer_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg))
#define evsignal_set(ev, x, cb, arg) \
event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
/**
* Add a timeout event.
*
* @param ev the event struct to be added
* @param tv the timeout value, as a struct timeval
*
* @deprecated This macro is deprecated because its naming is inconsistent.
* The recommended macro is evtimer_add().
*/
#define timeout_add(ev, tv) event_add((ev), (tv))
/**
* Define a timeout event.
*
* @param ev the event struct to be defined
* @param cb the callback to be invoked when the timeout expires
* @param arg the argument to be passed to the callback
*
* @deprecated This macro is deprecated because its naming is inconsistent.
* The recommended macro is evtimer_set().
*/
#define timeout_set(ev, cb, arg) event_set((ev), -1, 0, (cb), (arg))
/**
* Disable a timeout event.
*
* @param ev the timeout event to be disabled
*
* @deprecated This macro is deprecated because its naming is inconsistent.
* The recommended macro is evtimer_del().
*/
#define timeout_del(ev) event_del(ev)
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evtimer_pending().
*/
#define timeout_pending(ev, tv) event_pending((ev), EV_TIMEOUT, (tv))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evtimer_initialized().
*/
#define timeout_initialized(ev) _event_initialized((ev), 0)
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evsignal_add().
*/
#define signal_add(ev, tv) event_add((ev), (tv))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evsignal_set().
*/
#define signal_set(ev, x, cb, arg) \
event_set((ev), (x), EV_SIGNAL|EV_PERSIST, (cb), (arg))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evsignal_del().
*/
#define signal_del(ev) event_del(ev)
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evsignal_pending().
*/
#define signal_pending(ev, tv) event_pending((ev), EV_SIGNAL, (tv))
/**
@deprecated This macro is deprecated because its naming is inconsistent.
The recommended macro is evsignal_initialized().
*/
#define signal_initialized(ev) _event_initialized((ev), 0)
#ifndef EVENT_FD
/* These macros are obsolete; use event_get_fd and event_get_signal instead. */
#define EVENT_FD(ev) ((int)event_get_fd(ev))
#define EVENT_SIGNAL(ev) event_get_signal(ev)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_EVENT_COMPAT_H_ */

View File

@ -1,139 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_EVENT_STRUCT_H_
#define _EVENT2_EVENT_STRUCT_H_
/** @file event_struct.h
Structures used by event.h. Using these structures directly may harm
forward compatibility: be careful!
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/* For evkeyvalq */
#include <event2/keyvalq_struct.h>
#define EVLIST_TIMEOUT 0x01
#define EVLIST_INSERTED 0x02
#define EVLIST_SIGNAL 0x04
#define EVLIST_ACTIVE 0x08
#define EVLIST_INTERNAL 0x10
#define EVLIST_INIT 0x80
/* EVLIST_X_ Private space: 0x1000-0xf000 */
#define EVLIST_ALL (0xf000 | 0x9f)
/* Fix so that people don't have to run with <sys/queue.h> */
#ifndef TAILQ_ENTRY
#define _EVENT_DEFINED_TQENTRY
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
#define _EVENT_DEFINED_TQHEAD
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
struct type **tqh_last; \
}
#endif
struct event_base;
struct event {
TAILQ_ENTRY(event) ev_active_next;
TAILQ_ENTRY(event) ev_next;
/* for managing timeouts */
union {
TAILQ_ENTRY(event) ev_next_with_common_timeout;
int min_heap_idx;
} ev_timeout_pos;
evutil_socket_t ev_fd;
struct event_base *ev_base;
union {
/* used for io events */
struct {
TAILQ_ENTRY(event) ev_io_next;
struct timeval ev_timeout;
} ev_io;
/* used by signal events */
struct {
TAILQ_ENTRY(event) ev_signal_next;
short ev_ncalls;
/* Allows deletes in callback */
short *ev_pncalls;
} ev_signal;
} _ev;
short ev_events;
short ev_res; /* result passed to event callback */
short ev_flags;
ev_uint8_t ev_pri; /* smaller numbers are higher priority */
ev_uint8_t ev_closure;
struct timeval ev_timeout;
/* allows us to adopt for different types of events */
void (*ev_callback)(evutil_socket_t, short, void *arg);
void *ev_arg;
};
TAILQ_HEAD (event_list, event);
#ifdef _EVENT_DEFINED_TQENTRY
#undef TAILQ_ENTRY
#endif
#ifdef _EVENT_DEFINED_TQHEAD
#undef TAILQ_HEAD
#endif
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_EVENT_STRUCT_H_ */

View File

@ -1,565 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_HTTP_H_
#define _EVENT2_HTTP_H_
/* For int types. */
#include <event2/util.h>
#ifdef __cplusplus
extern "C" {
#endif
/* In case we haven't included the right headers yet. */
struct evbuffer;
struct event_base;
/** @file http.h
*
* Basic support for HTTP serving.
*
* As Libevent is a library for dealing with event notification and most
* interesting applications are networked today, I have often found the
* need to write HTTP code. The following prototypes and definitions provide
* an application with a minimal interface for making HTTP requests and for
* creating a very simple HTTP server.
*/
/* Response codes */
#define HTTP_OK 200 /**< request completed ok */
#define HTTP_NOCONTENT 204 /**< request does not have content */
#define HTTP_MOVEPERM 301 /**< the uri moved permanently */
#define HTTP_MOVETEMP 302 /**< the uri moved temporarily */
#define HTTP_NOTMODIFIED 304 /**< page was not modified from last */
#define HTTP_BADREQUEST 400 /**< invalid http request was made */
#define HTTP_NOTFOUND 404 /**< could not find content for uri */
#define HTTP_SERVUNAVAIL 503 /**< the server is not available */
struct evhttp;
struct evhttp_request;
struct evkeyvalq;
struct evhttp_bound_socket;
/**
* Create a new HTTP server.
*
* @param base (optional) the event base to receive the HTTP events
* @return a pointer to a newly initialized evhttp server structure
* @see evhttp_free()
*/
struct evhttp *evhttp_new(struct event_base *base);
/**
* Binds an HTTP server on the specified address and port.
*
* Can be called multiple times to bind the same http server
* to multiple different ports.
*
* @param http a pointer to an evhttp object
* @param address a string containing the IP address to listen(2) on
* @param port the port number to listen on
* @return 0 on success, -1 on failure.
* @see evhttp_accept_socket()
*/
int evhttp_bind_socket(struct evhttp *http, const char *address, ev_uint16_t port);
/**
* Like evhttp_bind_socket(), but returns a handle for referencing the socket.
*
* The returned pointer is not valid after \a http is freed.
*
* @param http a pointer to an evhttp object
* @param address a string containing the IP address to listen(2) on
* @param port the port number to listen on
* @return Handle for the socket on success, NULL on failure.
* @see evhttp_bind_socket(), evhttp_del_accept_socket()
*/
struct evhttp_bound_socket *evhttp_bind_socket_with_handle(struct evhttp *http, const char *address, ev_uint16_t port);
/**
* Makes an HTTP server accept connections on the specified socket.
*
* This may be useful to create a socket and then fork multiple instances
* of an http server, or when a socket has been communicated via file
descriptor passing in situations where an http server does not have
* permissions to bind to a low-numbered port.
*
* Can be called multiple times to have the http server listen to
* multiple different sockets.
*
* @param http a pointer to an evhttp object
* @param fd a socket fd that is ready for accepting connections
* @return 0 on success, -1 on failure.
* @see evhttp_bind_socket()
*/
int evhttp_accept_socket(struct evhttp *http, evutil_socket_t fd);
/**
* Like evhttp_accept_socket(), but returns a handle for referencing the socket.
*
* The returned pointer is not valid after \a http is freed.
*
* @param http a pointer to an evhttp object
* @param fd a socket fd that is ready for accepting connections
* @return Handle for the socket on success, NULL on failure.
* @see evhttp_accept_socket(), evhttp_del_accept_socket()
*/
struct evhttp_bound_socket *evhttp_accept_socket_with_handle(struct evhttp *http, evutil_socket_t fd);
/**
* Makes an HTTP server stop accepting connections on the specified socket
*
* This may be useful when a socket has been sent via file descriptor passing
* and is no longer needed by the current process.
*
* This function does not close the socket.
*
* \a bound_socket is an invalid pointer after this call returns.
*
* @param http a pointer to an evhttp object
* @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle
* @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle()
*/
void evhttp_del_accept_socket(struct evhttp *http, struct evhttp_bound_socket *bound_socket);
/**
* Get the raw file descriptor referenced by an evhttp_bound_socket.
*
* @param bound_socket a handle returned by evhttp_{bind,accept}_socket_with_handle
* @return the file descriptor used by the bound socket
* @see evhttp_bind_socket_with_handle(), evhttp_accept_socket_with_handle()
*/
evutil_socket_t evhttp_bound_socket_get_fd(struct evhttp_bound_socket *bound_socket);
/**
* Free the previously created HTTP server.
*
* Works only if no requests are currently being served.
*
* @param http the evhttp server object to be freed
* @see evhttp_start()
*/
void evhttp_free(struct evhttp* http);
/** XXX Document. */
void evhttp_set_max_headers_size(struct evhttp* http, ev_ssize_t max_headers_size);
/** XXX Document. */
void evhttp_set_max_body_size(struct evhttp* http, ev_ssize_t max_body_size);
/**
Set a callback for a specified URI
@param http the http server on which to set the callback
@param path the path for which to invoke the callback
@param cb the callback function that gets invoked on requesting path
@param cb_arg an additional context argument for the callback
@return 0 on success, -1 if the callback existed already, -2 on failure
*/
int evhttp_set_cb(struct evhttp *http, const char *path,
void (*cb)(struct evhttp_request *, void *), void *cb_arg);
/** Removes the callback for a specified URI */
int evhttp_del_cb(struct evhttp *, const char *);
/**
Set a callback for all requests that are not caught by specific callbacks
Invokes the specified callback for all requests that do not match any of
the previously specified request paths. This is a catchall for requests not
specifically configured with evhttp_set_cb().
@param http the evhttp server object for which to set the callback
@param cb the callback to invoke for any unmatched requests
@param arg a context argument for the callback
*/
void evhttp_set_gencb(struct evhttp *http,
void (*cb)(struct evhttp_request *, void *), void *arg);
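/*
  Minimal server-setup sketch (illustrative, not part of the original
  header); "base", "hello_cb", "default_cb", the path and the port are
  hypothetical.

  @code
  struct evhttp *http = evhttp_new(base);
  evhttp_bind_socket(http, "0.0.0.0", 8080);
  evhttp_set_cb(http, "/hello", hello_cb, NULL);  // exact-path callback
  evhttp_set_gencb(http, default_cb, NULL);       // everything else
  @endcode
*/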
/**
Adds a virtual host to the http server.
A virtual host is a newly initialized evhttp object that has request
callbacks set on it via evhttp_set_cb() or evhttp_set_gencb(). It
must not have any listening sockets associated with it.
If the virtual host has not been removed by the time that evhttp_free()
is called on the main http server, it will be automatically freed, too.
It is possible to have hierarchical vhosts. For example: A vhost
with the pattern *.example.com may have other vhosts with patterns
foo.example.com and bar.example.com associated with it.
@param http the evhttp object to which to add a virtual host
@param pattern the glob pattern against which the hostname is matched.
The match is case insensitive and otherwise follows regular shell
matching.
@param vhost the virtual host to add to the regular http server.
@return 0 on success, -1 on failure
@see evhttp_remove_virtual_host()
*/
int evhttp_add_virtual_host(struct evhttp* http, const char *pattern,
struct evhttp* vhost);
/**
Removes a virtual host from the http server.
@param http the evhttp object from which to remove the virtual host
@param vhost the virtual host to remove from the regular http server.
@return 0 on success, -1 on failure
@see evhttp_add_virtual_host()
*/
int evhttp_remove_virtual_host(struct evhttp* http, struct evhttp* vhost);
/**
* Set the timeout for an HTTP request.
*
* @param http an evhttp object
* @param timeout_in_secs the timeout, in seconds
*/
void evhttp_set_timeout(struct evhttp *http, int timeout_in_secs);
/* Request/Response functionality */
/**
* Send an HTML error message to the client.
*
* @param req a request object
* @param error the HTTP error code
* @param reason a brief explanation of the error. If this is NULL, we'll
* just use the standard meaning of the error code.
*/
void evhttp_send_error(struct evhttp_request *req, int error,
const char *reason);
/**
* Send an HTML reply to the client.
*
* The body of the reply consists of the data in databuf. After calling
* evhttp_send_reply() databuf will be empty, but the buffer is still
* owned by the caller and needs to be deallocated by the caller if
* necessary.
*
* @param req a request object
* @param code the HTTP response code to send
* @param reason a brief message to send with the response code
* @param databuf the body of the response
*/
void evhttp_send_reply(struct evhttp_request *req, int code,
const char *reason, struct evbuffer *databuf);
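/*
  Request-callback sketch (illustrative, not part of the original header),
  showing a typical evhttp_send_reply() call with a caller-owned evbuffer;
  "hello_cb" is a hypothetical name.

  @code
  static void hello_cb(struct evhttp_request *req, void *arg)
  {
      struct evbuffer *buf = evbuffer_new();
      evbuffer_add_printf(buf, "Hello from %s\n", evhttp_request_get_uri(req));
      evhttp_send_reply(req, HTTP_OK, "OK", buf);
      evbuffer_free(buf);   // the reply drained buf; the caller still owns it
  }
  @endcode
*/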
/* Low-level response interface, for streaming/chunked replies */
/**
Initiate a reply that uses Transfer-Encoding chunked.
This allows the caller to stream the reply back to the client and is
useful when either not all of the reply data is immediately available
or when sending very large replies.
The caller needs to supply data chunks with evhttp_send_reply_chunk()
and complete the reply by calling evhttp_send_reply_end().
@param req a request object
@param code the HTTP response code to send
@param reason a brief message to send with the response code
*/
void evhttp_send_reply_start(struct evhttp_request *req, int code,
const char *reason);
/**
Send another data chunk as part of an ongoing chunked reply.
The reply chunk consists of the data in databuf. After calling
evhttp_send_reply_chunk() databuf will be empty, but the buffer is
still owned by the caller and needs to be deallocated by the caller
if necessary.
@param req a request object
@param databuf the data chunk to send as part of the reply.
*/
void evhttp_send_reply_chunk(struct evhttp_request *req,
struct evbuffer *databuf);
/**
Complete a chunked reply.
@param req a request object
*/
void evhttp_send_reply_end(struct evhttp_request *req);
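/*
  Chunked-reply sketch (illustrative, not part of the original header):
  stream the body in pieces instead of building it up front.

  @code
  evhttp_send_reply_start(req, HTTP_OK, "OK");

  struct evbuffer *chunk = evbuffer_new();
  evbuffer_add_printf(chunk, "part of a long reply...\n");
  evhttp_send_reply_chunk(req, chunk);   // may be called repeatedly
  evbuffer_free(chunk);

  evhttp_send_reply_end(req);
  @endcode
*/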
/*
* Interfaces for making requests
*/
/** the different request types supported by evhttp */
enum evhttp_cmd_type {
EVHTTP_REQ_GET,
EVHTTP_REQ_POST,
EVHTTP_REQ_HEAD,
EVHTTP_REQ_PUT,
EVHTTP_REQ_DELETE
};
/** a request object can represent either a request or a reply */
enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
/**
* Creates a new request object that needs to be filled in with the request
* parameters. The callback is executed when the request completed or an
* error occurred.
*/
struct evhttp_request *evhttp_request_new(
void (*cb)(struct evhttp_request *, void *), void *arg);
/**
* Enable delivery of chunks to requestor.
* @param cb will be called after every read of data with the same argument
* as the completion callback. Will never be called on an empty
* response. May drain the input buffer; it will be drained
* automatically on return.
*/
void evhttp_request_set_chunked_cb(struct evhttp_request *,
void (*cb)(struct evhttp_request *, void *));
/** Frees the request object and removes associated events. */
void evhttp_request_free(struct evhttp_request *req);
struct evdns_base;
/**
* A connection object that can be used for making HTTP requests. The
* connection object tries to resolve the address and establish the connection
* when it is given an http request object.
*
* @param base the event_base to use for handling the connection
* @param dnsbase the dns_base to use for resolving host names; if not
specified, host name resolution will block.
* @param address the address to which to connect
* @param port the port to connect to
* @return an evhttp_connection object that can be used for making requests
*/
struct evhttp_connection *evhttp_connection_base_new(
struct event_base *base, struct evdns_base *dnsbase,
const char *address, unsigned short port);
/** Takes ownership of the request object
*
* Can be used in a request callback to hold onto the request until
* evhttp_request_free() is explicitly called by the user.
*/
void evhttp_request_own(struct evhttp_request *req);
/** Returns 1 if the request is owned by the user */
int evhttp_request_is_owned(struct evhttp_request *req);
/**
* Returns the connection object associated with the request or NULL
*
* The server needs to either free the request explicitly or call
* evhttp_send_reply_end().
*/
struct evhttp_connection *evhttp_request_get_connection(struct evhttp_request *req);
void evhttp_connection_set_max_headers_size(struct evhttp_connection *evcon,
ev_ssize_t new_max_headers_size);
void evhttp_connection_set_max_body_size(struct evhttp_connection* evcon,
ev_ssize_t new_max_body_size);
/** Frees an http connection */
void evhttp_connection_free(struct evhttp_connection *evcon);
/** sets the ip address from which http connections are made */
void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
const char *address);
/** sets the local port from which http connections are made */
void evhttp_connection_set_local_port(struct evhttp_connection *evcon,
ev_uint16_t port);
/** Sets the timeout for events related to this connection */
void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
int timeout_in_secs);
/** Sets the retry limit for this connection - -1 repeats indefinitely */
void evhttp_connection_set_retries(struct evhttp_connection *evcon,
int retry_max);
/** Set a callback for connection close. */
void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
void (*)(struct evhttp_connection *, void *), void *);
/** Get the remote address and port associated with this connection. */
void evhttp_connection_get_peer(struct evhttp_connection *evcon,
char **address, ev_uint16_t *port);
/**
Make an HTTP request over the specified connection.
The connection gets ownership of the request. On failure, the
request object is no longer valid as it has been freed.
@param evcon the evhttp_connection object over which to send the request
@param req the previously created and configured request object
@param type the request type EVHTTP_REQ_GET, EVHTTP_REQ_POST, etc.
@param uri the URI associated with the request
@return 0 on success, -1 on failure
@see evhttp_cancel_request()
*/
int evhttp_make_request(struct evhttp_connection *evcon,
struct evhttp_request *req,
enum evhttp_cmd_type type, const char *uri);
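/*
  Client-side sketch (illustrative, not part of the original header);
  "request_done_cb" and the host name are hypothetical. Passing NULL for
  dnsbase means the name lookup blocks, as noted above.

  @code
  struct evhttp_connection *conn =
      evhttp_connection_base_new(base, NULL, "www.example.com", 80);
  struct evhttp_request *req = evhttp_request_new(request_done_cb, NULL);
  evhttp_add_header(evhttp_request_get_output_headers(req),
      "Host", "www.example.com");
  evhttp_make_request(conn, req, EVHTTP_REQ_GET, "/");  // conn now owns req
  @endcode
*/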
/**
Cancels a pending HTTP request.
Cancels an ongoing HTTP request. The callback associated with this request
is not executed and the request object is freed. If the request is
currently being processed, e.g. it is ongoing, the corresponding
evhttp_connection object is going to get reset.
A request cannot be canceled if its callback has executed already. A request
may be canceled reentrantly from its chunked callback.
@param req the evhttp_request to cancel; req becomes invalid after this call.
*/
void evhttp_cancel_request(struct evhttp_request *req);
/** Returns the request URI */
const char *evhttp_request_get_uri(struct evhttp_request *req);
/** Returns the input headers */
struct evkeyvalq *evhttp_request_get_input_headers(struct evhttp_request *req);
/** Returns the output headers */
struct evkeyvalq *evhttp_request_get_output_headers(struct evhttp_request *req);
/** Returns the input buffer */
struct evbuffer *evhttp_request_get_input_buffer(struct evhttp_request *req);
/** Returns the output buffer */
struct evbuffer *evhttp_request_get_output_buffer(struct evhttp_request *req);
/* Interfaces for dealing with HTTP headers */
/**
Finds the value belonging to a header.
@param headers the evkeyvalq object in which to find the header
@param key the name of the header to find
@returns a pointer to the value for the header or NULL if the header
could not be found.
@see evhttp_add_header(), evhttp_remove_header()
*/
const char *evhttp_find_header(const struct evkeyvalq *headers,
const char *key);
/**
Removes a header from a list of existing headers.
@param headers the evkeyvalq object from which to remove a header
@param key the name of the header to remove
@returns 0 if the header was removed, -1 otherwise.
@see evhttp_find_header(), evhttp_add_header()
*/
int evhttp_remove_header(struct evkeyvalq *headers, const char *key);
/**
Adds a header to a list of existing headers.
@param headers the evkeyvalq object to which to add a header
@param key the name of the header
@param value the value belonging to the header
@returns 0 on success, -1 otherwise.
@see evhttp_find_header(), evhttp_clear_headers()
*/
int evhttp_add_header(struct evkeyvalq *headers, const char *key, const char *value);
/**
Removes all headers from the header list.
@param headers the evkeyvalq object from which to remove all headers
*/
void evhttp_clear_headers(struct evkeyvalq *headers);
/* Miscellaneous utility functions */
/**
Helper function to encode a URI.
The returned string must be freed by the caller.
@param uri an unencoded URI
@return a newly allocated URI-encoded string or NULL on failure
*/
char *evhttp_encode_uri(const char *uri);
/**
Helper function to decode a URI.
The returned string must be freed by the caller.
@param uri an encoded URI
@return a newly allocated unencoded URI or NULL on failure
*/
char *evhttp_decode_uri(const char *uri);
/**
Helper function to parse out arguments in a query.
Parsing a uri like
http://foo.com/?q=test&s=some+thing
will result in two entries in the key value queue.
The first entry is: key="q", value="test"
The second entry is: key="s", value="some thing"
@param uri the request URI
@param headers the head of the evkeyval queue
*/
void evhttp_parse_query(const char *uri, struct evkeyvalq *headers);
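/*
  Query-parsing sketch (illustrative, not part of the original header),
  matching the example URI in the documentation above.

  @code
  struct evkeyvalq params;
  evhttp_parse_query("http://foo.com/?q=test&s=some+thing", &params);
  const char *q = evhttp_find_header(&params, "q");   // "test"
  const char *s = evhttp_find_header(&params, "s");   // "some thing"
  evhttp_clear_headers(&params);                      // free the parsed entries
  @endcode
*/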
/**
* Escape HTML character entities in a string.
*
* Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
* &#039; and &amp; correspondingly.
*
* The returned string needs to be freed by the caller.
*
* @param html an unescaped HTML string
* @return an escaped HTML string or NULL on error
*/
char *evhttp_htmlescape(const char *html);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_HTTP_H_ */

View File

@ -1,90 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_HTTP_COMPAT_H_
#define _EVENT2_HTTP_COMPAT_H_
/** @file http_compat.h
Potentially non-threadsafe versions of the functions in http.h: provided
only for backwards compatibility.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/**
* Start an HTTP server on the specified address and port
*
* @deprecated It does not allow an event base to be specified
*
* @param address the address to which the HTTP server should be bound
* @param port the port number on which the HTTP server should listen
* @return an struct evhttp object
*/
struct evhttp *evhttp_start(const char *address, unsigned short port);
/**
* A connection object that can be used for making HTTP requests. The
* connection object tries to establish the connection when it is given an
* http request object.
*
* @deprecated It does not allow an event base to be specified
*/
struct evhttp_connection *evhttp_connection_new(
const char *address, unsigned short port);
/**
* Associates an event base with the connection - can only be called
* on a freshly created connection object that has not been used yet.
*
* @deprecated XXXX Why?
*/
void evhttp_connection_set_base(struct evhttp_connection *evcon,
struct event_base *base);
/** Returns the request URI */
#define evhttp_request_uri evhttp_request_get_uri
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_HTTP_COMPAT_H_ */

View File

@ -1,126 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_HTTP_STRUCT_H_
#define _EVENT2_HTTP_STRUCT_H_
/** @file http_struct.h
Data structures for http. Using these structures may hurt forward
compatibility with later versions of Libevent: be careful!
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
/**
* the request structure that a server receives.
* WARNING: expect this structure to change. I will try to provide
* reasonable accessors.
*/
struct evhttp_request {
#if defined(TAILQ_ENTRY)
TAILQ_ENTRY(evhttp_request) next;
#else
struct {
struct evhttp_request *tqe_next;
struct evhttp_request **tqe_prev;
} next;
#endif
/* the connection object that this request belongs to */
struct evhttp_connection *evcon;
int flags;
/** The request obj owns the evhttp connection and needs to free it */
#define EVHTTP_REQ_OWN_CONNECTION 0x0001
/** Request was made via a proxy */
#define EVHTTP_PROXY_REQUEST 0x0002
/** The request object is owned by the user; the user must free it */
#define EVHTTP_USER_OWNED 0x0004
/** The request will be used again upstack; freeing must be deferred */
#define EVHTTP_REQ_DEFER_FREE 0x0008
/** The request should be freed upstack */
#define EVHTTP_REQ_NEEDS_FREE 0x0010
struct evkeyvalq *input_headers;
struct evkeyvalq *output_headers;
/* address of the remote host and the port connection came from */
char *remote_host;
ev_uint16_t remote_port;
enum evhttp_request_kind kind;
enum evhttp_cmd_type type;
size_t headers_size;
size_t body_size;
char *uri; /* uri after HTTP request was parsed */
char major; /* HTTP Major number */
char minor; /* HTTP Minor number */
int response_code; /* HTTP Response code */
char *response_code_line; /* Readable response */
struct evbuffer *input_buffer; /* read data */
ev_int64_t ntoread;
int chunked:1, /* a chunked request */
userdone:1; /* the user has sent all data */
struct evbuffer *output_buffer; /* outgoing post or data */
/* Callback */
void (*cb)(struct evhttp_request *, void *);
void *cb_arg;
/*
* Chunked data callback - call for each completed chunk if
* specified. If not specified, all the data is delivered via
* the regular callback.
*/
void (*chunk_cb)(struct evhttp_request *, void *);
};
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_HTTP_STRUCT_H_ */

View File

@ -1,80 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_EVENT_KEYVALQ_STRUCT_H_
#define _EVENT2_EVENT_KEYVALQ_STRUCT_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Fix so that people don't have to run with <sys/queue.h> */
/* XXXX This code is duplicated with event_struct.h */
#ifndef TAILQ_ENTRY
#define _EVENT_DEFINED_TQENTRY
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
#endif /* !TAILQ_ENTRY */
#ifndef TAILQ_HEAD
#define _EVENT_DEFINED_TQHEAD
#define TAILQ_HEAD(name, type) \
struct name { \
struct type *tqh_first; \
struct type **tqh_last; \
}
#endif
/*
* Key-Value pairs. Can be used for HTTP headers but also for
* query argument parsing.
*/
struct evkeyval {
TAILQ_ENTRY(evkeyval) next;
char *key;
char *value;
};
TAILQ_HEAD (evkeyvalq, evkeyval);
/* XXXX This code is duplicated with event_struct.h */
#ifdef _EVENT_DEFINED_TQENTRY
#undef TAILQ_ENTRY
#endif
#ifdef _EVENT_DEFINED_TQHEAD
#undef TAILQ_HEAD
#endif
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,119 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_LISTENER_H_
#define _EVENT2_LISTENER_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event.h>
struct sockaddr;
struct evconnlistener;
/**
A callback that we invoke when a listener has a new connection.
@param listener The evconnlistener
@param fd The new file descriptor
@param addr The source address of the connection
@param socklen The length of addr
@param user_arg the pointer passed to evconnlistener_new()
*/
typedef void (*evconnlistener_cb)(struct evconnlistener *, evutil_socket_t, struct sockaddr *, int socklen, void *);
/** Flag: Indicates that we should not make incoming sockets nonblocking
* before passing them to the callback. */
#define LEV_OPT_LEAVE_SOCKETS_BLOCKING (1u<<0)
/** Flag: Indicates that freeing the listener should close the underlying
* socket. */
#define LEV_OPT_CLOSE_ON_FREE (1u<<1)
/** Flag: Indicates that we should set the close-on-exec flag, if possible */
#define LEV_OPT_CLOSE_ON_EXEC (1u<<2)
/** Flag: Indicates that we should disable the timeout (if any) between when
* this socket is closed and when we can listen again on the same port. */
#define LEV_OPT_REUSEABLE (1u<<3)
/**
Allocate a new evconnlistener object to listen for incoming TCP connections
on a given file descriptor.
@param base The event base to associate the listener with.
@param cb A callback to be invoked when a new connection arrives.
@param ptr A user-supplied pointer to give to the callback.
@param flags Any number of LEV_OPT_* flags
@param backlog Passed to the listen() call to determine the length of the
acceptable connection backlog. Set to -1 for a reasonable default.
Set to 0 if the socket is already listening.
@param fd The file descriptor to listen on. It must be a nonblocking
file descriptor, and it should already be bound to an appropriate
port and address.
*/
struct evconnlistener *evconnlistener_new(struct event_base *base,
evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
evutil_socket_t fd);
/**
Allocate a new evconnlistener object to listen for incoming TCP connections
on a given address.
@param base The event base to associate the listener with.
@param cb A callback to be invoked when a new connection arrives.
@param ptr A user-supplied pointer to give to the callback.
@param flags Any number of LEV_OPT_* flags
@param backlog Passed to the listen() call to determine the length of the
acceptable connection backlog. Set to -1 for a reasonable default.
@param addr The address to listen for connections on.
@param socklen The length of the address.
*/
struct evconnlistener *evconnlistener_new_bind(struct event_base *base,
evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
const struct sockaddr *sa, int socklen);
/**
Disable and deallocate an evconnlistener.
*/
void evconnlistener_free(struct evconnlistener *lev);
/**
Re-enable an evconnlistener that has been disabled.
*/
int evconnlistener_enable(struct evconnlistener *lev);
/**
Stop listening for connections on an evconnlistener.
*/
int evconnlistener_disable(struct evconnlistener *lev);
/** Return an evconnlistener's associated event_base. */
struct event_base *evconnlistener_get_base(struct evconnlistener *lev);
/** Return the socket that an evconnlistener is listening on. */
evutil_socket_t evconnlistener_get_fd(struct evconnlistener *lev);
#ifdef __cplusplus
}
#endif
#endif
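
/* Illustrative sketch (not part of this changeset): accept TCP connections on
 * a hypothetical port 9995 and immediately close them.  Assumes a POSIX
 * system; error handling is trimmed for brevity. */
#include <string.h>
#include <netinet/in.h>
#include <event2/event.h>
#include <event2/listener.h>
#include <event2/util.h>

static void
accept_cb(struct evconnlistener *listener, evutil_socket_t fd,
    struct sockaddr *addr, int socklen, void *arg)
{
    (void)listener; (void)addr; (void)socklen; (void)arg;
    evutil_closesocket(fd);            /* drop the connection right away */
}

int
main(void)
{
    struct event_base *base = event_base_new();
    struct evconnlistener *lev;
    struct sockaddr_in sin;

    memset(&sin, 0, sizeof(sin));
    sin.sin_family = AF_INET;          /* bind to 0.0.0.0:9995 */
    sin.sin_port = htons(9995);

    lev = evconnlistener_new_bind(base, accept_cb, NULL,
        LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
        (struct sockaddr *)&sin, (int)sizeof(sin));
    if (lev == NULL)
        return 1;

    event_base_dispatch(base);
    evconnlistener_free(lev);
    event_base_free(base);
    return 0;
}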

View file

@ -1,596 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_RPC_H_
#define _EVENT2_RPC_H_
#ifdef __cplusplus
extern "C" {
#endif
/** @file rpc.h
*
* This header files provides basic support for an RPC server and client.
*
* To support RPCs in a server, every supported RPC command needs to be
* defined and registered.
*
* EVRPC_HEADER(SendCommand, Request, Reply);
*
* SendCommand is the name of the RPC command.
* Request is the name of a structure generated by event_rpcgen.py.
* It contains all parameters relating to the SendCommand RPC. The
* server needs to fill in the Reply structure.
* Reply is the name of a structure generated by event_rpcgen.py. It
* contains the answer to the RPC.
*
* To register an RPC with an HTTP server, you need to first create an RPC
* base with:
*
* struct evrpc_base *base = evrpc_init(http);
*
* A specific RPC can then be registered with
*
* EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
*
* when the server receives an appropriately formatted RPC, the user callback
* is invoked. The callback needs to fill in the reply structure.
*
* void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
*
* To send the reply, call EVRPC_REQUEST_DONE(rpc);
*
* See the regression test for an example.
*/
/**
Determines if the member has been set in the message
@param msg the message to inspect
@param member the member variable to test for presence
@return 1 if it's present or 0 otherwise.
*/
#define EVTAG_HAS(msg, member) \
((msg)->member##_set == 1)
#ifndef _EVENT2_RPC_COMPAT_H_
/**
Assigns a value to the member in the message.
@param msg the message to which to assign a value
@param member the name of the member variable
@param value the value to assign
*/
#define EVTAG_ASSIGN(msg, member, value) \
(*(msg)->base->member##_assign)((msg), (value))
/**
Assigns a value to the member in the message.
@param msg the message to which to assign a value
@param member the name of the member variable
@param value the value to assign
@param len the length of the value
*/
#define EVTAG_ASSIGN_WITH_LEN(msg, member, value, len) \
(*(msg)->base->member##_assign)((msg), (value), (len))
/**
Returns the value for a member.
@param msg the message from which to get the value
@param member the name of the member variable
@param pvalue a pointer to the variable to hold the value
@return 0 on success, -1 otherwise.
*/
#define EVTAG_GET(msg, member, pvalue) \
(*(msg)->base->member##_get)((msg), (pvalue))
/**
Returns the value for a member.
@param msg the message from which to get the value
@param member the name of the member variable
@param pvalue a pointer to the variable to hold the value
@param plen a pointer to the length of the value
@return 0 on success, -1 otherwise.
*/
#define EVTAG_GET_WITH_LEN(msg, member, pvalue, plen) \
(*(msg)->base->member##_get)((msg), (pvalue), (plen))
#endif /* _EVENT2_RPC_COMPAT_H_ */
/**
Adds a value to an array.
*/
#define EVTAG_ARRAY_ADD_VALUE(msg, member, value) \
(*(msg)->base->member##_add)((msg), (value))
/**
Allocates a new entry in the array and returns it.
*/
#define EVTAG_ARRAY_ADD(msg, member) \
(*(msg)->base->member##_add)(msg)
/**
Gets a variable at the specified offset from the array.
*/
#define EVTAG_ARRAY_GET(msg, member, offset, pvalue) \
(*(msg)->base->member##_get)((msg), (offset), (pvalue))
/**
Returns the number of entries in the array.
*/
#define EVTAG_ARRAY_LEN(msg, member) ((msg)->member##_length)
struct evbuffer;
struct event_base;
struct evrpc_req_generic;
struct evrpc_request_wrapper;
struct evrpc;
/** The type of a specific RPC Message
*
* @param rpcname the name of the RPC message
*/
#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
struct evhttp_request;
struct evrpc_status;
struct evrpc_hook_meta;
/** Creates the definitions and prototypes for an RPC
*
* You need to use EVRPC_HEADER to create structures and function prototypes
* needed by the server and client implementation. The structures have to be
* defined in an .rpc file and converted to source code via event_rpcgen.py
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_GENERATE()
*/
#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
EVRPC_STRUCT(rpcname) { \
struct evrpc_hook_meta *hook_meta; \
struct reqstruct* request; \
struct rplystruct* reply; \
struct evrpc* rpc; \
struct evhttp_request* http_req; \
struct evbuffer* rpc_data; \
}; \
int evrpc_send_request_##rpcname(struct evrpc_pool *, \
struct reqstruct *, struct rplystruct *, \
void (*)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *);
struct evrpc_pool;
/** use EVRPC_GENERATE instead */
struct evrpc_request_wrapper *evrpc_make_request_ctx(
struct evrpc_pool *pool, void *request, void *reply,
const char *rpcname,
void (*req_marshal)(struct evbuffer*, void *),
void (*rpl_clear)(void *),
int (*rpl_unmarshal)(void *, struct evbuffer *),
void (*cb)(struct evrpc_status *, void *, void *, void *),
void *cbarg);
/** Creates a context structure that contains rpc specific information.
*
* EVRPC_MAKE_CTX is used to populate an RPC-specific context that
* contains information about marshaling the RPC data types.
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @param pool the evrpc_pool over which to make the request
* @param request a pointer to the RPC request structure object
* @param reply a pointer to the RPC reply structure object
* @param cb the callback function to call when the RPC has completed
* @param cbarg the argument to supply to the callback
*/
#define EVRPC_MAKE_CTX(rpcname, reqstruct, rplystruct, \
pool, request, reply, cb, cbarg) \
evrpc_make_request_ctx(pool, request, reply, \
#rpcname, \
(void (*)(struct evbuffer *, void *))reqstruct##_marshal, \
(void (*)(void *))rplystruct##_clear, \
(int (*)(void *, struct evbuffer *))rplystruct##_unmarshal, \
(void (*)(struct evrpc_status *, void *, void *, void *))cb, \
cbarg)
/** Generates the code for receiving and sending an RPC message
*
* EVRPC_GENERATE is used to create the code corresponding to sending
* and receiving a particular RPC message
*
* @param rpcname the name of the RPC
* @param reqstruct the name of the RPC request structure
* @param replystruct the name of the RPC reply structure
* @see EVRPC_HEADER()
*/
#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
struct reqstruct *request, struct rplystruct *reply, \
void (*cb)(struct evrpc_status *, \
struct reqstruct *, struct rplystruct *, void *cbarg), \
void *cbarg) { \
return evrpc_send_request_generic(pool, request, reply, \
(void (*)(struct evrpc_status *, void *, void *, void *))cb, \
cbarg, \
#rpcname, \
(void (*)(struct evbuffer *, void *))reqstruct##_marshal, \
(void (*)(void *))rplystruct##_clear, \
(int (*)(void *, struct evbuffer *))rplystruct##_unmarshal); \
}
/** Provides access to the HTTP request object underlying an RPC
*
* Access to the underlying HTTP object; can be used to look at headers or
* to get the remote IP address
*
* @param rpc_req the rpc request structure provided to the server callback
* @return a struct evhttp_request object that can be inspected for
* HTTP headers or sender information.
*/
#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
/** completes the server response to an rpc request */
void evrpc_request_done(struct evrpc_req_generic *req);
/** accessors for request and reply */
void *evrpc_get_request(struct evrpc_req_generic *req);
void *evrpc_get_reply(struct evrpc_req_generic *req);
/** Creates the reply to an RPC request
*
* EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
* to have been filled in. The request and reply pointers become invalid
* after this call has finished.
*
* @param rpc_req the rpc request structure provided to the server callback
*/
#define EVRPC_REQUEST_DONE(rpc_req) do { \
struct evrpc_req_generic *_req = (struct evrpc_req_generic *)(rpc_req); \
evrpc_request_done(_req); \
} while (0)
struct evrpc_base;
struct evhttp;
/* functions to start up the rpc system */
/** Creates a new rpc base from which RPC requests can be received
*
* @param server a pointer to an existing HTTP server
* @return a newly allocated evrpc_base struct
* @see evrpc_free()
*/
struct evrpc_base *evrpc_init(struct evhttp *server);
/**
* Frees the evrpc base
*
* For now, you are responsible for making sure that no rpcs are ongoing.
*
* @param base the evrpc_base object to be freed
* @see evrpc_init
*/
void evrpc_free(struct evrpc_base *base);
/** register RPCs with the HTTP Server
*
* registers a new RPC with the HTTP server; each RPC needs to have
* a unique name under which it can be identified.
*
* @param base the evrpc_base structure in which the RPC should be
* registered.
* @param name the name of the RPC
* @param request the name of the RPC request structure
* @param reply the name of the RPC reply structure
* @param callback the callback that should be invoked when the RPC
* is received. The callback has the following prototype
* void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
* @param cbarg an additional parameter that can be passed to the callback.
* The parameter can be used to carry around state.
*/
#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
evrpc_register_generic(base, #name, \
(void (*)(struct evrpc_req_generic *, void *))callback, cbarg, \
(void *(*)(void *))request##_new, NULL, \
(void (*)(void *))request##_free, \
(int (*)(void *, struct evbuffer *))request##_unmarshal, \
(void *(*)(void *))reply##_new, NULL, \
(void (*)(void *))reply##_free, \
(int (*)(void *))reply##_complete, \
(void (*)(struct evbuffer *, void *))reply##_marshal)
/**
Low level function for registering an RPC with a server.
Use EVRPC_REGISTER() instead.
@see EVRPC_REGISTER()
*/
int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
void (*)(struct evrpc_req_generic*, void *), void *);
/**
* Unregisters an already registered RPC
*
* @param base the evrpc_base object from which to unregister an RPC
* @param name the name of the rpc to unregister
* @return -1 on error or 0 when successful.
* @see EVRPC_REGISTER()
*/
#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc((base), #name)
int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
/*
* Client-side RPC support
*/
struct evhttp_connection;
struct evrpc_status;
/** launches an RPC and sends it to the server
*
* EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
*
* @param name the name of the RPC
* @param pool the evrpc_pool that contains the connection objects over which
* the request should be sent.
* @param request a pointer to the RPC request structure - it contains the
* data to be sent to the server.
* @param reply a pointer to the RPC reply structure. It is going to be filled
* if the request was answered successfully
* @param cb the callback to invoke when the RPC request has been answered
* @param cbarg an additional argument to be passed to the client
* @return 0 on success, -1 on failure
*/
#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \
evrpc_send_request_##name((pool), (request), (reply), (cb), (cbarg))
/**
Makes an RPC request based on the provided context.
This is a low-level function and should not be used directly
unless a custom context object is provided. Use EVRPC_MAKE_REQUEST()
instead.
@param ctx a context from EVRPC_MAKE_CTX()
@returns 0 on success, -1 otherwise.
@see EVRPC_MAKE_REQUEST(), EVRPC_MAKE_CTX()
*/
int evrpc_make_request(struct evrpc_request_wrapper *ctx);
/** creates an rpc connection pool
*
* a pool has a number of connections associated with it.
* rpc requests are always made via a pool.
*
* @param base a pointer to a struct event_base object; can be left NULL
* in single-threaded applications
* @return a newly allocated struct evrpc_pool object
* @see evrpc_pool_free()
*/
struct evrpc_pool *evrpc_pool_new(struct event_base *base);
/** frees an rpc connection pool
*
* @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
* @see evrpc_pool_new()
*/
void evrpc_pool_free(struct evrpc_pool *pool);
/**
* Adds a connection over which rpc can be dispatched to the pool.
*
* The connection object must have been newly created.
*
* @param pool the pool to which to add the connection
* @param evcon the connection to add to the pool.
*/
void evrpc_pool_add_connection(struct evrpc_pool *pool,
struct evhttp_connection *evcon);
/**
* Removes a connection from the pool.
*
* The connection object must have been newly created.
*
* @param pool the pool from which to remove the connection
* @param evcon the connection to remove from the pool.
*/
void evrpc_pool_remove_connection(struct evrpc_pool *pool,
struct evhttp_connection *evcon);
/**
* Sets the timeout in secs after which a request has to complete. The
* RPC is completely aborted if it does not complete by then. Setting
* the timeout to 0 means that it never times out and can be used to
* implement callback type RPCs.
*
* Any connection already in the pool will be updated with the new
* timeout. Connections added to the pool after set_timeout has been
* called receive the pool timeout only if no timeout has been set
* for the connection itself.
*
* @param pool a pointer to a struct evrpc_pool object
* @param timeout_in_secs the number of seconds after which a request should
* timeout and a failure be returned to the callback.
*/
void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
/**
* Hooks for changing the input and output of RPCs; this can be used to
* implement compression, authentication, encryption, ...
*/
enum EVRPC_HOOK_TYPE {
EVRPC_INPUT, /**< apply the function to an input hook */
EVRPC_OUTPUT /**< apply the function to an output hook */
};
#ifndef WIN32
/** Deprecated alias for EVRPC_INPUT. Not available on windows, where it
* conflicts with platform headers. */
#define INPUT EVRPC_INPUT
/** Deprecated alias for EVRPC_OUTPUT. Not available on windows, where it
* conflicts with platform headers. */
#define OUTPUT EVRPC_OUTPUT
#endif
/**
* Return value from hook processing functions
*/
enum EVRPC_HOOK_RESULT {
EVRPC_TERMINATE = -1, /**< indicates the rpc should be terminated */
EVRPC_CONTINUE = 0, /**< continue processing the rpc */
EVRPC_PAUSE = 1 /**< pause processing request until resumed */
};
/** adds a processing hook to either an rpc base or rpc pool
*
* If a hook returns TERMINATE, the processing is aborted. On CONTINUE,
* the request is immediately processed after the hook returns. If the
* hook returns PAUSE, request processing stops until evrpc_resume_request()
* has been called.
*
* The add functions return handles that can be used for removing hooks.
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either INPUT or OUTPUT
* @param cb the callback to call when the hook is activated
* @param cb_arg an additional argument for the callback
* @return a handle to the hook so it can be removed later
* @see evrpc_remove_hook()
*/
void *evrpc_add_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
int (*cb)(void *, struct evhttp_request *, struct evbuffer *, void *),
void *cb_arg);
/** removes a previously added hook
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param hook_type either INPUT or OUTPUT
* @param handle a handle returned by evrpc_add_hook()
* @return 1 on success or 0 on failure
* @see evrpc_add_hook()
*/
int evrpc_remove_hook(void *vbase,
enum EVRPC_HOOK_TYPE hook_type,
void *handle);
/** resume a paused request
*
* @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
* @param ctx the context pointer provided to the original hook call
*/
int
evrpc_resume_request(void *vbase, void *ctx, enum EVRPC_HOOK_RESULT res);
/** adds meta data to request
*
* evrpc_hook_add_meta() allows hooks to add meta data to a request. For
* a client request, the meta data can be inserted by an outgoing request hook
* and retrieved by the incoming request hook.
*
* @param ctx the context provided to the hook call
* @param key a NUL-terminated c-string
* @param data the data to be associated with the key
* @param data_size the size of the data
*/
void evrpc_hook_add_meta(void *ctx, const char *key,
const void *data, size_t data_size);
/** retrieves meta data previously associated
*
* evrpc_hook_find_meta() can be used to retrieve meta data associated to a
* request by a previous hook.
* @param ctx the context provided to the hook call
* @param key a NUL-terminated c-string
* @param data pointer to a data pointer that will contain the retrieved data
* @param data_size pointer to the size of the data
* @return 0 on success or -1 on failure
*/
int evrpc_hook_find_meta(void *ctx, const char *key,
void **data, size_t *data_size);
/**
* returns the connection object associated with the request
*
* @param ctx the context provided to the hook call
* @return a pointer to the evhttp_connection object
*/
struct evhttp_connection *evrpc_hook_get_connection(void *ctx);
/**
Function for sending a generic RPC request.
Do not call this function directly, use EVRPC_MAKE_REQUEST() instead.
@see EVRPC_MAKE_REQUEST()
*/
int evrpc_send_request_generic(struct evrpc_pool *pool,
void *request, void *reply,
void (*cb)(struct evrpc_status *, void *, void *, void *),
void *cb_arg,
const char *rpcname,
void (*req_marshal)(struct evbuffer *, void *),
void (*rpl_clear)(void *),
int (*rpl_unmarshal)(void *, struct evbuffer *));
/**
Function for registering a generic RPC with the RPC base.
Do not call this function directly, use EVRPC_REGISTER() instead.
@see EVRPC_REGISTER()
*/
int
evrpc_register_generic(struct evrpc_base *base, const char *name,
void (*callback)(struct evrpc_req_generic *, void *), void *cbarg,
void *(*req_new)(void *), void *req_new_arg, void (*req_free)(void *),
int (*req_unmarshal)(void *, struct evbuffer *),
void *(*rpl_new)(void *), void *rpl_new_arg, void (*rpl_free)(void *),
int (*rpl_complete)(void *),
void (*rpl_marshal)(struct evbuffer *, void *));
/** accessors for obscure and undocumented functionality */
struct evrpc_pool* evrpc_request_get_pool(struct evrpc_request_wrapper *ctx);
void evrpc_request_set_pool(struct evrpc_request_wrapper *ctx,
struct evrpc_pool *pool);
void evrpc_request_set_cb(struct evrpc_request_wrapper *ctx,
void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg),
void *cb_arg);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_RPC_H_ */
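
/* Illustrative sketch (not part of this changeset): minimal client-side
 * setup of an RPC connection pool.  Real requests additionally need message
 * structures generated by event_rpcgen.py and connections added with
 * evrpc_pool_add_connection(). */
#include <event2/event.h>
#include <event2/rpc.h>

int
main(void)
{
    struct event_base *base = event_base_new();
    struct evrpc_pool *pool = evrpc_pool_new(base);

    evrpc_pool_set_timeout(pool, 30);  /* abort requests after 30 seconds */

    /* ... add evhttp_connection objects and issue EVRPC_MAKE_REQUEST()
     * calls here ... */

    evrpc_pool_free(pool);
    event_base_free(base);
    return 0;
}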

View file

@ -1,61 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_RPC_COMPAT_H_
#define _EVENT2_RPC_COMPAT_H_
/** @file rpc_compat.h
Deprecated versions of the functions in rpc.h: provided only for
backwards compatibility.
*/
#ifdef __cplusplus
extern "C" {
#endif
/** backwards compatible accessors that work only with gcc */
#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
#undef EVTAG_ASSIGN
#undef EVTAG_GET
#undef EVTAG_ADD
#define EVTAG_ASSIGN(msg, member, args...) \
(*(msg)->base->member##_assign)(msg, ## args)
#define EVTAG_GET(msg, member, args...) \
(*(msg)->base->member##_get)(msg, ## args)
#define EVTAG_ADD(msg, member, args...) \
(*(msg)->base->member##_add)(msg, ## args)
#endif
#define EVTAG_LEN(msg, member) ((msg)->member##_length)
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_RPC_COMPAT_H_ */

View file

@ -1,100 +0,0 @@
/*
* Copyright (c) 2006-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_RPC_STRUCT_H_
#define _EVENT2_RPC_STRUCT_H_
#ifdef __cplusplus
extern "C" {
#endif
/** @file rpc_struct.h
Structures used by rpc.h. Using these structures directly may harm
forward compatibility: be careful!
*/
/**
* provides information about the completed RPC request.
*/
struct evrpc_status {
#define EVRPC_STATUS_ERR_NONE 0
#define EVRPC_STATUS_ERR_TIMEOUT 1
#define EVRPC_STATUS_ERR_BADPAYLOAD 2
#define EVRPC_STATUS_ERR_UNSTARTED 3
#define EVRPC_STATUS_ERR_HOOKABORTED 4
int error;
/* for looking at headers or other information */
struct evhttp_request *http_req;
};
/* the structure below needs to be synchronized with evrpc_req_generic */
/* Encapsulates a request */
struct evrpc {
TAILQ_ENTRY(evrpc) next;
/* the URI at which the request handler lives */
const char* uri;
/* creates a new request structure */
void *(*request_new)(void *);
void *request_new_arg;
/* frees the request structure */
void (*request_free)(void *);
/* unmarshals the buffer into the proper request structure */
int (*request_unmarshal)(void *, struct evbuffer *);
/* creates a new reply structure */
void *(*reply_new)(void *);
void *reply_new_arg;
/* frees the reply structure */
void (*reply_free)(void *);
/* verifies that the reply is valid */
int (*reply_complete)(void *);
/* marshals the reply into a buffer */
void (*reply_marshal)(struct evbuffer*, void *);
/* the callback invoked for each received rpc */
void (*cb)(struct evrpc_req_generic *, void *);
void *cb_arg;
/* reference for further configuration */
struct evrpc_base *base;
};
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_RPC_STRUCT_H_ */

View file

@ -1,124 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_TAG_H_
#define _EVENT2_TAG_H_
/** @file tag.h
Helper functions for reading and writing tagged data onto buffers.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
/* For int types. */
#include <event2/util.h>
struct evbuffer;
/*
* Marshaling tagged data - We assume that all tags are inserted in their
* numeric order - so that unknown tags will always be higher than the
* known ones - and we can just ignore the end of an event buffer.
*/
void evtag_init(void);
/**
Unmarshals the header and returns the length of the payload
@param evbuf the buffer from which to unmarshal data
@param ptag a pointer in which the tag id is being stored
@returns -1 on failure or the number of bytes in the remaining payload.
*/
int evtag_unmarshal_header(struct evbuffer *evbuf, ev_uint32_t *ptag);
void evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, const void *data,
ev_uint32_t len);
void evtag_marshal_buffer(struct evbuffer *evbuf, ev_uint32_t tag,
struct evbuffer *data);
/**
Encode an integer and store it in an evbuffer.
We encode integers by nibbles; the first nibble contains the number
of significant nibbles - 1; this allows us to encode up to 64-bit
integers. This function is byte-order independent.
@param evbuf evbuffer to store the encoded number
@param number a 32-bit integer
*/
void evtag_encode_int(struct evbuffer *evbuf, ev_uint32_t number);
void evtag_encode_int64(struct evbuffer *evbuf, ev_uint64_t number);
void evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag,
ev_uint32_t integer);
void evtag_marshal_int64(struct evbuffer *evbuf, ev_uint32_t tag,
ev_uint64_t integer);
void evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag,
const char *string);
void evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag,
struct timeval *tv);
int evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag,
struct evbuffer *dst);
int evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag);
int evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength);
int evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength);
int evtag_consume(struct evbuffer *evbuf);
int evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint32_t *pinteger);
int evtag_unmarshal_int64(struct evbuffer *evbuf, ev_uint32_t need_tag,
ev_uint64_t *pinteger);
int evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag,
void *data, size_t len);
int evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
char **pstring);
int evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
struct timeval *ptv);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_TAG_H_ */
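
/* Illustrative sketch (not part of this changeset): marshal a tagged integer
 * into an evbuffer and read it back.  The tag value 1 is arbitrary. */
#include <stdio.h>
#include <event2/buffer.h>
#include <event2/tag.h>

int
main(void)
{
    struct evbuffer *buf = evbuffer_new();
    ev_uint32_t value = 0;

    evtag_init();
    evtag_marshal_int(buf, 1, 42);              /* tag 1, payload 42 */
    if (evtag_unmarshal_int(buf, 1, &value) == -1)
        return 1;                               /* tag mismatch or bad data */
    printf("decoded %u\n", (unsigned)value);

    evbuffer_free(buf);
    return 0;
}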

View file

@ -1,39 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_TAG_COMPAT_H_
#define _EVENT2_TAG_COMPAT_H_
/** @file tag_compat.h
Obsolete/deprecated functions from tag.h; provided only for backwards
compatibility.
*/
#define encode_int(evbuf, number) evtag_encode_int((evbuf), (number))
#define encode_int64(evbuf, number) evtag_encode_int64((evbuf), (number))
#endif /* _EVENT2_TAG_COMPAT_H_ */

View file

@ -1,227 +0,0 @@
/*
* Copyright (c) 2008-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_THREAD_H_
#define _EVENT2_THREAD_H_
/** @file thread.h
Functions for multi-threaded applications using Libevent.
When using a multi-threaded application in which multiple threads
add and delete events from a single event base, Libevent needs to
lock its data structures.
Like the memory-management function hooks, all of the threading functions
_must_ be set up before an event_base is created if you want the base to
use them.
A multi-threaded application must provide locking functions to
Libevent via evthread_set_locking_callback(). Libevent will invoke
this callback whenever a lock needs to be acquired or released.
The total number of locks employed by Libevent can be determined
via the evthread_num_locks() function. An application must provision
that many locks.
If the owner of an event base is waiting for events to happen,
Libevent may signal the thread via a special file descriptor to wake
up. To enable this feature, an application needs to provide a
thread identity function via evthread_set_id_callback().
*/
#ifdef __cplusplus
extern "C" {
#endif
#include <event2/event-config.h>
#include "opal_rename.h"
/** A flag passed to a locking callback when the lock was allocated as a
* read-write lock, and we want to acquire or release the lock for writing. */
#define EVTHREAD_WRITE 0x04
/** A flag passed to a locking callback when the lock was allocated as a
* read-write lock, and we want to acquire or release the lock for reading. */
#define EVTHREAD_READ 0x08
/** A flag passed to a locking callback when we don't want to block waiting
* for the lock; if we can't get the lock immediately, we will instead
* return nonzero from the locking callback. */
#define EVTHREAD_TRY 0x10
#ifndef _EVENT_DISABLE_THREAD_SUPPORT
#define EVTHREAD_LOCK_API_VERSION 1
/** A recursive lock is one that can be acquired multiple times at once by the
* same thread. No other thread can acquire the lock until the thread that
* has been holding it has unlocked it as many times as it locked it. */
#define EVTHREAD_LOCKTYPE_RECURSIVE 1
/* A read-write lock is one that allows multiple simultaneous readers, but
* where any one writer excludes all other writers and readers. */
#define EVTHREAD_LOCKTYPE_READWRITE 2
/** This structure describes the interface a threading library uses for
* locking. It's used to tell evthread_set_lock_callbacks how to use
* locking on this platform.
*/
struct evthread_lock_callbacks {
/** The current version of the locking API. Set this to
* EVTHREAD_LOCK_API_VERSION */
int lock_api_version;
/** Which kinds of locks does this version of the locking API
* support? A bitfield of EVTHREAD_LOCKTYPE_RECURSIVE and
* EVTHREAD_LOCKTYPE_READWRITE.
*
* (Note that RECURSIVE locks are currently mandatory, and
* READWRITE locks are not currently used.)
**/
unsigned supported_locktypes;
/** Function to allocate and initialize new lock of type 'locktype'.
* Returns NULL on failure. */
void *(*alloc)(unsigned locktype);
/** Function to release all storage held in 'lock', which was created
* with type 'locktype'. */
void (*free)(void *lock, unsigned locktype);
/** Acquire an already-allocated lock at 'lock' with mode 'mode'.
* Returns 0 on success, and nonzero on failure. */
int (*lock)(unsigned mode, void *lock);
/** Release a lock at 'lock' using mode 'mode'. Returns 0 on success,
* and nonzero on failure. */
int (*unlock)(unsigned mode, void *lock);
};
/** Sets a group of functions that Libevent should use for locking.
* For full information on the required callback API, see the
* documentation for the individual members of evthread_lock_callbacks.
*
* Note that if you're using Windows or the Pthreads threading library, you
* probably shouldn't call this function; instead, use
* evthread_use_windows_threads() or evthread_use_pthreads() if you can.
*/
int evthread_set_lock_callbacks(const struct evthread_lock_callbacks *);
#define EVTHREAD_CONDITION_API_VERSION 1
struct timeval;
/** This structure describes the interface a threading library uses for
* condition variables. It's used to tell evthread_set_condition_callbacks
* how to use locking on this platform.
*/
struct evthread_condition_callbacks {
/** The current version of the conditions API. Set this to
* EVTHREAD_CONDITION_API_VERSION */
int condition_api_version;
/** Function to allocate and initialize a new condition variable.
* Returns the condition variable on success, and NULL on failure.
* The 'condtype' argument will be 0 with this API version.
*/
void *(*alloc_condition)(unsigned condtype);
/** Function to free a condition variable. */
void (*free_condition)(void *cond);
/** Function to signal a condition variable. If 'broadcast' is 1, all
* threads waiting on 'cond' should be woken; otherwise, only one
* thread is woken. Should return 0 on success, -1 on failure.
* This function will only be called while holding the associated
* lock for the condition.
*/
int (*signal_condition)(void *cond, int broadcast);
/** Function to wait for a condition variable. The lock 'lock'
* will be held when this function is called; should be released
* while waiting for the condition to become signalled, and
* should be held again when this function returns.
* If timeout is provided, it is the interval of seconds to wait for
* the event to become signalled; if it is NULL, the function
* should wait indefinitely.
*
* The function should return -1 on error; 0 if the condition
* was signalled, or 1 on a timeout. */
int (*wait_condition)(void *cond, void *lock,
const struct timeval *timeout);
};
/** Sets a group of functions that Libevent should use for condition variables.
* For full information on the required callback API, see the
* documentation for the individual members of evthread_condition_callbacks.
*
* Note that if you're using Windows or the Pthreads threading library, you
* probably shouldn't call this function; instead, use
* evthread_use_windows_threads() or evthread_use_pthreads() if you can.
*/
int evthread_set_condition_callbacks(
const struct evthread_condition_callbacks *);
/**
Sets the function for determining the thread id.
@param base the event base for which to set the id function
@param id_fn the identity function Libevent should invoke to
determine the identity of a thread.
*/
void evthread_set_id_callback(
unsigned long (*id_fn)(void));
#if defined(WIN32) && !defined(_EVENT_DISABLE_THREAD_SUPPORT)
/** Sets up Libevent for use with Windows builtin locking and thread ID
functions. Unavailable if Libevent is not built for Windows.
@return 0 on success, -1 on failure. */
int evthread_use_windows_threads(void);
#define EVTHREAD_USE_WINDOWS_THREADS_IMPLEMENTED 1
#endif
#if defined(_EVENT_HAVE_PTHREADS)
/** Sets up Libevent for use with Pthreads locking and thread ID functions.
Unavailable if Libevent is not built for use with pthreads. Requires
libraries to link against Libevent_pthreads as well as Libevent.
@return 0 on success, -1 on failure. */
int evthread_use_pthreads(void);
#define EVTHREAD_USE_PTHREADS_IMPLEMENTED 1
#endif
/** Enable debugging wrappers around the current lock callbacks. If Libevent
* makes one of several common locking errors, exit with an assertion failure.
**/
void evthread_enable_lock_debuging(void);
#endif /* _EVENT_DISABLE_THREAD_SUPPORT */
struct event_base;
/** Make sure it's safe to tell an event base to wake up from another thread
or a signal handler.
@return 0 on success, -1 on failure.
*/
int evthread_make_base_notifiable(struct event_base *base);
#ifdef __cplusplus
}
#endif
#endif /* _EVENT2_THREAD_H_ */
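
/* Illustrative sketch (not part of this changeset): install the pthreads
 * locking callbacks before any event_base is created, as required above.
 * Assumes Libevent was built with pthreads support and the program links
 * against the libevent_pthreads library. */
#include <event2/event.h>
#include <event2/thread.h>

int
main(void)
{
#ifdef EVTHREAD_USE_PTHREADS_IMPLEMENTED
    if (evthread_use_pthreads() < 0)
        return 1;                  /* could not install locking callbacks */
#endif
    struct event_base *base = event_base_new();
    /* events may now safely be added to 'base' from other threads */
    event_base_free(base);
    return 0;
}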

View file

@ -1,580 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT2_UTIL_H_
#define _EVENT2_UTIL_H_
/** @file event2/util.h
Common convenience functions for cross-platform portability and
related socket manipulations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "opal_rename.h"
#include <event2/event-config.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef _EVENT_HAVE_STDINT_H
#include <stdint.h>
#elif defined(_EVENT_HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#ifdef _EVENT_HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef _EVENT_HAVE_STDDEF_H
#include <stddef.h>
#endif
#ifdef _MSC_VER
#include <BaseTsd.h>
#endif
#include <stdarg.h>
#ifdef _EVENT_HAVE_NETDB_H
#if !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif
#include <netdb.h>
#endif
#ifdef WIN32
#include <winsock2.h>
#endif
/* Integer type definitions for types that are supposed to be defined in the
* C99-specified stdint.h. Shamefully, some platforms do not include
* stdint.h, so we need to replace it. (If you are on a platform like this,
* your C headers are now over 10 years out of date. You should bug them to
* do something about this.)
*
* We define:
* ev_uint64_t, ev_uint32_t, ev_uint16_t, ev_uint8_t -- unsigned integer
* types of exactly 64, 32, 16, and 8 bits respectively.
* ev_int64_t, ev_int32_t, ev_int16_t, ev_int8_t -- signed integer
* types of exactly 64, 32, 16, and 8 bits respectively.
* ev_uintptr_t, ev_intptr_t -- unsigned/signed integers large enough
* to hold a pointer without loss of bits.
*/
#ifdef _EVENT_HAVE_UINT64_T
#define ev_uint64_t uint64_t
#define ev_int64_t int64_t
#elif defined(WIN32)
#define ev_uint64_t unsigned __int64
#define ev_int64_t signed __int64
#elif _EVENT_SIZEOF_LONG_LONG == 8
#define ev_uint64_t unsigned long long
#define ev_int64_t long long
#elif _EVENT_SIZEOF_LONG == 8
#define ev_uint64_t unsigned long
#define ev_int64_t long
#else
#error "No way to define ev_uint64_t"
#endif
#ifdef _EVENT_HAVE_UINT32_T
#define ev_uint32_t uint32_t
#define ev_int32_t int32_t
#elif defined(WIN32)
#define ev_uint32_t unsigned int
#define ev_int32_t signed int
#elif _EVENT_SIZEOF_LONG == 4
#define ev_uint32_t unsigned long
#define ev_int32_t signed long
#elif _EVENT_SIZEOF_INT == 4
#define ev_uint32_t unsigned int
#define ev_int32_t signed int
#else
#error "No way to define ev_uint32_t"
#endif
#ifdef _EVENT_HAVE_UINT16_T
#define ev_uint16_t uint16_t
#define ev_int16_t int16_t
#elif defined(WIN32)
#define ev_uint16_t unsigned short
#define ev_int16_t signed short
#elif _EVENT_SIZEOF_INT == 2
#define ev_uint16_t unsigned int
#define ev_int16_t signed int
#elif _EVENT_SIZEOF_SHORT == 2
#define ev_uint16_t unsigned short
#define ev_int16_t signed short
#else
#error "No way to define ev_uint16_t"
#endif
#ifdef _EVENT_HAVE_UINT8_T
#define ev_uint8_t uint8_t
#define ev_int8_t int8_t
#else
#define ev_uint8_t unsigned char
#define ev_int8_t signed char
#endif
/* Some openbsd autoconf versions get the name of this macro wrong. */
#if defined(_EVENT_SIZEOF_VOID__) && !defined(_EVENT_SIZEOF_VOID_P)
#define _EVENT_SIZEOF_VOID_P _EVENT_SIZEOF_VOID__
#endif
#ifdef _EVENT_HAVE_UINTPTR_T
#define ev_uintptr_t uintptr_t
#define ev_intptr_t intptr_t
#elif _EVENT_SIZEOF_VOID_P <= 4
#define ev_uintptr_t ev_uint32_t
#define ev_intptr_t ev_int32_t
#elif _EVENT_SIZEOF_VOID_P <= 8
#define ev_uintptr_t ev_uint64_t
#define ev_intptr_t ev_int64_t
#else
#error "No way to define ev_uintptr_t"
#endif
#ifdef _EVENT_ssize_t
#define ev_ssize_t _EVENT_ssize_t
#else
#define ev_ssize_t ssize_t
#endif
/* Limits for integer types.
We're making two assumptions here:
- The compiler does constant folding properly.
- The platform does signed arithmetic in two's complement.
*/
#define EV_UINT64_MAX ((((ev_uint64_t)0xffffffffUL) << 32) | 0xffffffffUL)
#define EV_INT64_MAX ((((ev_int64_t) 0x7fffffffL) << 32) | 0xffffffffL)
#define EV_INT64_MIN ((-EV_INT64_MAX) - 1)
#define EV_UINT32_MAX ((ev_uint32_t)0xffffffffUL)
#define EV_INT32_MAX ((ev_int32_t) 0x7fffffffL)
#define EV_INT32_MIN ((-EV_INT32_MAX) - 1)
#define EV_UINT16_MAX ((ev_uint16_t)0xffffUL)
#define EV_INT16_MAX ((ev_int16_t) 0x7fffL)
#define EV_INT16_MIN ((-EV_INT16_MAX) - 1)
#define EV_UINT8_MAX 255
#define EV_INT8_MAX 127
#define EV_INT8_MIN ((-EV_INT8_MAX) - 1)
#if _EVENT_SIZEOF_SIZE_T == 8
#define EV_SIZE_MAX EV_UINT64_MAX
#define EV_SSIZE_MAX EV_INT64_MAX
#elif _EVENT_SIZEOF_SIZE_T == 4
#define EV_SIZE_MAX EV_UINT32_MAX
#define EV_SSIZE_MAX EV_INT32_MAX
#else
#error "No way to define SIZE_MAX"
#endif
#define EV_SSIZE_MIN ((-EV_SSIZE_MAX) - 1)
#ifdef WIN32
#define ev_socklen_t int
#elif defined(_EVENT_socklen_t)
#define ev_socklen_t _EVENT_socklen_t
#else
#define ev_socklen_t socklen_t
#endif
#ifdef WIN32
/** A type wide enough to hold the output of "socket()" or "accept()". On
* Windows, this is an intptr_t; elsewhere, it is an int. */
#define evutil_socket_t intptr_t
#else
#define evutil_socket_t int
#endif
/** Create two new sockets that are connected to each other. On Unix, this
simply calls socketpair(). On Windows, it uses the loopback network
interface on 127.0.0.1, and only AF_INET,SOCK_STREAM are supported.
(This may fail on some Windows hosts where firewall software has cleverly
decided to keep 127.0.0.1 from talking to itself.)
Parameters and return values are as for socketpair()
*/
int evutil_socketpair(int d, int type, int protocol, evutil_socket_t sv[2]);
/** Do platform-specific operations as needed to make a socket nonblocking.
@param sock The socket to make nonblocking
@return 0 on success, -1 on failure
*/
int evutil_make_socket_nonblocking(evutil_socket_t sock);
/** Do platform-specific operations on a listener socket to make sure that
another program will be able to bind this address right after we've
closed the listener
@param sock The socket to make reusable
@return 0 on success, -1 on failure
*/
int evutil_make_listen_socket_reuseable(evutil_socket_t sock);
/** Do platform-specific operations as needed to close a socket upon a
successful execution of one of the exec*() functions.
@param sock The socket to be closed
@return 0 on success, -1 on failure
*/
int evutil_make_socket_closeonexec(evutil_socket_t sock);
/** Do the platform-specific call needed to close a socket returned from
socket() or accept().
@param sock The socket to be closed
@return 0 on success, -1 on failure
*/
int evutil_closesocket(evutil_socket_t sock);
#define EVUTIL_CLOSESOCKET(s) evutil_closesocket(s)
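
/* Illustrative sketch (not part of this changeset): build a connected,
 * nonblocking socket pair with the wrappers declared above.  AF_UNIX assumes
 * a Unix-like system; on Windows evutil_socketpair() only supports
 * AF_INET/SOCK_STREAM. */
#include <sys/socket.h>
#include <event2/util.h>

static int
make_nonblocking_pair(evutil_socket_t sv[2])
{
    if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return -1;
    if (evutil_make_socket_nonblocking(sv[0]) < 0 ||
        evutil_make_socket_nonblocking(sv[1]) < 0) {
        evutil_closesocket(sv[0]);
        evutil_closesocket(sv[1]);
        return -1;
    }
    return 0;
}
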
/* Winsock handles socket errors differently from the rest of the world.
* Elsewhere, a socket error is like any other error and is stored in errno.
* But winsock functions require you to retrieve the error with a special
* function, and don't let you use strerror for the error codes. And handling
* EWOULDBLOCK is ... different. */
#ifdef WIN32
/** Return the most recent socket error. Not idempotent on all platforms. */
#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
/** Replace the most recent socket error with errcode */
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { WSASetLastError(errcode); } while (0)
/** Return the most recent socket error to occur on sock. */
int evutil_socket_geterror(evutil_socket_t sock);
/** Convert a socket error to a string. */
const char *evutil_socket_error_to_string(int errcode);
#else
#define EVUTIL_SOCKET_ERROR() (errno)
#define EVUTIL_SET_SOCKET_ERROR(errcode) \
do { errno = (errcode); } while (0)
#define evutil_socket_geterror(sock) (errno)
#define evutil_socket_error_to_string(errcode) (strerror(errcode))
#endif
/*
* Manipulation macros for struct timeval. We define replacements
* for timeradd, timersub, timerclear, timercmp, and timerisset.
*/
#ifdef _EVENT_HAVE_TIMERADD
#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
#else
#define evutil_timeradd(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
if ((vvp)->tv_usec >= 1000000) { \
(vvp)->tv_sec++; \
(vvp)->tv_usec -= 1000000; \
} \
} while (0)
#define evutil_timersub(tvp, uvp, vvp) \
do { \
(vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
(vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
if ((vvp)->tv_usec < 0) { \
(vvp)->tv_sec--; \
(vvp)->tv_usec += 1000000; \
} \
} while (0)
#endif /* !_EVENT_HAVE_TIMERADD */
#ifdef _EVENT_HAVE_TIMERCLEAR
#define evutil_timerclear(tvp) timerclear(tvp)
#else
#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
#endif
/** Return true iff the tvp is related to uvp according to the relational
* operator cmp. Recognized values for cmp are ==, <=, <, >=, and >. */
#define evutil_timercmp(tvp, uvp, cmp) \
(((tvp)->tv_sec == (uvp)->tv_sec) ? \
((tvp)->tv_usec cmp (uvp)->tv_usec) : \
((tvp)->tv_sec cmp (uvp)->tv_sec))
#ifdef _EVENT_HAVE_TIMERISSET
#define evutil_timerisset(tvp) timerisset(tvp)
#else
#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
#endif
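
/* Illustrative sketch (not part of this changeset): compute a deadline 2.5
 * seconds past a caller-supplied 'now' and test whether it has been reached,
 * using only the replacement macros above. */
#include <event2/util.h>

static int
deadline_reached(const struct timeval *now)
{
    struct timeval delay = { 2, 500000 };    /* 2.5 seconds */
    struct timeval deadline;

    evutil_timeradd(now, &delay, &deadline);
    return evutil_timercmp(now, &deadline, >=);
}
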
/* Replacement for offsetof on platforms that don't define it. */
#ifdef offsetof
#define evutil_offsetof(type, field) offsetof(type, field)
#else
#define evutil_offsetof(type, field) ((off_t)(&((type *)0)->field))
#endif
/* big-int related functions */
/** Parse a 64-bit value from a string. Arguments are as for strtol. */
ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
/* Replacement for gettimeofday on platforms that lack it. */
#ifdef _EVENT_HAVE_GETTIMEOFDAY
#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz))
#else
struct timezone;
int evutil_gettimeofday(struct timeval *tv, struct timezone *tz);
#endif
/** Replacement for snprintf to get consistent behavior on platforms for
which the return value of snprintf does not conform to C99.
*/
int evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
#ifdef __GNUC__
__attribute__((format(printf, 3, 4)))
#endif
;
int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap);
/** Replacement for inet_ntop for platforms which lack it. */
const char *evutil_inet_ntop(int af, const void *src, char *dst, size_t len);
/** Replacement for inet_pton for platforms which lack it. */
int evutil_inet_pton(int af, const char *src, void *dst);
struct sockaddr;
/** Parse an IPv4 or IPv6 address, with optional port, from a string.
Recognized formats are:
- [IPv6Address]:port
- [IPv6Address]
- IPv6Address
- IPv4Address:port
- IPv4Address
If no port is specified, the port in the output is set to 0.
@param str The string to parse.
@param out A struct sockaddr to hold the result. This should probably be
a struct sockaddr_storage.
@param outlen A pointer to the number of bytes that 'out' can safely
hold. Set to the number of bytes used in 'out' on success.
@return -1 if the address is not well-formed, if the port is out of range,
or if out is not large enough to hold the result. Otherwise returns
0 on success.
*/
int evutil_parse_sockaddr_port(const char *str, struct sockaddr *out, int *outlen);
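
/* Illustrative sketch (not part of this changeset): parse the literal
 * "127.0.0.1:8080" into a sockaddr_storage with the helper above.  Assumes a
 * POSIX system for <sys/socket.h>. */
#include <string.h>
#include <sys/socket.h>
#include <event2/util.h>

static int
parse_example(struct sockaddr_storage *ss)
{
    int len = (int)sizeof(*ss);

    memset(ss, 0, sizeof(*ss));
    return evutil_parse_sockaddr_port("127.0.0.1:8080",
        (struct sockaddr *)ss, &len);   /* 0 on success, -1 on failure */
}
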
/** Compare two sockaddrs; return 0 if they are equal, or less than 0 if sa1
* precedes sa2, or greater than 0 if sa1 follows sa2. If include_port is
* true, consider the port as well as the address. Only implemented for
* AF_INET and AF_INET6 addresses. The ordering is not guaranteed to remain
* the same between Libevent versions. */
int evutil_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2,
int include_port);
/** As strcasecmp, but always compares the characters in locale-independent
ASCII. That's useful if you're handling data in ASCII-based protocols.
*/
int evutil_ascii_strcasecmp(const char *str1, const char *str2);
/** As strncasecmp, but always compares the characters in locale-independent
ASCII. That's useful if you're handling data in ASCII-based protocols.
*/
int evutil_ascii_strncasecmp(const char *str1, const char *str2, size_t n);
/* Here we define evutil_addrinfo to the native addrinfo type, or redefine it
* if this system has no getaddrinfo(). */
#ifdef _EVENT_HAVE_STRUCT_ADDRINFO
#define evutil_addrinfo addrinfo
#else
struct evutil_addrinfo {
int ai_flags; /* AI_PASSIVE, AI_CANONNAME, AI_NUMERICHOST */
int ai_family; /* PF_xxx */
int ai_socktype; /* SOCK_xxx */
int ai_protocol; /* 0 or IPPROTO_xxx for IPv4 and IPv6 */
size_t ai_addrlen; /* length of ai_addr */
char *ai_canonname; /* canonical name for nodename */
struct sockaddr *ai_addr; /* binary address */
struct evutil_addrinfo *ai_next; /* next structure in linked list */
};
#endif
#ifdef EAI_ADDRFAMILY
#define EVUTIL_EAI_ADDRFAMILY EAI_ADDRFAMILY
#else
#define EVUTIL_EAI_ADDRFAMILY -901
#endif
#ifdef EAI_AGAIN
#define EVUTIL_EAI_AGAIN EAI_AGAIN
#else
#define EVUTIL_EAI_AGAIN -902
#endif
#ifdef EAI_BADFLAGS
#define EVUTIL_EAI_BADFLAGS EAI_BADFLAGS
#else
#define EVUTIL_EAI_BADFLAGS -903
#endif
#ifdef EAI_FAIL
#define EVUTIL_EAI_FAIL EAI_FAIL
#else
#define EVUTIL_EAI_FAIL -904
#endif
#ifdef EAI_FAMILY
#define EVUTIL_EAI_FAMILY EAI_FAMILY
#else
#define EVUTIL_EAI_FAMILY -905
#endif
#ifdef EAI_MEMORY
#define EVUTIL_EAI_MEMORY EAI_MEMORY
#else
#define EVUTIL_EAI_MEMORY -906
#endif
/* This test is a bit complicated, since some MS SDKs decide to
* remove NODATA or redefine it to be the same as NONAME, in a
* fun interpretation of RFC 2553 and RFC 3493. */
#if defined(EAI_NODATA) && (!defined(EAI_NONAME) || EAI_NODATA != EAI_NONAME)
#define EVUTIL_EAI_NODATA EAI_NODATA
#else
#define EVUTIL_EAI_NODATA -907
#endif
#ifdef EAI_NONAME
#define EVUTIL_EAI_NONAME EAI_NONAME
#else
#define EVUTIL_EAI_NONAME -908
#endif
#ifdef EAI_SERVICE
#define EVUTIL_EAI_SERVICE EAI_SERVICE
#else
#define EVUTIL_EAI_SERVICE -909
#endif
#ifdef EAI_SOCKTYPE
#define EVUTIL_EAI_SOCKTYPE EAI_SOCKTYPE
#else
#define EVUTIL_EAI_SOCKTYPE -910
#endif
#ifdef EAI_SYSTEM
#define EVUTIL_EAI_SYSTEM EAI_SYSTEM
#else
#define EVUTIL_EAI_SYSTEM -911
#endif
#define EVUTIL_EAI_CANCEL -90001
#ifdef AI_PASSIVE
#define EVUTIL_AI_PASSIVE AI_PASSIVE
#else
#define EVUTIL_AI_PASSIVE 0x1000
#endif
#ifdef AI_CANONNAME
#define EVUTIL_AI_CANONNAME AI_CANONNAME
#else
#define EVUTIL_AI_CANONNAME 0x2000
#endif
#ifdef AI_NUMERICHOST
#define EVUTIL_AI_NUMERICHOST AI_NUMERICHOST
#else
#define EVUTIL_AI_NUMERICHOST 0x4000
#endif
#ifdef AI_NUMERICSERV
#define EVUTIL_AI_NUMERICSERV AI_NUMERICSERV
#else
#define EVUTIL_AI_NUMERICSERV 0x8000
#endif
#ifdef AI_V4MAPPED
#define EVUTIL_AI_V4MAPPED AI_V4MAPPED
#else
#define EVUTIL_AI_V4MAPPED 0x10000
#endif
#ifdef AI_ALL
#define EVUTIL_AI_ALL AI_ALL
#else
#define EVUTIL_AI_ALL 0x20000
#endif
#ifdef AI_ADDRCONFIG
#define EVUTIL_AI_ADDRCONFIG AI_ADDRCONFIG
#else
#define EVUTIL_AI_ADDRCONFIG 0x40000
#endif
struct evutil_addrinfo;
/* This function clones getaddrinfo for systems that don't have it. For full
* details, see RFC 3493, section 6.1.
*
* Limitations:
* - When the system has no getaddrinfo, we fall back to gethostbyname_r or
* gethostbyname, with their attendant issues.
* - The AI_V4MAPPED and AI_ALL flags are not currently implemented.
*
* For a nonblocking variant, see evdns_getaddrinfo.
*/
int evutil_getaddrinfo(const char *nodename, const char *servname,
const struct evutil_addrinfo *hints_in, struct evutil_addrinfo **res);
/* Release storage allocated by evutil_getaddrinfo or evdns_getaddrinfo. */
void evutil_freeaddrinfo(struct evutil_addrinfo *ai);
const char *evutil_gai_strerror(int err);
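/* Minimal lookup sketch (not part of the patch); the host and service strings
 * are illustrative values only. */
#include <event2/util.h>
#include <string.h>
#include <stdio.h>
#ifndef WIN32
#include <sys/socket.h>     /* AF_UNSPEC, SOCK_STREAM on POSIX systems */
#endif

static void resolve_example(void)
{
    struct evutil_addrinfo hints, *res = NULL, *ai;
    int err;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;

    err = evutil_getaddrinfo("www.example.com", "80", &hints, &res);
    if (err) {
        fprintf(stderr, "resolve failed: %s\n", evutil_gai_strerror(err));
        return;
    }
    for (ai = res; ai != NULL; ai = ai->ai_next) {
        /* ai->ai_addr / ai->ai_addrlen describe one candidate address */
    }
    evutil_freeaddrinfo(res);
}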
/* Generate n bytes of secure pseudorandom data, and store them in buf.
*
* By default, Libevent uses an ARC4-based random number generator, seeded
* using the platform's entropy source (/dev/urandom on Unix-like systems;
* CryptGenRandom on Windows).
*/
void evutil_secure_rng_get_bytes(void *buf, size_t n);
/**
* Seed the secure random number generator if needed, and return 0 on
* success or -1 on failure.
*
* It is okay to call this function more than once; it will still return
* 0 if the RNG has been successfully seeded and -1 if it can't be
* seeded.
*
* Ordinarily you don't need to call this function from your own code;
* Libevent will seed the RNG itself the first time it needs good random
* numbers. You only need to call it if (a) you want to double-check
* that one of the seeding methods did succeed, or (b) you plan to drop
* the capability to seed (by chrooting, or dropping capabilities, or
* whatever), and you want to make sure that seeding happens before your
* program loses the ability to do it.
*/
int evutil_secure_rng_init(void);
/** Seed the random number generator with extra random bytes.
You should almost never need to call this function; it should be
sufficient to invoke evutil_secure_rng_init(), or let Libevent take
care of calling evutil_secure_rng_init() on its own.
If you call this function as a _replacement_ for the regular
entropy sources, then you need to be sure that your input
contains a fairly large amount of strong entropy. Doing so is
notoriously hard: most people who try get it wrong. Watch out!
@param dat a buffer full of a strong source of random numbers
@param datlen the number of bytes to read from dat
*/
void evutil_secure_rng_add_bytes(const char *dat, size_t datlen);
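/* Minimal sketch (not part of the patch): seed the RNG before dropping
 * privileges, then draw key material from it. */
#include <event2/util.h>

static int rng_example(void)
{
    unsigned char key[16];

    if (evutil_secure_rng_init() < 0)
        return -1;                      /* no usable entropy source */
    evutil_secure_rng_get_bytes(key, sizeof(key));
    return 0;
}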
#ifdef __cplusplus
}
#endif
#endif /* _EVUTIL_H_ */

Просмотреть файл

@ -1,201 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_IOCP_INTERNAL_H
#define _EVENT_IOCP_INTERNAL_H
#ifdef __cplusplus
extern "C" {
#endif
struct event_overlapped;
struct event_iocp_port;
struct evbuffer;
typedef void (*iocp_callback)(struct event_overlapped *, ev_uintptr_t, ev_ssize_t, int success);
/* This whole file is actually win32 only. We wrap the structures in a win32
* ifdef so that we can test-compile code that uses these interfaces on
* non-win32 platforms. */
#ifdef WIN32
/**
Internal use only. Wraps an OVERLAPPED that we're using for libevent
functionality. Whenever an event_iocp_port gets an event for a given
OVERLAPPED*, it upcasts the pointer to an event_overlapped, and calls the
iocp_callback function with the event_overlapped, the iocp key, and the
number of bytes transferred as arguments.
*/
struct event_overlapped {
OVERLAPPED overlapped;
iocp_callback cb;
};
/* Mingw's headers don't define LPFN_ACCEPTEX. */
typedef BOOL (WINAPI *AcceptExPtr)(SOCKET, SOCKET, PVOID, DWORD, DWORD, DWORD, LPDWORD, LPOVERLAPPED);
typedef BOOL (WINAPI *ConnectExPtr)(SOCKET, const struct sockaddr *, int, PVOID, DWORD, LPDWORD, LPOVERLAPPED);
typedef void (WINAPI *GetAcceptExSockaddrsPtr)(PVOID, DWORD, DWORD, DWORD, LPSOCKADDR *, LPINT, LPSOCKADDR *, LPINT);
/** Internal use only. Holds pointers to functions that only some versions of
Windows provide.
*/
struct win32_extension_fns {
AcceptExPtr AcceptEx;
ConnectExPtr ConnectEx;
GetAcceptExSockaddrsPtr GetAcceptExSockaddrs;
};
/**
Internal use only. Stores a Windows IO Completion port, along with
related data.
*/
struct event_iocp_port {
/** The port itself */
HANDLE port;
/* A lock to cover internal structures. */
CRITICAL_SECTION lock;
/** Number of threads ever open on the port. */
short n_threads;
/** True iff we're shutting down all the threads on this port */
short shutdown;
/** How often the threads on this port check for shutdown and other
* conditions */
long ms;
/* The threads that are waiting for events. */
HANDLE *threads;
/** Number of threads currently open on this port. */
short n_live_threads;
/** A semaphore to signal when we are done shutting down. */
HANDLE *shutdownSemaphore;
};
const struct win32_extension_fns *event_get_win32_extension_fns(void);
#else
/* Dummy definition so we can test-compile more things on unix. */
struct event_overlapped {
iocp_callback cb;
};
#endif
/** Initialize the fields in an event_overlapped.
@param overlapped The struct event_overlapped to initialize
@param cb The callback that should be invoked once the IO operation has
finished.
*/
void event_overlapped_init(struct event_overlapped *, iocp_callback cb);
/** Allocate and return a new evbuffer that supports overlapped IO on a given
socket. The socket must be associated with an IO completion port using
event_iocp_port_associate.
*/
struct evbuffer *evbuffer_overlapped_new(evutil_socket_t fd);
/** XXXX Document (nickm) */
evutil_socket_t _evbuffer_overlapped_get_fd(struct evbuffer *buf);
void _evbuffer_overlapped_set_fd(struct evbuffer *buf, evutil_socket_t fd);
/** Start reading data onto the end of an overlapped evbuffer.
An evbuffer can only have one read pending at a time. While the read
is in progress, no other data may be added to the end of the buffer.
The buffer must be created with event_overlapped_init().
evbuffer_commit_read() must be called in the completion callback.
@param buf The buffer to read onto
@param n The number of bytes to try to read.
@param ol Overlapped object with associated completion callback.
@return 0 on success, -1 on error.
*/
int evbuffer_launch_read(struct evbuffer *buf, size_t n, struct event_overlapped *ol);
/** Start writing data from the start of an evbuffer.
An evbuffer can only have one write pending at a time. While the write is
in progress, no other data may be removed from the front of the buffer.
The buffer must be created with event_overlapped_init().
evbuffer_commit_write() must be called in the completion callback.
@param buf The buffer to write from
@param n The number of bytes to try to write.
@param ol Overlapped object with associated completion callback.
@return 0 on success, -1 on error.
*/
int evbuffer_launch_write(struct evbuffer *buf, ev_ssize_t n, struct event_overlapped *ol);
/** XXX document */
void evbuffer_commit_read(struct evbuffer *, ev_ssize_t);
void evbuffer_commit_write(struct evbuffer *, ev_ssize_t);
/** Create an IOCP, and launch its worker threads. Internal use only.
This interface is unstable, and will change.
*/
struct event_iocp_port *event_iocp_port_launch(int n_cpus);
/** Associate a file descriptor with an iocp, such that overlapped IO on the
fd will happen on one of the iocp's worker threads.
*/
int event_iocp_port_associate(struct event_iocp_port *port, evutil_socket_t fd,
ev_uintptr_t key);
/** Tell all threads serving an iocp to stop. Wait for up to waitMsec for all
the threads to finish whatever they're doing. If waitMsec is -1, wait
as long as required. If all the threads are done, free the port and return
0. Otherwise, return -1. If you get a -1 return value, it is safe to call
this function again.
*/
int event_iocp_shutdown(struct event_iocp_port *port, long waitMsec);
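/* Win32-only lifecycle sketch (not part of the patch) for the internal,
 * unstable IOCP interface declared above; the thread count, key, and wait
 * time are illustrative values. */
#ifdef WIN32
static int iocp_lifecycle_example(evutil_socket_t fd)
{
    struct event_iocp_port *port = event_iocp_port_launch(2);
    if (!port)
        return -1;
    if (event_iocp_port_associate(port, fd, 1) < 0) {
        event_iocp_shutdown(port, -1);
        return -1;
    }
    /* ... queue overlapped reads/writes whose completions arrive on the
     * port's worker threads ... */
    return event_iocp_shutdown(port, 100);
}
#endif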
/* FIXME document. */
int event_iocp_activate_overlapped(struct event_iocp_port *port,
struct event_overlapped *o,
ev_uintptr_t key, ev_uint32_t n_bytes);
struct event_base;
/* FIXME document. */
struct event_iocp_port *event_base_get_iocp(struct event_base *base);
/* FIXME document. */
int event_base_start_iocp(struct event_base *base, int n_cpus);
void event_base_stop_iocp(struct event_base *base);
/* FIXME document. */
struct bufferevent *bufferevent_async_new(struct event_base *base,
evutil_socket_t fd, int options);
/* FIXME document. */
void bufferevent_async_set_connected(struct bufferevent *bev);
int bufferevent_async_can_connect(struct bufferevent *bev);
int bufferevent_async_connect(struct bufferevent *bev, evutil_socket_t fd,
const struct sockaddr *sa, int socklen);
#ifdef __cplusplus
}
#endif
#endif

Просмотреть файл

@ -1,72 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Internal use only: Fake IPv6 structures and values on platforms that
* do not have them */
#ifndef _EVENT_IPV6_INTERNAL_H
#define _EVENT_IPV6_INTERNAL_H
#include <sys/types.h>
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include "event2/event-config.h"
#include <event2/util.h>
#ifdef __cplusplus
extern "C" {
#endif
/** @file ipv6-internal.h
*
* Replacement types and functions for platforms that don't support ipv6
* properly.
*/
#ifndef _EVENT_HAVE_STRUCT_IN6_ADDR
struct in6_addr {
ev_uint8_t s6_addr[16];
};
#endif
#ifndef _EVENT_HAVE_SA_FAMILY_T
typedef int sa_family_t;
#endif
#ifndef _EVENT_HAVE_STRUCT_SOCKADDR_IN6
struct sockaddr_in6 {
sa_family_t sin6_family;
ev_uint16_t sin6_port;
struct in6_addr sin6_addr;
};
#endif
#ifdef __cplusplus
}
#endif
#endif

Просмотреть файл

@ -1,414 +0,0 @@
/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
/*
* Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "event2/event-config.h"
#define _GNU_SOURCE
#include <sys/types.h>
#ifdef _EVENT_HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/event.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef _EVENT_HAVE_INTTYPES_H
#include <inttypes.h>
#endif
/* Some platforms apparently define the udata field of struct kevent as
* intptr_t, whereas others define it as void*. There doesn't seem to be an
* easy way to tell them apart via autoconf, so we need to use OS macros. */
#if defined(_EVENT_HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x) (x)
#endif
#include "event-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "changelist-internal.h"
#define NEVENT 64
struct kqop {
struct kevent *changes;
int changes_size;
struct kevent *events;
int events_size;
int kq;
pid_t pid;
};
static void kqop_free(struct kqop *kqop);
static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);
const struct eventop kqops = {
"kqueue",
kq_init,
event_changelist_add,
event_changelist_del,
kq_dispatch,
kq_dealloc,
1 /* need reinit */,
EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
EVENT_CHANGELIST_FDINFO_SIZE
};
static const struct eventop kqsigops = {
"kqueue_signal",
NULL,
kq_sig_add,
kq_sig_del,
NULL,
NULL,
1 /* need reinit */,
0,
0
};
static void *
kq_init(struct event_base *base)
{
int kq = -1;
struct kqop *kqueueop = NULL;
if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
return (NULL);
/* Initialize the kernel queue */
if ((kq = kqueue()) == -1) {
event_warn("kqueue");
goto err;
}
kqueueop->kq = kq;
kqueueop->pid = getpid();
/* Initialize fields */
kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
if (kqueueop->changes == NULL)
goto err;
kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
if (kqueueop->events == NULL)
goto err;
kqueueop->events_size = kqueueop->changes_size = NEVENT;
/* Check for Mac OS X kqueue bug. */
memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
kqueueop->changes[0].ident = -1;
kqueueop->changes[0].filter = EVFILT_READ;
kqueueop->changes[0].flags = EV_ADD;
/*
* If kqueue works, then kevent will succeed, and it will
* stick an error in events[0]. If kqueue is broken, then
* kevent will fail.
*/
if (kevent(kq,
kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
kqueueop->events[0].ident != -1 ||
kqueueop->events[0].flags != EV_ERROR) {
event_warn("%s: detected broken kqueue; not using.", __func__);
goto err;
}
base->evsigsel = &kqsigops;
base->evsigbase = kqueueop;
return (kqueueop);
err:
if (kqueueop)
kqop_free(kqueueop);
return (NULL);
}
static void
kq_sighandler(int sig)
{
/* Do nothing here */
}
static void
kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
{
memset(out, 0, sizeof(*out));	/* zero the whole kevent, not a pointer's worth */
out->ident = fd;
out->filter = filter;
if (change & EV_CHANGE_ADD) {
out->flags = EV_ADD;
if (change & EV_ET)
out->flags |= EV_CLEAR;
#ifdef NOTE_EOF
/* Make it behave like select() and poll() */
if (filter == EVFILT_READ)
out->fflags = NOTE_EOF;
#endif
} else {
EVUTIL_ASSERT(change & EV_CHANGE_DEL);
out->flags = EV_DELETE;
}
}
static int
kq_build_changes_list(const struct event_changelist *changelist,
struct kqop *kqop)
{
int i;
int n_changes = 0;
for (i = 0; i < changelist->n_changes; ++i) {
struct event_change *in_ch = &changelist->changes[i];
struct kevent *out_ch;
if (n_changes >= kqop->changes_size - 1) {
int newsize = kqop->changes_size * 2;
struct kevent *newchanges;
newchanges = mm_realloc(kqop->changes,
newsize * sizeof(struct kevent));
if (newchanges == NULL) {
event_warn("%s: realloc", __func__);
return (-1);
}
kqop->changes = newchanges;
kqop->changes_size = newsize;
}
if (in_ch->read_change) {
out_ch = &kqop->changes[n_changes++];
kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
in_ch->read_change);
}
if (in_ch->write_change) {
out_ch = &kqop->changes[n_changes++];
kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
in_ch->write_change);
}
}
return n_changes;
}
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
struct kqop *kqop = base->evbase;
struct kevent *events = kqop->events;
struct kevent *changes;
struct timespec ts, *ts_p = NULL;
int i, n_changes, res;
if (tv != NULL) {
TIMEVAL_TO_TIMESPEC(tv, &ts);
ts_p = &ts;
}
/* Build "changes" from "base->changes" */
EVUTIL_ASSERT(kqop->changes);
n_changes = kq_build_changes_list(&base->changelist, kqop);
if (n_changes < 0)
return -1;
event_changelist_remove_all(&base->changelist, base);
/* steal the changes array in case some broken code tries to call
* dispatch twice at once. */
changes = kqop->changes;
kqop->changes = NULL;
EVBASE_RELEASE_LOCK(base, th_base_lock);
res = kevent(kqop->kq, changes, n_changes,
events, kqop->events_size, ts_p);
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
EVUTIL_ASSERT(kqop->changes == NULL);
kqop->changes = changes;
if (res == -1) {
if (errno != EINTR) {
event_warn("kevent");
return (-1);
}
return (0);
}
event_debug(("%s: kevent reports %d", __func__, res));
for (i = 0; i < res; i++) {
int which = 0;
if (events[i].flags & EV_ERROR) {
/*
* Error messages that can happen when a delete fails:
* EBADF happens when the file descriptor has been
* closed,
* ENOENT when the file descriptor was closed and
* then reopened.
* EINVAL for some reasons not understood; EINVAL
* should not be returned ever; but FreeBSD does :-\
* An error is also indicated when a callback deletes
* an event we are still processing. In that case
* the data field is set to ENOENT.
*/
if (events[i].data == EBADF ||
events[i].data == EINVAL ||
events[i].data == ENOENT)
continue;
errno = events[i].data;
return (-1);
}
if (events[i].filter == EVFILT_READ) {
which |= EV_READ;
} else if (events[i].filter == EVFILT_WRITE) {
which |= EV_WRITE;
} else if (events[i].filter == EVFILT_SIGNAL) {
which |= EV_SIGNAL;
}
if (!which)
continue;
if (events[i].filter == EVFILT_SIGNAL) {
evmap_signal_active(base, events[i].ident, 1);
} else {
evmap_io_active(base, events[i].ident, which | EV_ET);
}
}
if (res == kqop->events_size) {
struct kevent *newresult;
int size = kqop->events_size;
/* We used all the events space that we have. Maybe we should
make it bigger. */
size *= 2;
newresult = mm_realloc(kqop->events,
size * sizeof(struct kevent));
if (newresult) {
kqop->events = newresult;
kqop->events_size = size;
}
}
return (0);
}
static void
kqop_free(struct kqop *kqop)
{
if (kqop->changes)
mm_free(kqop->changes);
if (kqop->events)
mm_free(kqop->events);
if (kqop->kq >= 0 && kqop->pid == getpid())
close(kqop->kq);
memset(kqop, 0, sizeof(struct kqop));
mm_free(kqop);
}
static void
kq_dealloc(struct event_base *base)
{
struct kqop *kqop = base->evbase;
evsig_dealloc(base);
kqop_free(kqop);
}
/* signal handling */
static int
kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
{
struct kqop *kqop = base->evbase;
struct kevent kev;
struct timespec timeout = { 0, 0 };
(void)p;
EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_ADD;
/* Be ready for the signal if it is sent any
* time between now and the next call to
* kq_dispatch. */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsig_set_handler(base, nsignal, kq_sighandler) == -1)
return (-1);
return (0);
}
static int
kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
{
struct kqop *kqop = base->evbase;
struct kevent kev;
struct timespec timeout = { 0, 0 };
(void)p;
EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
memset(&kev, 0, sizeof(kev));
kev.ident = nsignal;
kev.filter = EVFILT_SIGNAL;
kev.flags = EV_DELETE;
/* Because we insert signal events
* immediately, we need to delete them
* immediately, too */
if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
return (-1);
if (_evsig_restore_handler(base, nsignal) == -1)
return (-1);
return (0);
}
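/* Illustrative sketch (not part of the patch): an application can steer away
 * from this backend through the public event_config API; "kqueue" matches the
 * name string in kqops above. */
#include <event2/event.h>
#include <stdio.h>

static struct event_base *base_without_kqueue(void)
{
    struct event_config *cfg = event_config_new();
    struct event_base *base;

    if (!cfg)
        return NULL;
    event_config_avoid_method(cfg, "kqueue");
    base = event_base_new_with_config(cfg);
    event_config_free(cfg);
    if (base)
        printf("selected backend: %s\n", event_base_get_method(base));
    return base;
}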

Просмотреть файл

@ -1,16 +0,0 @@
#libevent pkg-config source file
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libevent
Description: libevent is an asynchronous notification event loop library
Version: @VERSION@
Requires:
Conflicts:
Libs: -L${libdir} -levent
Libs.private: @LIBS@
Cflags: -I${includedir}

Просмотреть файл

@ -1,16 +0,0 @@
#libevent pkg-config source file
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libevent_openssl
Description: libevent_openssl adds openssl-based TLS support to libevent
Version: @VERSION@
Requires: libevent
Conflicts:
Libs: -L${libdir} -levent_openssl
Libs.private: @LIBS@ -lcrypto -lssl
Cflags: -I${includedir}

Просмотреть файл

@ -1,16 +0,0 @@
#libevent pkg-config source file
prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
includedir=@includedir@
Name: libevent_pthreads
Description: libevent_pthreads adds pthreads-based threading support to libevent
Version: @VERSION@
Requires: libevent
Conflicts:
Libs: -L${libdir} -levent_pthreads
Libs.private: @LIBS@ @PTHREAD_LIBS@
Cflags: -I${includedir} @PTHREAD_CFLAGS@

Просмотреть файл

@ -1,630 +0,0 @@
/*
* Copyright (c) 2009-2010 Niels Provos, Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/types.h>
#include "event2/event-config.h"
#ifdef WIN32
#include <winsock2.h>
#include <ws2tcpip.h>
#include <mswsock.h>
#endif
#include <errno.h>
#ifdef _EVENT_HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef _EVENT_HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef _EVENT_HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <event2/listener.h>
#include <event2/util.h>
#include <event2/event.h>
#include <event2/event_struct.h>
#include "mm-internal.h"
#include "util-internal.h"
#include "log-internal.h"
#ifdef WIN32
#include "iocp-internal.h"
#include "defer-internal.h"
#endif
struct evconnlistener_ops {
int (*enable)(struct evconnlistener *);
int (*disable)(struct evconnlistener *);
void (*destroy)(struct evconnlistener *);
evutil_socket_t (*getfd)(struct evconnlistener *);
struct event_base *(*getbase)(struct evconnlistener *);
};
struct evconnlistener {
const struct evconnlistener_ops *ops;
evconnlistener_cb cb;
void *user_data;
unsigned flags;
};
struct evconnlistener_event {
struct evconnlistener base;
struct event listener;
};
#ifdef WIN32
struct evconnlistener_iocp {
struct evconnlistener base;
evutil_socket_t fd;
struct event_base *event_base;
struct event_iocp_port *port;
CRITICAL_SECTION lock;
int n_accepting;
struct accepting_socket **accepting;
};
#endif
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
evutil_socket_t fd); /* XXXX export this? */
static int event_listener_enable(struct evconnlistener *);
static int event_listener_disable(struct evconnlistener *);
static void event_listener_destroy(struct evconnlistener *);
static evutil_socket_t event_listener_getfd(struct evconnlistener *);
static struct event_base *event_listener_getbase(struct evconnlistener *);
static const struct evconnlistener_ops evconnlistener_event_ops = {
event_listener_enable,
event_listener_disable,
event_listener_destroy,
event_listener_getfd,
event_listener_getbase
};
static void listener_read_cb(evutil_socket_t, short, void *);
struct evconnlistener *
evconnlistener_new(struct event_base *base,
evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
evutil_socket_t fd)
{
struct evconnlistener_event *lev;
#ifdef WIN32
if (base && event_base_get_iocp(base)) {
const struct win32_extension_fns *ext =
event_get_win32_extension_fns();
if (ext->AcceptEx && ext->GetAcceptExSockaddrs)
return evconnlistener_new_async(base, cb, ptr, flags,
backlog, fd);
}
#endif
if (backlog > 0) {
if (listen(fd, backlog) < 0)
return NULL;
} else if (backlog < 0) {
if (listen(fd, 128) < 0)
return NULL;
}
lev = mm_calloc(1, sizeof(struct evconnlistener_event));
if (!lev)
return NULL;
lev->base.ops = &evconnlistener_event_ops;
lev->base.cb = cb;
lev->base.user_data = ptr;
lev->base.flags = flags;
event_assign(&lev->listener, base, fd, EV_READ|EV_PERSIST,
listener_read_cb, lev);
evconnlistener_enable(&lev->base);
return &lev->base;
}
struct evconnlistener *
evconnlistener_new_bind(struct event_base *base, evconnlistener_cb cb,
void *ptr, unsigned flags, int backlog, const struct sockaddr *sa,
int socklen)
{
struct evconnlistener *listener;
evutil_socket_t fd;
int on = 1;
int family = sa ? sa->sa_family : AF_UNSPEC;
if (backlog == 0)
return NULL;
fd = socket(family, SOCK_STREAM, 0);
if (fd == -1)
return NULL;
if (evutil_make_socket_nonblocking(fd) < 0) {
evutil_closesocket(fd);
return NULL;
}
if (flags & LEV_OPT_CLOSE_ON_EXEC) {
if (evutil_make_socket_closeonexec(fd) < 0) {
evutil_closesocket(fd);
return NULL;
}
}
setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void*)&on, sizeof(on));
if (flags & LEV_OPT_REUSEABLE) {
evutil_make_listen_socket_reuseable(fd);
}
if (sa) {
if (bind(fd, sa, socklen)<0) {
evutil_closesocket(fd);
return NULL;
}
}
listener = evconnlistener_new(base, cb, ptr, flags, backlog, fd);
if (!listener) {
evutil_closesocket(fd);
return NULL;
}
return listener;
}
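/* Usage sketch (not part of the patch) for the public listener API
 * implemented here; port 9995 is an arbitrary example. */
#include <event2/listener.h>
#include <event2/event.h>
#include <string.h>
#ifndef WIN32
#include <sys/socket.h>
#include <netinet/in.h>     /* struct sockaddr_in, htons on POSIX systems */
#endif

static void example_accept_cb(struct evconnlistener *listener,
    evutil_socket_t fd, struct sockaddr *sa, int socklen, void *arg)
{
    /* a new connection is available on fd; hand it to a bufferevent, etc. */
    (void)listener; (void)fd; (void)sa; (void)socklen; (void)arg;
}

static struct evconnlistener *listen_example(struct event_base *base)
{
    struct sockaddr_in sin;

    memset(&sin, 0, sizeof(sin));       /* zeroed s_addr means INADDR_ANY */
    sin.sin_family = AF_INET;
    sin.sin_port = htons(9995);
    return evconnlistener_new_bind(base, example_accept_cb, NULL,
        LEV_OPT_CLOSE_ON_FREE | LEV_OPT_REUSEABLE, -1,
        (struct sockaddr *)&sin, sizeof(sin));
}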
void
evconnlistener_free(struct evconnlistener *lev)
{
lev->ops->destroy(lev);
mm_free(lev);
}
static void
event_listener_destroy(struct evconnlistener *lev)
{
struct evconnlistener_event *lev_e =
EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
event_del(&lev_e->listener);
if (lev->flags & LEV_OPT_CLOSE_ON_FREE)
evutil_closesocket(event_get_fd(&lev_e->listener));
event_debug_unassign(&lev_e->listener);
}
int
evconnlistener_enable(struct evconnlistener *lev)
{
return lev->ops->enable(lev);
}
int
evconnlistener_disable(struct evconnlistener *lev)
{
return lev->ops->disable(lev);
}
static int
event_listener_enable(struct evconnlistener *lev)
{
struct evconnlistener_event *lev_e =
EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
return event_add(&lev_e->listener, NULL);
}
static int
event_listener_disable(struct evconnlistener *lev)
{
struct evconnlistener_event *lev_e =
EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
return event_del(&lev_e->listener);
}
evutil_socket_t
evconnlistener_get_fd(struct evconnlistener *lev)
{
return lev->ops->getfd(lev);
}
static evutil_socket_t
event_listener_getfd(struct evconnlistener *lev)
{
struct evconnlistener_event *lev_e =
EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
return event_get_fd(&lev_e->listener);
}
struct event_base *
evconnlistener_get_base(struct evconnlistener *lev)
{
return lev->ops->getbase(lev);
}
static struct event_base *
event_listener_getbase(struct evconnlistener *lev)
{
struct evconnlistener_event *lev_e =
EVUTIL_UPCAST(lev, struct evconnlistener_event, base);
return event_get_base(&lev_e->listener);
}
static void
listener_read_cb(evutil_socket_t fd, short what, void *p)
{
struct evconnlistener *lev = p;
int err;
while (1) {
struct sockaddr_storage ss;
socklen_t socklen = sizeof(ss);
evutil_socket_t new_fd = accept(fd, (struct sockaddr*)&ss, &socklen);
if (new_fd < 0)
break;
if (!(lev->flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
evutil_make_socket_nonblocking(new_fd);
lev->cb(lev, new_fd, (struct sockaddr*)&ss, (int)socklen,
lev->user_data);
}
err = evutil_socket_geterror(fd);
if (EVUTIL_ERR_ACCEPT_RETRIABLE(err))
return;
event_sock_warn(fd, "Error from accept() call");
}
#ifdef WIN32
struct accepting_socket {
CRITICAL_SECTION lock;
struct event_overlapped overlapped;
SOCKET s;
struct deferred_cb deferred;
struct evconnlistener_iocp *lev;
ev_uint8_t buflen;
ev_uint8_t family;
unsigned free_on_cb:1;
char addrbuf[1];
};
static void accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key,
ev_ssize_t n, int ok);
static void accepted_socket_invoke_user_cb(struct deferred_cb *cb, void *arg);
static struct accepting_socket *
new_accepting_socket(struct evconnlistener_iocp *lev, int family)
{
struct accepting_socket *res;
int addrlen;
int buflen;
if (family == AF_INET)
addrlen = sizeof(struct sockaddr_in);
else if (family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
else
return NULL;
buflen = (addrlen+16)*2;
res = mm_calloc(1,sizeof(struct accepting_socket)-1+buflen);
if (!res)
return NULL;
event_overlapped_init(&res->overlapped, accepted_socket_cb);
res->s = INVALID_SOCKET;
res->lev = lev;
res->buflen = buflen;
res->family = family;
event_deferred_cb_init(&res->deferred,
accepted_socket_invoke_user_cb, res);
InitializeCriticalSectionAndSpinCount(&res->lock, 1000);
return res;
}
static void
free_and_unlock_accepting_socket(struct accepting_socket *as)
{
/* requires lock. */
if (as->s != INVALID_SOCKET)
closesocket(as->s);
LeaveCriticalSection(&as->lock);
DeleteCriticalSection(&as->lock);
mm_free(as);
}
static int
start_accepting(struct accepting_socket *as)
{
/* requires lock */
const struct win32_extension_fns *ext = event_get_win32_extension_fns();
DWORD pending = 0;
SOCKET s = socket(as->family, SOCK_STREAM, 0);
if (s == INVALID_SOCKET)
return -1;
setsockopt(s, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
(char *)&as->lev->fd, sizeof(as->lev->fd));
if (!(as->lev->base.flags & LEV_OPT_LEAVE_SOCKETS_BLOCKING))
evutil_make_socket_nonblocking(s);
if (event_iocp_port_associate(as->lev->port, s, 1) < 0) {
closesocket(s);
return -1;
}
as->s = s;
if (ext->AcceptEx(as->lev->fd, s, as->addrbuf, 0,
as->buflen/2, as->buflen/2, &pending, &as->overlapped.overlapped))
{
/* Immediate success! */
accepted_socket_cb(&as->overlapped, 1, 0, 1);
} else {
int err = WSAGetLastError();
if (err != ERROR_IO_PENDING) {
event_warnx("AcceptEx: %s", evutil_socket_error_to_string(err));
return -1;
}
}
return 0;
}
static void
stop_accepting(struct accepting_socket *as)
{
/* requires lock. */
SOCKET s = as->s;
as->s = INVALID_SOCKET;
closesocket(s);
}
static void
accepted_socket_invoke_user_cb(struct deferred_cb *cb, void *arg)
{
struct accepting_socket *as = arg;
struct sockaddr *sa_local=NULL, *sa_remote=NULL;
int socklen_local=0, socklen_remote=0;
const struct win32_extension_fns *ext = event_get_win32_extension_fns();
EVUTIL_ASSERT(ext->GetAcceptExSockaddrs);
EnterCriticalSection(&as->lock);
if (as->free_on_cb) {
free_and_unlock_accepting_socket(as);
return;
}
ext->GetAcceptExSockaddrs(
as->addrbuf, 0, as->buflen/2, as->buflen/2,
&sa_local, &socklen_local, &sa_remote, &socklen_remote);
as->lev->base.cb(&as->lev->base, as->s, sa_remote,
socklen_remote, as->lev->base.user_data);
as->s = INVALID_SOCKET;
start_accepting(as); /* XXXX handle error */
LeaveCriticalSection(&as->lock);
}
static void
accepted_socket_cb(struct event_overlapped *o, ev_uintptr_t key, ev_ssize_t n, int ok)
{
struct accepting_socket *as =
EVUTIL_UPCAST(o, struct accepting_socket, overlapped);
EnterCriticalSection(&as->lock);
if (ok) {
/* XXXX Don't do this if some EV_MT flag is set. */
event_deferred_cb_schedule(
event_base_get_deferred_cb_queue(as->lev->event_base),
&as->deferred);
LeaveCriticalSection(&as->lock);
} else if (as->free_on_cb) {
free_and_unlock_accepting_socket(as);
} else if (as->s == INVALID_SOCKET) {
/* This is okay; we were disabled by iocp_listener_disable. */
LeaveCriticalSection(&as->lock);
} else {
/* Some error on accept that we couldn't actually handle. */
event_sock_warn(as->s, "Unexpected error on AcceptEx");
LeaveCriticalSection(&as->lock);
/* XXXX recover better. */
}
}
static int
iocp_listener_enable(struct evconnlistener *lev)
{
int i;
struct evconnlistener_iocp *lev_iocp =
EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
EnterCriticalSection(&lev_iocp->lock);
for (i = 0; i < lev_iocp->n_accepting; ++i) {
struct accepting_socket *as = lev_iocp->accepting[i];
if (!as)
continue;
EnterCriticalSection(&as->lock);
if (!as->free_on_cb && as->s == INVALID_SOCKET)
start_accepting(as); /* XXXX handle error */
LeaveCriticalSection(&as->lock);
}
LeaveCriticalSection(&lev_iocp->lock);
return 0;
}
static int
iocp_listener_disable_impl(struct evconnlistener *lev, int shutdown)
{
int i;
struct evconnlistener_iocp *lev_iocp =
EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
EnterCriticalSection(&lev_iocp->lock);
for (i = 0; i < lev_iocp->n_accepting; ++i) {
struct accepting_socket *as = lev_iocp->accepting[i];
if (!as)
continue;
EnterCriticalSection(&as->lock);
if (!as->free_on_cb && as->s != INVALID_SOCKET) {
if (shutdown)
as->free_on_cb = 1;
stop_accepting(as);
}
LeaveCriticalSection(&as->lock);
}
LeaveCriticalSection(&lev_iocp->lock);
return 0;
}
static int
iocp_listener_disable(struct evconnlistener *lev)
{
return iocp_listener_disable_impl(lev,0);
}
static void
iocp_listener_destroy(struct evconnlistener *lev)
{
iocp_listener_disable_impl(lev,1);
}
static evutil_socket_t
iocp_listener_getfd(struct evconnlistener *lev)
{
struct evconnlistener_iocp *lev_iocp =
EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
return lev_iocp->fd;
}
static struct event_base *
iocp_listener_getbase(struct evconnlistener *lev)
{
struct evconnlistener_iocp *lev_iocp =
EVUTIL_UPCAST(lev, struct evconnlistener_iocp, base);
return lev_iocp->event_base;
}
static const struct evconnlistener_ops evconnlistener_iocp_ops = {
iocp_listener_enable,
iocp_listener_disable,
iocp_listener_destroy,
iocp_listener_getfd,
iocp_listener_getbase
};
/* XXX define some way to override this. */
#define N_SOCKETS_PER_LISTENER 4
struct evconnlistener *
evconnlistener_new_async(struct event_base *base,
evconnlistener_cb cb, void *ptr, unsigned flags, int backlog,
evutil_socket_t fd)
{
struct sockaddr_storage ss;
int socklen = sizeof(ss);
struct evconnlistener_iocp *lev;
int i;
if (!base || !event_base_get_iocp(base))
goto err;
/* XXXX duplicate code */
if (backlog > 0) {
if (listen(fd, backlog) < 0)
goto err;
} else if (backlog < 0) {
if (listen(fd, 128) < 0)
goto err;
}
if (getsockname(fd, (struct sockaddr*)&ss, &socklen)) {
event_sock_warn(fd, "getsockname");
goto err;
}
lev = mm_calloc(1, sizeof(struct evconnlistener_iocp));
if (!lev) {
event_warn("calloc");
goto err;
}
lev->base.ops = &evconnlistener_iocp_ops;
lev->base.cb = cb;
lev->base.user_data = ptr;
lev->base.flags = flags;
lev->port = event_base_get_iocp(base);
lev->fd = fd;
lev->event_base = base;
if (event_iocp_port_associate(lev->port, fd, 1) < 0)
goto err_free_lev;
InitializeCriticalSectionAndSpinCount(&lev->lock, 1000);
lev->n_accepting = N_SOCKETS_PER_LISTENER;
lev->accepting = mm_calloc(lev->n_accepting,
sizeof(struct accepting_socket *));
if (!lev->accepting) {
event_warn("calloc");
goto err_delete_lock;
}
for (i = 0; i < lev->n_accepting; ++i) {
lev->accepting[i] = new_accepting_socket(lev, ss.ss_family);
if (!lev->accepting[i]) {
event_warnx("Couldn't create accepting socket");
goto err_free_accepting;
}
if (start_accepting(lev->accepting[i]) < 0) {
event_warnx("Couldn't start accepting on socket");
EnterCriticalSection(&lev->accepting[i]->lock);
free_and_unlock_accepting_socket(lev->accepting[i]);
goto err_free_accepting;
}
}
return &lev->base;
err_free_accepting:
mm_free(lev->accepting);
/* XXXX free the other elements. */
err_delete_lock:
DeleteCriticalSection(&lev->lock);
err_free_lev:
mm_free(lev);
err:
/* Don't close the fd; it is the caller's responsibility. */
return NULL;
}
#endif

Просмотреть файл

@ -1,62 +0,0 @@
/*
* Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _LOG_H_
#define _LOG_H_
#include <event2/util.h>
#ifdef __GNUC__
#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
#define EV_NORETURN __attribute__((noreturn))
#else
#define EV_CHECK_FMT(a,b)
#define EV_NORETURN
#endif
#define _EVENT_ERR_ABORT 0xdeaddead
void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(3,4) EV_NORETURN;
void event_sock_warn(evutil_socket_t sock, const char *fmt, ...) EV_CHECK_FMT(2,3);
void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3) EV_NORETURN;
void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
#ifndef _EVENT_DISABLE_DEBUG_MODE
#define event_debug(x) _event_debugx x
#else
#define event_debug(x) do {;} while (0)
#endif
#undef EV_CHECK_FMT
/**** OMPI CHANGE ****/
extern int event_enable_debug_output;
#endif
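/* Sketch (not part of the patch) of the OMPI-added switch declared above:
 * assuming this header is included, flipping the flag before entering the
 * event loop makes event_debug()/_event_debugx() messages reach the logger. */
static void enable_libevent_debug_output(void)
{
    event_enable_debug_output = 1;
}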

Просмотреть файл

@ -1,231 +0,0 @@
/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
/*
* log.c
*
* Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
*
* Copyright (c) 2005-2010 Niels Provos and Nick Mathewson
*
* Copyright (c) 2000 Dug Song <dugsong@monkey.org>
*
* Copyright (c) 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "event2/event-config.h"
#ifdef WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include "event2/event.h"
#include "event2/util.h"
#include "log-internal.h"
static void _warn_helper(int severity, const char *errstr, const char *fmt,
va_list ap);
static void event_log(int severity, const char *msg);
static void event_exit(int errcode) EV_NORETURN;
static event_fatal_cb fatal_fn = NULL;
/**** OMPI CHANGE ****/
int event_enable_debug_output = 0;
void
event_set_fatal_callback(event_fatal_cb cb)
{
fatal_fn = cb;
}
static void
event_exit(int errcode)
{
if (fatal_fn) {
fatal_fn(errcode);
exit(errcode); /* should never be reached */
} else if (errcode == _EVENT_ERR_ABORT)
abort();
else
exit(errcode);
}
void
event_err(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, strerror(errno), fmt, ap);
va_end(ap);
event_exit(eval);
}
void
event_warn(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, strerror(errno), fmt, ap);
va_end(ap);
}
void
event_sock_err(int eval, evutil_socket_t sock, const char *fmt, ...)
{
va_list ap;
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
event_exit(eval);
}
void
event_sock_warn(evutil_socket_t sock, const char *fmt, ...)
{
va_list ap;
int err = evutil_socket_geterror(sock);
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, evutil_socket_error_to_string(err), fmt, ap);
va_end(ap);
}
void
event_errx(int eval, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_ERR, NULL, fmt, ap);
va_end(ap);
event_exit(eval);
}
void
event_warnx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_WARN, NULL, fmt, ap);
va_end(ap);
}
void
event_msgx(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_MSG, NULL, fmt, ap);
va_end(ap);
}
void
_event_debugx(const char *fmt, ...)
{
va_list ap;
if (!event_enable_debug_output) {
return;
}
va_start(ap, fmt);
_warn_helper(_EVENT_LOG_DEBUG, NULL, fmt, ap);
va_end(ap);
}
static void
_warn_helper(int severity, const char *errstr, const char *fmt, va_list ap)
{
char buf[1024];
size_t len;
if (fmt != NULL)
evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
else
buf[0] = '\0';
if (errstr) {
len = strlen(buf);
if (len < sizeof(buf) - 3) {
evutil_snprintf(buf + len, sizeof(buf) - len, ": %s", errstr);
}
}
event_log(severity, buf);
}
static event_log_cb log_fn = NULL;
void
event_set_log_callback(event_log_cb cb)
{
log_fn = cb;
}
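/* Usage sketch (not part of the patch): routing libevent's log output through
 * an application logger via the callback hook implemented above. */
#include <event2/event.h>
#include <stdio.h>

static void example_log_cb(int severity, const char *msg)
{
    /* severity is one of the _EVENT_LOG_* levels handled below */
    fprintf(stderr, "libevent[%d]: %s\n", severity, msg);
}

static void install_example_log_cb(void)
{
    event_set_log_callback(example_log_cb);
}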
static void
event_log(int severity, const char *msg)
{
if (log_fn)
log_fn(severity, msg);
else {
const char *severity_str;
switch (severity) {
case _EVENT_LOG_DEBUG:
severity_str = "debug";
break;
case _EVENT_LOG_MSG:
severity_str = "msg";
break;
case _EVENT_LOG_WARN:
severity_str = "warn";
break;
case _EVENT_LOG_ERR:
severity_str = "err";
break;
default:
severity_str = "???";
break;
}
(void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
}
}

Просмотреть файл

@ -1,279 +0,0 @@
##### http://autoconf-archive.cryp.to/acx_pthread.html
#
# SYNOPSIS
#
# ACX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]])
#
# DESCRIPTION
#
# This macro figures out how to build C programs using POSIX threads.
# It sets the PTHREAD_LIBS output variable to the threads library and
# linker flags, and the PTHREAD_CFLAGS output variable to any special
# C compiler flags that are needed. (The user can also force certain
# compiler flags/libs to be tested by setting these environment
# variables.)
#
# Also sets PTHREAD_CC to any special C compiler that is needed for
# multi-threaded programs (defaults to the value of CC otherwise).
# (This is necessary on AIX to use the special cc_r compiler alias.)
#
# NOTE: You are assumed to not only compile your program with these
# flags, but also link it with them as well. e.g. you should link
# with $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS
# $LIBS
#
# If you are only building threads programs, you may wish to use
# these variables in your default LIBS, CFLAGS, and CC:
#
# LIBS="$PTHREAD_LIBS $LIBS"
# CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
# CC="$PTHREAD_CC"
#
# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute
# constant has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to
# that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX).
#
# ACTION-IF-FOUND is a list of shell commands to run if a threads
# library is found, and ACTION-IF-NOT-FOUND is a list of commands to
# run if it is not found. If ACTION-IF-FOUND is not specified, the
# default action will define HAVE_PTHREAD.
#
# Please let the authors know if this macro fails on any platform, or
# if you have any other suggestions or comments. This macro was based
# on work by SGJ on autoconf scripts for FFTW (http://www.fftw.org/)
# (with help from M. Frigo), as well as ac_pthread and hb_pthread
# macros posted by Alejandro Forero Cuervo to the autoconf macro
# repository. We are also grateful for the helpful feedback of
# numerous users.
#
# LAST MODIFICATION
#
# 2007-07-29
#
# COPYLEFT
#
# Copyright (c) 2007 Steven G. Johnson <stevenj@alum.mit.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright
# owner gives unlimited permission to copy, distribute and modify the
# configure scripts that are the output of Autoconf when processing
# the Macro. You need not follow the terms of the GNU General Public
# License when using or distributing such scripts, even though
# portions of the text of the Macro appear in them. The GNU General
# Public License (GPL) does govern all other use of the material that
# constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the
# Autoconf Macro released by the Autoconf Macro Archive. When you
# make and distribute a modified version of the Autoconf Macro, you
# may extend this special exception to the GPL to apply to your
# modified version as well.
AC_DEFUN([ACX_PTHREAD], [
AC_REQUIRE([AC_CANONICAL_HOST])
AC_LANG_SAVE
AC_LANG_C
acx_pthread_ok=no
# We used to check for pthread.h first, but this fails if pthread.h
# requires special compiler flags (e.g. on Tru64 or Sequent).
# It gets checked for in the link test anyway.
# First of all, check if the user has set any of the PTHREAD_LIBS,
# etcetera environment variables, and if threads linking works using
# them:
if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then
save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
save_LIBS="$LIBS"
LIBS="$PTHREAD_LIBS $LIBS"
AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS])
AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes)
AC_MSG_RESULT($acx_pthread_ok)
if test x"$acx_pthread_ok" = xno; then
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
fi
LIBS="$save_LIBS"
CFLAGS="$save_CFLAGS"
fi
# We must check for the threads library under a number of different
# names; the ordering is very important because some systems
# (e.g. DEC) have both -lpthread and -lpthreads, where one of the
# libraries is broken (non-POSIX).
# Create a list of thread flags to try. Items starting with a "-" are
# C compiler flags, and other items are library names, except for "none"
# which indicates that we try without any flags at all, and "pthread-config"
# which is a program returning the flags for the Pth emulation library.
acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config"
# The ordering *is* (sometimes) important. Some notes on the
# individual items follow:
# pthreads: AIX (must check this before -lpthread)
# none: in case threads are in libc; should be tried before -Kthread and
# other compiler flags to prevent continual compiler warnings
# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h)
# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able)
# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread)
# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads)
# -pthreads: Solaris/gcc
# -mthreads: Mingw32/gcc, Lynx/gcc
# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it
# doesn't hurt to check since this sometimes defines pthreads too;
# also defines -D_REENTRANT)
# ... -mt is also the pthreads flag for HP/aCC
# pthread: Linux, etcetera
# --thread-safe: KAI C++
# pthread-config: use pthread-config program (for GNU Pth library)
case "${host_cpu}-${host_os}" in
*solaris*)
# On Solaris (at least, for some versions), libc contains stubbed
# (non-functional) versions of the pthreads routines, so link-based
# tests will erroneously succeed. (We need to link with -pthreads/-mt/
# -lpthread.) (The stubs are missing pthread_cleanup_push, or rather
# a function called by this macro, so we could check for that, but
# who knows whether they'll stub that too in a future libc.) So,
# we'll just look for -pthreads and -lpthread first:
acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags"
;;
esac
if test x"$acx_pthread_ok" = xno; then
for flag in $acx_pthread_flags; do
case $flag in
none)
AC_MSG_CHECKING([whether pthreads work without any flags])
;;
-*)
AC_MSG_CHECKING([whether pthreads work with $flag])
PTHREAD_CFLAGS="$flag"
;;
pthread-config)
AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no)
if test x"$acx_pthread_config" = xno; then continue; fi
PTHREAD_CFLAGS="`pthread-config --cflags`"
PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`"
;;
*)
AC_MSG_CHECKING([for the pthreads library -l$flag])
PTHREAD_LIBS="-l$flag"
;;
esac
save_LIBS="$LIBS"
save_CFLAGS="$CFLAGS"
LIBS="$PTHREAD_LIBS $LIBS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
# Check for various functions. We must include pthread.h,
# since some functions may be macros. (On the Sequent, we
# need a special flag -Kthread to make this header compile.)
# We check for pthread_join because it is in -lpthread on IRIX
# while pthread_create is in libc. We check for pthread_attr_init
# due to DEC craziness with -lpthreads. We check for
# pthread_cleanup_push because it is one of the few pthread
# functions on Solaris that doesn't have a non-functional libc stub.
# We try pthread_create on general principles.
AC_TRY_LINK([#include <pthread.h>],
[pthread_t th; pthread_join(th, 0);
pthread_attr_init(0); pthread_cleanup_push(0, 0);
pthread_create(0,0,0,0); pthread_cleanup_pop(0); ],
[acx_pthread_ok=yes])
LIBS="$save_LIBS"
CFLAGS="$save_CFLAGS"
AC_MSG_RESULT($acx_pthread_ok)
if test "x$acx_pthread_ok" = xyes; then
break;
fi
PTHREAD_LIBS=""
PTHREAD_CFLAGS=""
done
fi
# Various other checks:
if test "x$acx_pthread_ok" = xyes; then
save_LIBS="$LIBS"
LIBS="$PTHREAD_LIBS $LIBS"
save_CFLAGS="$CFLAGS"
CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
# Detect AIX lossage: JOINABLE attribute is called UNDETACHED.
AC_MSG_CHECKING([for joinable pthread attribute])
attr_name=unknown
for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do
AC_TRY_LINK([#include <pthread.h>], [int attr=$attr; return attr;],
[attr_name=$attr; break])
done
AC_MSG_RESULT($attr_name)
if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then
AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name,
[Define to necessary symbol if this constant
uses a non-standard name on your system.])
fi
AC_MSG_CHECKING([if more special flags are required for pthreads])
flag=no
case "${host_cpu}-${host_os}" in
*-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";;
*solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";;
esac
AC_MSG_RESULT(${flag})
if test "x$flag" != xno; then
PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS"
fi
LIBS="$save_LIBS"
CFLAGS="$save_CFLAGS"
# More AIX lossage: must compile with xlc_r or cc_r
if test x"$GCC" != xyes; then
AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC})
else
PTHREAD_CC=$CC
fi
else
PTHREAD_CC="$CC"
fi
AC_SUBST(PTHREAD_LIBS)
AC_SUBST(PTHREAD_CFLAGS)
AC_SUBST(PTHREAD_CC)
# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND:
if test x"$acx_pthread_ok" = xyes; then
ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1])
:
else
acx_pthread_ok=no
$2
fi
AC_LANG_RESTORE
])dnl ACX_PTHREAD
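
The macro above only link-tests the candidate flags; it never runs the program it builds. As a rough illustration (not part of this patch) of what the resulting PTHREAD_CC, PTHREAD_CFLAGS and PTHREAD_LIBS are meant to compile, a standalone sketch exercising the same pthread entry points touched by the AC_TRY_LINK body could look like this:

/* Hedged sketch, not patch content: expected to build as something like
 *   $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS demo.c $PTHREAD_LIBS
 * once ACX_PTHREAD has substituted the variables. */
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    return arg;                    /* echoed back through pthread_join */
}

int main(void)
{
    pthread_t th;
    pthread_attr_t attr;
    void *result = NULL;

    if (pthread_attr_init(&attr) != 0)
        return 1;
    if (pthread_create(&th, &attr, worker, (void *)"joined") != 0)
        return 1;
    if (pthread_join(th, &result) != 0)
        return 1;

    printf("%s\n", (const char *)result);
    return 0;
}

Unlike this sketch, the macro's test program deliberately passes null arguments: it only needs the symbols to resolve at link time under the flag being probed.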

View file

@ -1,160 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MIN_HEAP_H_
#define _MIN_HEAP_H_
#include "event2/event-config.h"
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/util.h"
#include "util-internal.h"
#include "mm-internal.h"
typedef struct min_heap
{
struct event** p;
unsigned n, a;
} min_heap_t;
static inline void min_heap_ctor(min_heap_t* s);
static inline void min_heap_dtor(min_heap_t* s);
static inline void min_heap_elem_init(struct event* e);
static inline int min_heap_elt_is_top(const struct event *e);
static inline int min_heap_elem_greater(struct event *a, struct event *b);
static inline int min_heap_empty(min_heap_t* s);
static inline unsigned min_heap_size(min_heap_t* s);
static inline struct event* min_heap_top(min_heap_t* s);
static inline int min_heap_reserve(min_heap_t* s, unsigned n);
static inline int min_heap_push(min_heap_t* s, struct event* e);
static inline struct event* min_heap_pop(min_heap_t* s);
static inline int min_heap_erase(min_heap_t* s, struct event* e);
static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
int min_heap_elem_greater(struct event *a, struct event *b)
{
return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
}
void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
void min_heap_dtor(min_heap_t* s) { if (s->p) mm_free(s->p); }
void min_heap_elem_init(struct event* e) { e->ev_timeout_pos.min_heap_idx = -1; }
int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
unsigned min_heap_size(min_heap_t* s) { return s->n; }
struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
int min_heap_push(min_heap_t* s, struct event* e)
{
if (min_heap_reserve(s, s->n + 1))
return -1;
min_heap_shift_up_(s, s->n++, e);
return 0;
}
struct event* min_heap_pop(min_heap_t* s)
{
if (s->n)
{
struct event* e = *s->p;
min_heap_shift_down_(s, 0u, s->p[--s->n]);
e->ev_timeout_pos.min_heap_idx = -1;
return e;
}
return 0;
}
int min_heap_elt_is_top(const struct event *e)
{
return e->ev_timeout_pos.min_heap_idx == 0;
}
int min_heap_erase(min_heap_t* s, struct event* e)
{
if (((int)-1) != e->ev_timeout_pos.min_heap_idx)
{
struct event *last = s->p[--s->n];
unsigned parent = (e->ev_timeout_pos.min_heap_idx - 1) / 2;
/* we replace e with the last element in the heap. We might need to
shift it upward if it is less than its parent, or downward if it is
greater than one or both its children. Since the children are known
to be less than the parent, it can't need to shift both up and
down. */
if (e->ev_timeout_pos.min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
min_heap_shift_up_(s, e->ev_timeout_pos.min_heap_idx, last);
else
min_heap_shift_down_(s, e->ev_timeout_pos.min_heap_idx, last);
e->ev_timeout_pos.min_heap_idx = -1;
return 0;
}
return -1;
}
int min_heap_reserve(min_heap_t* s, unsigned n)
{
if (s->a < n)
{
struct event** p;
unsigned a = s->a ? s->a * 2 : 8;
if (a < n)
a = n;
if (!(p = (struct event**)mm_realloc(s->p, a * sizeof *p)))
return -1;
s->p = p;
s->a = a;
}
return 0;
}
void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned parent = (hole_index - 1) / 2;
while (hole_index && min_heap_elem_greater(s->p[parent], e))
{
(s->p[hole_index] = s->p[parent])->ev_timeout_pos.min_heap_idx = hole_index;
hole_index = parent;
parent = (hole_index - 1) / 2;
}
(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}
void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
{
unsigned min_child = 2 * (hole_index + 1);
while (min_child <= s->n)
{
min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
if (!(min_heap_elem_greater(e, s->p[min_child])))
break;
(s->p[hole_index] = s->p[min_child])->ev_timeout_pos.min_heap_idx = hole_index;
hole_index = min_child;
min_child = 2 * (hole_index + 1);
}
(s->p[hole_index] = e)->ev_timeout_pos.min_heap_idx = hole_index;
}
#endif /* _MIN_HEAP_H_ */
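
min-heap.h above is the timeout priority queue: min_heap_elem_greater orders events by ev_timeout, so p[0] is always the event that expires next, and ev_timeout_pos.min_heap_idx caches each event's slot so min_heap_erase can restore the heap invariant in O(log n) without searching. As a hedged, self-contained illustration of the same sift-up/sift-down technique (plain ints instead of struct event, a fixed array instead of min_heap_reserve; not libevent code), the algorithm reduces to:

/* Standalone sketch of the binary-heap technique used above. */
#include <stdio.h>

#define CAP 16
static int heap[CAP];
static unsigned n;              /* current element count */

static void sift_up(unsigned hole, int v)
{
    while (hole && heap[(hole - 1) / 2] > v) {
        heap[hole] = heap[(hole - 1) / 2];     /* pull the parent down */
        hole = (hole - 1) / 2;
    }
    heap[hole] = v;
}

static void sift_down(unsigned hole, int v)
{
    unsigned child = 2 * (hole + 1);           /* right child, as above */
    while (child <= n) {
        /* fall back to the left child when it is smaller or when the right
         * child does not exist -- the same branch-free step as the
         * `min_child -=` line in min_heap_shift_down_ */
        child -= (child == n) || (heap[child] > heap[child - 1]);
        if (v <= heap[child])
            break;
        heap[hole] = heap[child];
        hole = child;
        child = 2 * (hole + 1);
    }
    heap[hole] = v;
}

static void push(int v) { sift_up(n++, v); }   /* no overflow check: sketch only */

static int pop(void)
{
    int top = heap[0];
    sift_down(0, heap[--n]);    /* re-seat the last element from the root */
    return top;
}

int main(void)
{
    int vals[] = { 30, 5, 20, 1, 10 };
    for (unsigned i = 0; i < 5; i++)
        push(vals[i]);
    while (n)
        printf("%d ", pop());   /* prints: 1 5 10 20 30 */
    printf("\n");
    return 0;
}

min_heap_erase adds one twist on top of this: because min_heap_idx records where an event sits, the replacement element can be sifted up or down directly from that slot.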

View file

@ -1,63 +0,0 @@
/*
* Copyright (c) 2007-2010 Niels Provos and Nick Mathewson
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _EVENT_MM_INTERNAL_H
#define _EVENT_MM_INTERNAL_H
#include <sys/types.h>
#include "opal_rename.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef _EVENT_DISABLE_MM_REPLACEMENT
/* Internal use only: Memory allocation functions. We give them nice short
* mm_names for our own use, but make sure that the symbols have longer names
* so they don't conflict with other libraries (like, say, libmm). */
void *event_mm_malloc_(size_t sz);
void *event_mm_calloc_(size_t count, size_t size);
char *event_mm_strdup_(const char *s);
void *event_mm_realloc_(void *p, size_t sz);
void event_mm_free_(void *p);
#define mm_malloc(sz) event_mm_malloc_(sz)
#define mm_calloc(count, size) event_mm_calloc_((count), (size))
#define mm_strdup(s) event_mm_strdup_(s)
#define mm_realloc(p, sz) event_mm_realloc_((p), (sz))
#define mm_free(p) event_mm_free_(p)
#else
#define mm_malloc(sz) malloc(sz)
#define mm_calloc(n, sz) calloc((n), (sz))
#define mm_strdup(s) strdup(s)
#define mm_realloc(p, sz) realloc((p), (sz))
#define mm_free(p) free(p)
#endif
#ifdef __cplusplus
}
#endif
#endif

Some files were not shown because too many files changed in this diff.