
    Mh=                      % S SK Jr  S SKrS SKJr  S SKrS SKJr  S SK	r	S SK
Jr  S SKrS SKrS SKrS SKrS SKJrJrJrJrJrJrJrJrJr  S SKrS SKrS SKrS SKJrJ r J!r!  S SK"J#r#  S S	K$J%r%  S S
K&J'r'J(r(J)r)J*r*  S SK+J,r,  S SK-J.r.J/r/J0r0J1r1J2r2J3r3J4r4J5r5J6r6J7r7J8r8J9r9J:r:J;r;J<r<J=r=J>r>J?r?J@r@JArAJBrBJCrCJDrDJErEJFrFJGrGJHrHJIrIJJrJJKrKJLrLJMrMJNrNJOrOJPrPJQrQJRrRJSrSJTrTJUrUJVrVJWrWJXrXJYrYJZrZ  S SK[J\r\  S SK]J^r^  S SK_J`r`  S SKaJbrc  S SKdJereJfrfJgrgJhrhJiriJjrjJkrkJlrl  S SKmJnrnJoro  S SKpJqrq  S SKrJsrsJtrtJuruJvrvJwrw  S SKxJyry  S SKzJ{r{J|r|J}r}J~r~JrJrJrJrJrJrJrJrJr  S SKJrJr  S SKJrJr  S SKJrJr  S SKJrJr  S SKJrJrJrJrJrJrJr  S SKJr  S SKJr  S SKJr  S SKJr  S S KJr  S S!KJrJrJrJrJrJrJr  S S"KJrJrJr  S S#KJrJr  S S$KJr  S S%KJrJrJr  S S&KJr  S S'KJr  S S(KJr  S S)KJrJrJrJr  S S*KJrJr  S S+KJr  \(       a*  S S,KJrJrJrJr  S S-K&Jr  S S.KJrJrJrJr  S S/KJr  S S0KJr  0 \ErS1S2S3S4S5S6.r\r " S7 S8\\GR                  5      rS9rS:rS;rS<rS=rS>rS?rS@rSArSBrSCrSDrSErSFrSGrSHrSIrSJrSK\SL'   \SL   GR                  SMSNSOSPSQSR9r\SS-  r\SL   GR                  STSUSQSVSQSR9rSW\SX'   \SL   GR                  SYSZS S[S SR9rSW\S\'   S]rS^rS_rSaS` jrg)b    )annotationsN)deepcopy)partial)loads)	TYPE_CHECKINGAnyCallableClassVarLiteralNoReturncastfinaloverload)configusing_copy_on_writewarn_copy_on_write)lib)is_range_indexer)PeriodTick	Timestamp	to_offset)freq_to_period_freqstr)-	AlignJoinAnyArrayLike	ArrayLikeAxesAxisAxisIntCompressionOptionsDtypeArgDtypeBackendDtypeObjFilePathFillnaOptionsFloatFormatTypeFormattersType	FrequencyIgnoreRaiseIndexKeyFunc
IndexLabelInterpolateOptionsIntervalClosedTypeJSONSerializableLevelManager
NaPositionNDFrameTOpenFileErrorsRandomStateReindexMethodRenamerScalarSelfSequenceNotStrSortKindStorageOptionsSuffixesTTimeAmbiguousTimedeltaConvertibleTypesTimeNonexistentTimestampConvertibleTypesTimeUnitValueKeyFuncWriteBufferWriteExcelBuffernpt)PYPY)	REF_COUNT)import_optional_dependency)function)AbstractMethodErrorChainedAssignmentErrorInvalidIndexErrorSettingWithCopyErrorSettingWithCopyWarning_chained_assignment_method_msg&_chained_assignment_warning_method_msg_check_cacher)deprecate_nonkeyword_argumentsdoc)find_stack_level)check_dtype_backendvalidate_ascendingvalidate_bool_kwargvalidate_fillna_kwargsvalidate_inclusive)astype_is_view)ensure_objectensure_platform_int
ensure_stris_boolis_bool_dtypeis_dict_likeis_extension_array_dtypeis_list_like	is_numberis_numeric_dtypeis_re_compilable	is_scalarpandas_dtype)DatetimeTZDtypeExtensionDtype)ABCDataFrame	ABCSeries)is_hashableis_nested_list_like)isnanotna)
algorithms	arraylikecommonindexingmissingnanopssample)should_use_regex)ExtensionArray)PandasObject)extract_array)Flags)DatetimeIndexIndex
MultiIndexPeriodIndex
RangeIndexdefault_indexensure_index)ArrayManagerBlockManagerSingleArrayManager)
mgr_to_mgrndarray_to_mgr)describe_ndframe)clean_fill_methodclean_reindex_fill_methodfind_valid_index)concat)_shared_docs)get_indexer_indexer)	ExpandingExponentialMovingWindowRollingWindow)DataFrameFormatterDataFrameRenderer)pprint_thing)HashableIteratorMappingSequence)
BaseOffset)	DataFrameExcelWriterHDFStoreSeries)BaseIndexer)	Resamplerzkeywords for axeszSeries/DataFramezG{0 or 'index'} for Series, {0 or 'index', 1 or 'columns'} for DataFramez`
    inplace : bool, default False
        If True, performs operation inplace and returns None.zM
        by : str or list of str
            Name or list of names to sort by)axesklassaxes_single_arginplaceoptional_byc                  0  ^  \ rS rSr% Sr/ SQrS\S'   \" \5      rS\S'   \" 5       r	S\S'   \
" / 5      rS	\S
'   / rS\S'   SrS\S'   S\S'   S\S'   S\S'   GSgS jr\\  GSh         GSiS jj5       5       r\GSjGSkS jj5       r\\GSlS j5       5       r\GSmS j5       r\R,                  GSnS j5       r\\GSoS j5       5       r\SSS.     GSpS jj5       r\\GSqS  j5       5       r\GSrS! j5       r\\S" 5       5       rS#\S$'   S%S%S%S&.rS'\S('   S)\S*'   S+\S,'   S)\S-'   \GSsGStS. jj5       r\\GSuS/ j5       5       r\\GSvS0 j5       5       r\GSwS1 j5       r \\GSuS2 j5       5       r!\GSxS3 j5       r"\GSyS4 j5       r#\GSzS5 j5       r$\\GS{S6 j5       5       r%S7 r&\GS|S8 j5       r'\GS}S9 j5       r(\\GS~S: j5       5       r)\\GS~S; j5       5       r*S%SS<.     GSS= jjr+\      GSS? j5       r,\GSS@ j5       r-\GSsGSSA jj5       r.\\/" \0SB   SC9GSGSSD jj5       5       r1GSSE jr2\GSsGSSF jj5       r3\ GSsSSSSSSSGSH.                 GSSI jjj5       r4\5 GSSJSJSJSJSJSK.         GSSL jjj5       r6\5 GSSJSJSJSJSM.         GSSN jjj5       r6\5 GSSJSJSJSJSJSK.         GSSO jjj5       r6\7Rp                  4\7Rp                  \7Rp                  S%SSSK.         GSSP jjjr6\ GS     GSSQ jj5       r9\GSSR j5       r:\GSST j5       r;\GSSU j5       r<\GSSV j5       r=\GSSW j5       r>\GSSX j5       r?\?r@\GSSY j5       rA\GSSZ j5       rB\GSS[ j5       rC\GSGSS\ jj5       rD\GSGSS] jj5       rE\GSGSS^ jj5       rF\GSGSS_ jj5       rG\GSGSS` jj5       rH\GSGSSa jj5       rI\GSGSSb jj5       rJSc\Sd'   GSSe jrKGS{Sf jrLSg rMGS~Sh jrN\GSSi j5       rO\GSSj j5       rPSkrQS)\Sl'    GS     GSSm jjrR\        GSSn j5       rS\GSSo j5       rT\GSSp j5       rUGSSq jrV\Sr 5       rW\Ss 5       rX\\Y" StSuSv/SwSx9\/" SS\ZSy   SzS{9               GS                                 GSS} jj5       5       5       r[\\Y" StSuS~/SSx9\/" \ZSy   \ZS   S~-  S9             GS                           GSS jj5       5       5       r\\\Y" StSuS~/SSx9            GS                           GSS jj5       5       r]\\Y" St/ SQSSx9       GS                 GSS jj5       5       r^\\Y" StSuS/SSx9\/" \ZSy   \ZS   S-  S9S\_R                  S4         GSS jj5       5       5       ra\\Y" StSu/SSx9 GS     GSS jj5       5       rb\S 5       rc\5                     GS                                           GSS jj5       rd\5                    GS                                           GSS jj5       rd\\Y" StSuS/SSx9                     GS                                           GSS jj5       5       rd\ GSsSSSSSS.         GSS jjj5       re\5                     GS                                           GSS jj5       rf\5    
                GS                                           GSS jj5       rf\\Y" StSuS~/SSx9\/" \ZSy   \ZS   S~-  S9                     GS                                           GSS jj5       5       5       rfGSS jrg   GS       GSS jjrhGSS jri\GSGSS jj5       rj\GSGSS jj5       rk\   GS         GSS jj5       rlS rm\GSS j5       rnGSGSS jjro\GSjGSS jj5       rpGSS jrq\GSGSS jj5       rr\GSS j5       rs\GSS j5       rt\GSsS j5       ru\\GSS j5       5       rv\    GS       GSS jj5       rw\5 GSSJSJSJSJSJS.               GSS jjj5       rx\5 GSSJSJSJSJSJSJS.               GSS jjj5       rx\5 GSSJSJSJSJSJSJS.               GSS jjj5       rx GSsS%SSSSSS.               GSS jjjrx\   GS     GSS jj5       ry\GSjGSS jj5       rz\GSsGSS jj5       r{\GSsGSS jj5       r|\5SJSJSJSJSJSJSJS.               GSS jj5       r}\5SJSJSJSJSJSJS.               GSS jj5       r}\5SJSJSJSJSJSJSJS.               GSS jj5       r}S%SSSSSSS.               GSS jjr}\5SJSJSJSJSJSJSJSJS.                   GSS jj5       r~\5SJSJSJSJSJSJSJSJSJS.	                   GSS jj5       r~\5SJSJSJSJSJSJSJSJSJS.	                   GSS jj5       r~S%SSSSSSSSS.	                   GSS jjr~\/" \0SB   S|S9 GSsSSSSSS\GR                   SSS.	
             GSS jjj5       r\          GSS j5       rGSS jrS r\   GS     GSS jj5       r    GS       GSS jjr\GSGSS jj5       r\GSGSS jj5       r\       GS             GSS jj5       r\\/" \0SB   SC9    GSS j5       5       r\GSsGSS jj5       r\GSS j5       r\GSS j5       r\GSU 4S jj5       r\S 5       r\GSS j5       r\S 5       r\\GSS j5       5       r\GSS j5       r\S 5       r\S 5       r\GSS j5       r\S 5       r\ GS     GSS jj5       r\GSjGSS jj5       r\GSjGSS jj5       r\GSsGSS jj5       r\GSsGSS jj5       r\      GS             GSS jj5       rGSS jr\SSSSSS.           GSS jj5       r\5 GSSJSJSJSJSJS.             GSS jjj5       r\5 GSSJSJSJSJS.             GSS jjj5       r\5 GSSJSJSJSJSJS.             GSS jjj5       r\\/" \0SB   \0S   S9 GSsSSSS\7Rp                  S.             GSS jjj5       5       r\5SJSJSJSJSJS.           GSS jj5       r\5SJSJSJSJS.           GSS jj5       r\5SJSJSJSJSJS.           GSS jj5       r\\/" \0SB   \0S   S9SSSS\7Rp                  S.           GSS jj5       5       r\\/" \0SB   SC9SSS\7Rp                  S.         GSS jj5       5       r\5SJSJSJSJSJS.           GSS jj5       r\5SJSJSJS.         GSS jj5       r\5SJSJSJSJSJS.           GSS jj5       r\\/" \0SB   \0S   S9SSSS\7Rp                  S.           GSS jj5       5       r\\/" \0SB   SC9SSS\7Rp                  S.         GSS jj5       5       r\5  GSSJSJSJSJS.         GSS jjj5       r\5  GSSJSJSJS.         GSS jjj5       r\5  GSSJSJSJSJS.         GSGS  jjj5       r\\/" \ZS   \0SB   \0S>   GS9S\7Rp                  4SSS\7Rp                  S.         GSGS jjj5       5       r\5 GSSJSJSJSJSJSJGS.               GSGS jjj5       r\5 GSSJSJSJSJSJGS.               GSGS jjj5       r\5 GSSJSJSJSJSJSJGS.               GSGS jjj5       r\ GSS%SSSS\7Rp                  GS.               GSGS jjj5       r\GSsGS	 j5       r\/" \0SB   SC9GSGS
 j5       r\/" \\0SB   SC9GSGS j5       r\/" \0SB   SC9GSGS j5       r\/" \\0SB   SC9GSGS j5       r\GSGSGS jj5       r\GS 5       r\5  GSSJSJGS.     GSGS jjj5       r\5  GSSJGS.     GSGS jjj5       r\5  GSSJSJGS.     GSGS jjj5       r\  GSSSGS.     GSGS jjj5       r\\/" \0SB   SC9    GS            GSGS jj5       5       r\GSGSGS jj5       r\  GS     GSGS jj5       r\\/" \0SB   SC9\7Rp                  SS\7Rp                  \7Rp                  SSGSSS4
                     GSGS jj5       5       r\GSGS j5       r\GSGS j5       r\      GS             GSGS jj5       r\/" \ZGS   \0SB   SC9    GS	       GS
GS  jj5       r\\/" \0SB   \0S   S9GS!SSSS\7Rp                  \7Rp                  \7Rp                  \7Rp                  4	                     GSGS" jj5       5       r\        GS             GSGS# jj5       r\        GS             GSGS$ jj5       r\\7Rp                  SSSS4     GSGS% jj5       r\5 GSSJSJSJGS&.       GSGS' jjj5       r\5 GSSJSJGS(.       GSGS) jjj5       r\5 GSSJSJSJGS&.       GSGS* jjj5       r\\/" \0SB   GS+GS,GS-GS.GS/9\GR                   4SSSGS&.       GSGS0 jjj5       5       r\5 GSSJSJSJGS&.       GSGS1 jjj5       r\5 GSSJSJGS(.       GSGS2 jjj5       r\5 GSSJSJSJGS&.       GSGS3 jjj5       r\\/" \\0SB   GS,GS+GS.GS-GS/9\7Rp                  4SSSGS&.       GSGS4 jjj5       5       r\/" \0SB   SC9GSSS%\7Rp                  S4         GSGS5 jj5       r\GSGS6 j5       r\    GS     GSGS7 jj5       r\\/" \0SB   SC9 GS     GSGS8 jj5       5       r\\/" \0SB   SC9     GS         GSGS9 jj5       5       r\   GS GSGS: jj5       r\GS\7Rp                  \7Rp                  S4       GSGS; jj5       r\   GS         GSGS< jj5       r   GS       GSGS= jjr   GS       GSGS> jjr\  GS     GS GS? jj5       rGSGS!GS@ jjrGSGS!GSA jjrGSGS!GSB jjrGSGS!GSC jjr\\7Rp                  SGSS4           GS"GSD jj5       r    GS#         GS$GSE jjr    GS#         GS$GSF jjr    GS#         GS$GSG jjr\   GS%       GS&GSH jj5       r   GS%     GS'GSI jjr   GS%     GS'GSJ jjr   GS%       GS(GSK jjr   GS%       GS(GSL jjr   GS%       GS(GSM jjr   GS%       GS(GSN jjr\r\\7Rp                  SSS%4         GS)GSO jj5       r    GS*       GS+GSP jjr    GS*       GS+GSQ jjr\r\\/" \5      SSSS\7Rp                  SSGSR4                   GS,GSS jj5       5       r\\/" \5      GS\7Rp                  GSR4       GS-GST jj5       5       r\\/" \5      SSSSS%SS\7Rp                  SGSR4
                     GS.GSU jj5       5       r\GSGSV j5       r\GSGSW j5       r\GSGSX j5       r\GSGSY j5       r\GSGSZ j5       r\GSGS[ j5       r\GSGS\ j5       r\GSGS] j5       r\GSGS^ j5       r\GSGS_ j5       r\GSGS` j5       r\GS/GSa j5       r\\/" GSb\0SB   GSc9GS0GSd j5       5       r\\/" \S\0SB   GSc9GS0GSe j5       5       rGSfrU =r$ (1  NDFrame   z
N-dimensional analogue of DataFrame. Store multi-dimensional data in a
size-mutable, labeled data structure.

Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
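
Notes
-----
``DataFrame`` and ``Series`` are the concrete public subclasses of this
base class; for illustration only:

>>> import pandas as pd
>>> isinstance(pd.DataFrame({"a": [1]}), pd.core.generic.NDFrame)
True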
    """

    _internal_names: list[str] = [
        "_mgr",
        "_cacher",
        "_item_cache",
        "_cache",
        "_is_copy",
        "_name",
        "_metadata",
        "_flags",
    ]
    _internal_names_set: set[str] = set(_internal_names)
    _accessors: set[str] = set()
    _hidden_attrs: frozenset[str] = frozenset([])
    _metadata: list[str] = []
    _is_copy: weakref.ReferenceType[NDFrame] | str | None = None
    _mgr: Manager
    _attrs: dict[Hashable, Any]
    _typ: str

    # ----------------------------------------------------------------------
    # Constructors

    def __init__(self, data: Manager) -> None:
        object.__setattr__(self, "_is_copy", None)
        object.__setattr__(self, "_mgr", data)
        object.__setattr__(self, "_item_cache", {})
        object.__setattr__(self, "_attrs", {})
        object.__setattr__(self, "_flags", Flags(self, allows_duplicate_labels=True))

    @final
    @classmethod
    def _init_mgr(
        cls,
        mgr: Manager,
        axes: dict[Literal["index", "columns"], Axes | None],
        dtype: DtypeObj | None = None,
        copy: bool_t = False,
    ) -> Manager:
        """passed a manager and a axes dict"""
        for a, axe in axes.items():
            if axe is not None:
                axe = ensure_index(axe)
                bm_axis = cls._get_block_manager_axis(a)
                mgr = mgr.reindex_axis(axe, axis=bm_axis)

        # make a copy if explicitly requested
        if copy:
            mgr = mgr.copy()
        if dtype is not None:
            # avoid further copies if we can
            if (
                isinstance(mgr, BlockManager)
                and len(mgr.blocks) == 1
                and mgr.blocks[0].values.dtype == dtype
            ):
                pass
            else:
                mgr = mgr.astype(dtype=dtype)
        return mgr

    @final
    def _as_manager(self, typ: str, copy: bool_t = True) -> Self:
        """
Private helper function to create a DataFrame with specific manager.

Parameters
----------
typ : {"block", "array"}
copy : bool, default True
    Only controls whether the conversion from Block->ArrayManager
    copies the 1D arrays (to ensure proper/contiguous memory layout).

Returns
-------
DataFrame
    New DataFrame using specified manager type. Is not guaranteed
    to be a copy or not.
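
Examples
--------
Illustrative sketch only; ``_as_manager`` is private API and the manager
layout is an implementation detail:

>>> df = pd.DataFrame({"a": [1, 2]})
>>> df._as_manager("array")  # doctest: +SKIP
   a
0  1
1  2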
        """
        new_mgr = mgr_to_mgr(self._mgr, typ=typ, copy=copy)
        # fastpath of passing a manager doesn't check the option/manager class
        return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self)

    @classmethod
    def _from_mgr(cls, mgr: Manager, axes: list[Index]) -> Self:
        """
Construct a new object of this type from a Manager object and axes.

Parameters
----------
mgr : Manager
    Must have the same ndim as cls.
axes : list[Index]

Notes
-----
The axes must match mgr.axes, but are required for future-proofing
in the event that axes are refactored out of the Manager objects.
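
Examples
--------
Illustrative sketch only; ``_from_mgr`` is a private constructor that
re-wraps an existing manager without copying it:

>>> df = pd.DataFrame({"a": [1, 2]})
>>> pd.DataFrame._from_mgr(df._mgr, axes=df._mgr.axes)  # doctest: +SKIP
   a
0  1
1  2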
        """
        obj = cls.__new__(cls)
        NDFrame.__init__(obj, mgr)
        return obj

    # ----------------------------------------------------------------------
    # attrs and flags

    @property
    def attrs(self) -> dict[Hashable, Any]:
        """
Dictionary of global attributes of this dataset.

.. warning::

   attrs is experimental and may change without warning.

See Also
--------
DataFrame.flags : Global flags applying to this object.

Notes
-----
Many operations that create new datasets will copy ``attrs``. Copies
are always deep so that changing ``attrs`` will only affect the
present dataset. ``pandas.concat`` copies ``attrs`` only if all input
datasets have the same ``attrs``.

Examples
--------
For Series:

>>> ser = pd.Series([1, 2, 3])
>>> ser.attrs = {"A": [10, 20, 30]}
>>> ser.attrs
{'A': [10, 20, 30]}

For DataFrame:

>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.attrs = {"A": [10, 20, 30]}
>>> df.attrs
{'A': [10, 20, 30]}
        """
        return self._attrs

    @attrs.setter
    def attrs(self, value: Mapping[Hashable, Any]) -> None:
        self._attrs = dict(value)

    @final
    @property
    def flags(self) -> Flags:
        """
Get the properties associated with this pandas object.

The available flags are

* :attr:`Flags.allows_duplicate_labels`

See Also
--------
Flags : Flags that apply to pandas objects.
DataFrame.attrs : Global metadata applying to this dataset.

Notes
-----
"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.

Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags
<Flags(allows_duplicate_labels=True)>

Flags can be get or set using ``.``

>>> df.flags.allows_duplicate_labels
True
>>> df.flags.allows_duplicate_labels = False

Or by slicing with a key

>>> df.flags["allows_duplicate_labels"]
False
>>> df.flags["allows_duplicate_labels"] = True
        """
        return self._flags

    @final
    def set_flags(
        self,
        *,
        copy: bool_t = False,
        allows_duplicate_labels: bool_t | None = None,
    ) -> Self:
        """
Return a new object with updated flags.

Parameters
----------
copy : bool, default False
    Specify if a copy of the object should be made.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
allows_duplicate_labels : bool, optional
    Whether the returned object allows duplicate labels.

Returns
-------
Series or DataFrame
    The same type as the caller.

See Also
--------
DataFrame.attrs : Global metadata applying to this dataset.
DataFrame.flags : Global flags applying to this object.

Notes
-----
This method returns a new object that's a view on the same data
as the input. Mutating the input or the output values will be reflected
in the other.

This method is intended to be used in method chains.

"Flags" differ from "metadata". Flags reflect properties of the
pandas object (the Series or DataFrame). Metadata refer to properties
of the dataset, and should be stored in :attr:`DataFrame.attrs`.

Examples
--------
>>> df = pd.DataFrame({"A": [1, 2]})
>>> df.flags.allows_duplicate_labels
True
>>> df2 = df.set_flags(allows_duplicate_labels=False)
>>> df2.flags.allows_duplicate_labels
False
deepr   )r   r   r   )r   r   r   dfs       r   	set_flagsNDFrame.set_flags  s9    x YYD>)<)>%>Y?".2IHH./	r   c                t    Ub4  [        U5      nUR                  S:X  a  [        SU R                   S35      eU$ )zvalidate the passed dtypeVz+compound dtypes are not implemented in the z constructor)rh   kindNotImplementedError__name__)r   r   s     r   _validate_dtypeNDFrame._validate_dtype  sK      'E zzS )!ll^<9 
 r   c                    [        U 5      e)zJ
Used when a manipulation result has the same dimensions as the
original.
rK   r   s    r   _constructorNDFrame._constructor  s     "$''r   c                    [         R                  " [        U 5      R                   S3[        [        5       S9  U R                  $ )NzV._data is deprecated and will be removed in a future version. Use public APIs instead.
stacklevel)warningswarntyper   DeprecationWarningrU   r   r   s    r   _dataNDFrame._data  s?     	Dz""# $9 9')		
 yyr   z!list[Literal['index', 'columns']]_AXIS_ORDERSr   )r   indexrowszdict[Axis, AxisInt]_AXIS_TO_AXIS_NUMBERint_info_axis_numberLiteral['index', 'columns']_info_axis_name	_AXIS_LENc                    U=(       d    U R                    Vs0 s H  o3U R                  U5      _M     nnUR                  U5        U$ s  snf )z%Return an axes dictionary for myself.)r  	_get_axisupdate)r   r   kwargsr   ds        r   _construct_axes_dictNDFrame._construct_axes_dict3  sM     -1,ED4E4E,EG,Eaq!!,EG 	
	 Hs   Ac                t     U R                   U   $ ! [         a    [        SU SU R                   35      ef = f)NzNo axis named z for object type )r  KeyError
ValueErrorr   )r   r   s     r   _get_axis_numberNDFrame._get_axis_number<  sG    	U++D11 	U~dV3DS\\NSTT	Us    &7c                B    U R                  U5      nU R                  U   $ r   )r!  r  )r   r   axis_numbers      r   _get_axis_nameNDFrame._get_axis_nameD  s%     **40,,r   c                p    U R                  U5      nUS;   d   eUS:X  a  U R                  $ U R                  $ )N>   r   r   r   )r!  r  columns)r   r   r$  s      r   r  NDFrame._get_axisJ  s:    ++D1f$$$(A-tzz?4<<?r   c                V    U R                  U5      nU R                  nUS:X  a  SU-
  $ U$ )z'Map the axis to the block_manager axis.   r   )r!  r  )r   r   ndims      r   r   NDFrame._get_block_manager_axisP  s2     ##D)}}19t8Or   c                4   [        X5      n0 nUS   n[        UR                  5       H@  u  pVUb  U=pxO	U SU 3nUnUR                  U5      n	U	R	                  5       n
X*l        XU'   MB     [        U[        5      (       a  UnOUR	                  5       nXU'   U$ )Nr   level_)getattr	enumeratenamesget_level_values	to_seriesr  r   r   )r   r   
axis_indexr  prefixinamekeylevellevel_valuessdindexs               r   _get_axis_resolversNDFrame._get_axis_resolvers[  s     T(
a !1!12GA""e
  qc*%66u=L&&(A GcF 3  j*--F))+F$r   c                
   SSK Jn  0 nU R                   H#  nUR                  U R	                  U5      5        M%     UR                  5        VVs0 s H%  u  pE[        U[        5      (       a  M  U" U5      U_M'     snn$ s  snnf )Nr   clean_column_name)pandas.core.computation.parsingrB  r  r  r>  r   r   r  )r   rB  r  	axis_namekvs         r   _get_index_resolversNDFrame._get_index_resolversz  sl    E,.**IHHT--i89 + 56GGIXIDAZPQSVEW'!!$a'IXXXs   A?.A?c                   SSK Jn  SSKJn  [	        U [
        5      (       a  U" U R                  5      U 0$ [        U R                  U R                  5       5       VVs0 s HR  u  p4[	        U[        5      (       a  M  U" U5      U" USU R                  X0R                  U   S9R                  U 5      _MT     snn$ s  snnf )z
Return the special character free column resolvers of a dataframe.

Column names with special characters are 'cleaned up' so that they can
be referred to by backtick quoting.
Used in :meth:`DataFrame.eval`.
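
For example, a label containing spaces can be referred to with backticks
(illustrative only):

>>> df = pd.DataFrame({"total sales": [10, 20], "units": [1, 2]})
>>> df.eval("`total sales` / units")  # doctest: +SKIP
0    10.0
1    10.0
dtype: float64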
r   rA  r   F)r   r  r8  r   )rC  rB  pandas.core.seriesr   r   rl   r8  zipr(  _iter_column_arraysr  r  dtypesr   )r   rB  r   rE  rF  s        r   _get_cleaned_column_resolvers%NDFrame._get_cleaned_column_resolvers  s     	F-dI&&%dii0$77 DLL$*B*B*DE	
 Fa%	!a &TZZa{{1~#l4 ! F	
 	
 
s   B:<:B:c                ,    [        X R                  5      $ r   )r0  r  r   s    r   
_info_axisNDFrame._info_axis  s     t1122r   c                    [        U R                  R                  5      S:X  a  gU R                  R                  S   R                  R	                  5       $ )Nr   F)r   r   r   refshas_referencer   s    r   _is_view_after_cow_rules NDFrame._is_view_after_cow_rules  sB     tyy A%yy"''5577r   c                B   ^  [        U 4S jT R                   5       5      $ )z#
Return a tuple of axis dimensions
c              3  X   >#    U  H  n[        TR                  U5      5      v   M!     g 7fr   r   r  .0r   r   s     r   	<genexpr> NDFrame.shape.<locals>.<genexpr>  s$     G5FS*++5Fs   '*)tupler  r   s   `r   shapeNDFrame.shape  s    
 GT5F5FGGGr   c                b    U R                    Vs/ s H  oR                  U5      PM     sn$ s  snf )z/
Return index label(s) of the internal NDFrame
)r  r  )r   r   s     r   r   NDFrame.axes  s+     ,0+<+<=+<aq!+<===s   ,c                .    U R                   R                  $ )aJ  
Return an int representing the number of axes / array dimensions.

Return 1 if Series. Otherwise return 2 if DataFrame.

See Also
--------
ndarray.ndim : Number of array dimensions.

Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1

>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
)r   r,  r   s    r   r,  NDFrame.ndim  s    , yy~~r   c                T    [        [        R                  " U R                  5      5      $ )a  
Return an int representing the number of elements in this object.

Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.

See Also
--------
ndarray.size : Number of elements in the array.

Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3

>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
)r  npprodra  r   s    r   sizeNDFrame.size  s    0 2774::&''r   r   r   c               $    U R                  XSUS9$ )a  
Assign desired index to given axis.

Indexes for%(extended_summary_sub)s row labels can be changed by assigning
a list-like or Index.

Parameters
----------
labels : list-like, Index
    The values for the new index.

axis : %(axes_single_arg)s, default 0
    The axis to update. The value 0 identifies the rows. For `Series`
    this parameter is unused and defaults to 0.

copy : bool, default True
    Whether to make a copy of the underlying data.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``

Returns
-------
%(klass)s
    An object of type %(klass)s.

See Also
--------
%(klass)s.rename_axis : Alter the name of the index%(see_also_sub)s.
F)r   r   )_set_axis_nocheck)r   labelsr   r   s       r   set_axisNDFrame.set_axis  s    \ %%fE%MMr   r   c                    U(       a  [        X R                  U5      U5        g U R                  U=(       a    [        5       (       + S9n[        XUR                  U5      U5        U$ Nr   )setattrr%  r   r   )r   ro  r   r   r   r   s         r   rn  NDFrame._set_axis_nocheck  sT     D--d3V< ))!C.A.C*C)DCC++D16:Jr   c                p    [        U5      nU R                  R                  X5        U R                  5         g)zr
This is called from the cython code when we set the `index` attribute
directly, e.g. `series.index = [1, 2, 3]`.
N)r   r   rp  _clear_item_cache)r   r   ro  s      r   	_set_axisNDFrame._set_axis'  s,     f%		4( r   c           	        [         R                  " S[        U 5      R                   S[        U 5      R                   S3[        [        5       S9  U R                  U5      nU R                  U5      nXE:X  a%  U R                  U=(       a    [        5       (       + S9$ XEXT0n[        U R                  5       Vs/ s H"  opR                  UR                  Xw5      5      PM$     nnU R                  R                  XE5      n	U R                  R                   (       Ga"  [#        U R                  [$        5      (       Ga  ['        U	US   US   SS	S
S9n
[#        U
[$        5      (       d   e[#        U R                  [$        5      (       d   eU R                  R(                  S   R*                  U
R(                  S   l        U
R(                  S   R*                  R-                  U
R(                  S   5        [        5       (       d  US	La  U
R                  SS9n
U R/                  XR0                  S9nUR3                  U SS9$ U R4                  " U	/UQ7SS	06R3                  U SS9$ s  snf )a  
Interchange axes and swap values axes appropriately.

.. deprecated:: 2.1.0
    ``swapaxes`` is deprecated and will be removed.
    Please use ``transpose`` instead.

Returns
-------
same as input

Examples
--------
Please see examples for :meth:`DataFrame.transpose`.
'zN.swapaxes' is deprecated and will be removed in a future version. Please use 'z.transpose' instead.r  r   r   r   NFblock)r   r   r   Tr   swapaxesmethodr   )r  r	  r
  r   FutureWarningrU   r!  r   r   ranger  r  get_valuesr}  r   is_single_blockr   r   r   r   rU  add_referencer   r   r   r  )r   axis1axis2r   r7  jmappingrE  new_axes
new_valuesr   outs               r   r}  NDFrame.swapaxes1  s$   " 	T
##$ %:..//CE ')	
 !!%(!!%(699$"D/B/D+D9EE,?DT^^?TU?T!NN7;;q#45?TU\\**10
99$$$DII|)L)L %G g|4444dii6666%)YY%5%5a%8%=%=GNN1"NN1""001BC&((T->!,,D,1,,W<<,HC##D#<<  

 	

 ,tJ,
/	0/ Vs   7)Ir   )r   c                f    U R                  U5      nUR                  U5      nU R                  XBSS9$ )a  
Return {klass} with requested index / column level(s) removed.

Parameters
----------
level : int, str, or list-like
    If a string is given, must be the name of a level
    If list-like, elements must be names or positional indexes
    of levels.

axis : {{0 or 'index', 1 or 'columns'}}, default 0
    Axis along which the level(s) is removed:

    * 0 or 'index': remove level(s) in column.
    * 1 or 'columns': remove level(s) in row.

    For `Series` this parameter is unused and defaults to 0.

Returns
-------
{klass}
    {klass} with requested index / column level(s) removed.

Examples
--------
>>> df = pd.DataFrame([
...     [1, 2, 3, 4],
...     [5, 6, 7, 8],
...     [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])

>>> df.columns = pd.MultiIndex.from_tuples([
...     ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])

>>> df
level_1   c   d
level_2   e   f
a b
1 2      3   4
5 6      7   8
9 10    11  12

>>> df.droplevel('a')
level_1   c   d
level_2   e   f
b
2        3   4
6        7   8
10      11  12

>>> df.droplevel('level_2', axis=1)
level_1   c   d
a b
1 2      3   4
5 6      7   8
9 10    11  12
Nrl  )r  	droplevelrp  )r   r:  r   ro  
new_labelss        r   r  NDFrame.droplevelq  s6    z %%%e,
}}Z}>>r   c                    X   nX	 U$ r    )r   itemresults      r   popNDFrame.pop  s    Jr   c                  ^ Uc  [        U R                  5      OU R                  U5      4mU R                  [	        U4S j[        U R                  5       5       5         n[        U[        5      (       a  UR                  U SS9nU$ )a  
Squeeze 1 dimensional axis objects into scalars.

Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.

This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.

Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
    A specific axis to squeeze. By default, all length-1 axes are
    squeezed. For `Series` this parameter is unused and defaults to `None`.

Returns
-------
DataFrame, Series, or scalar
    The projection after squeezing `axis` or all the axes.

See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
    single-column DataFrame.

Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])

Slicing might produce a Series with a single value:

>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0    2
dtype: int64

>>> even_primes.squeeze()
2

Squeezing objects with more than one value in every axis does nothing:

>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1    3
2    5
3    7
dtype: int64

>>> odd_primes.squeeze()
1    3
2    5
3    7
dtype: int64

Squeezing is even more effective when used with DataFrames.

>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
   a  b
0  1  2
1  3  4

Slicing a single column will produce a DataFrame with the columns
having only one value:

>>> df_a = df[['a']]
>>> df_a
   a
0  1
1  3

So the columns can be squeezed down, resulting in a Series:

>>> df_a.squeeze('columns')
0    1
1    3
Name: a, dtype: int64

Slicing a single row from a single column will produce a single
scalar DataFrame:

>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
   a
0  1

Squeezing the rows produces a single scalar Series:

>>> df_0a.squeeze('rows')
a    1
Name: 0, dtype: int64

Squeezing all axes will project directly into a scalar:

>>> df_0a.squeeze()
1
c              3  l   >#    U  H)  u  pUT;   a  [        U5      S :X  a  SO
[        S5      v   M+     g7f)r   r   N)r   slice)r]  r7  r   r   s      r   r^  "NDFrame.squeeze.<locals>.<genexpr>"  s3      0DA $Y3q6Q;E$K?0s   14squeezer~  )
r  r  r!  ilocr`  r1  r   r   r   r   )r   r   r  r   s      @r   r  NDFrame.squeeze  s~    P )-uT^^$4;P;PQU;V:X %dii0 
 fg&&((i(@Fr   ignore)r  r(  r   r   r   r:  errorsc                  Uc  Uc  Uc  [        S5      eUc  Ub  Ub  [        S5      eUb  [        S5      eO!U(       a  U R                  U5      S:X  a  UnOUnU R                  U5        U(       a  U O$U R                  U=(       a    [	        5       (       + S9n	[        X#45       GH  u  pUc  M  U R                  U
5      n[        R                  " U5      nUb  UR                  U5      n[        U5      (       d  UR                  (       a$  Ub!  UR                  U5      R                  U5      nOUR                  U5      nUS:X  aL  [        XS:H     5      (       a7  [        U5       VVs/ s H  u  p/X   S:X  d  M  UPM     nnn[        U S35      eUR!                  XS	9nU	R#                  UU
S
SS9  U	R%                  5         GM"     U(       a  U R'                  U	5        g U	R)                  U SS9$ s  snnf )Nzmust pass an index to rename:Cannot specify both 'axis' and any of 'index' or 'columns'z<Cannot specify both 'mapper' and any of 'index' or 'columns'r   r   raise not found in axisr:  TFr   r   r   renamer~  )	TypeErrorr!  *_check_inplace_and_allows_duplicate_labelsr   r   r1  r  rs   get_rename_function_get_level_numbercallable	_is_multir3  get_indexer_forr   r  _transform_indexrn  rw  _update_inplacer   )r   mapperr  r(  r   r   r   r:  r  r  axis_noreplacementsaxfindexerlabelmissing_labels	new_indexs                     r   _renameNDFrame._rename.  s    >em:;; 3P  !R  " --d3q8 77@ diiT5WBUBW>Wi&X%./?%@!G#(B**<8A ,,U3 L))<<E$5 11%8HHVG 00>GW$W]-C)D)D -6l,C&,CLE">R/ ,C # &
 #n%55G#HII++A+;I$$YWdQV$W$$&7 &A:   (&&tH&==&s   )G1;G1.)r  r(  r   r   r   c                   g r   r  r   r  r  r(  r   r   r   s          r   rename_axisNDFrame.rename_axisv       	r   )r  r(  r   r   c                   g r   r  r  s          r   r  r    r  r   c                   g r   r  r  s          r   r  r    r  r   c               j   X#S.nUb  U R                  U5      n[        US5      nU(       a  [        5       (       a  SnU[        R                  LaV  [        U5      =(       d"    [        U5      =(       a    [        U5      (       + nU(       a  U R                  XXeS9$ [        S5      eU(       a  U OU R                  US9n	[        U R                  5       H  nUR                  U R                  U5      5      n
U
[        R                  L a  M8  [        U
5      =(       d"    [        U
5      =(       a    [        U
5      (       + nU(       a  U
nOH[        R                   " U
5      nU R#                  U5      R$                  nU Vs/ s H
  o" U5      PM     nnU	R                  XSUS9  M     U(       d  U	$ gs  snf )	ap  
Set the name of the axis for the index or columns.

Parameters
----------
mapper : scalar, list-like, optional
    Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
    A scalar, list-like, dict-like or functions transformations to
    apply to that axis' values.
    Note that the ``columns`` parameter is not allowed if the
    object is a Series. This parameter only apply for DataFrame
    type objects.

    Use either ``mapper`` and ``axis`` to
    specify the axis to target with ``mapper``, or ``index``
    and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
    The axis to rename. For `Series` this parameter is unused and defaults to 0.
copy : bool, default None
    Also copy underlying data.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
inplace : bool, default False
    Modifies the object directly, instead of creating a new Series
    or DataFrame.

Returns
-------
Series, DataFrame, or None
    The same type as the caller or None if ``inplace=True``.

See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.

Notes
-----
``DataFrame.rename_axis`` supports two calling conventions

* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``

The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.

The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.

We *highly* recommend using keyword arguments to clarify your
intent.

Examples
--------
**Series**

>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0       dog
1       cat
2    monkey
dtype: object
>>> s.rename_axis("animal")
animal
0    dog
1    cat
2    monkey
dtype: object

**DataFrame**

>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
...                    "num_arms": [0, 0, 2]},
...                   ["dog", "cat", "monkey"])
>>> df
        num_legs  num_arms
dog            4         0
cat            4         0
monkey         2         2
>>> df = df.rename_axis("animal")
>>> df
        num_legs  num_arms
animal
dog            4         0
cat            4         0
monkey         2         2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs   num_legs  num_arms
animal
dog            4         0
cat            4         0
monkey         2         2

**MultiIndex**

>>> df.index = pd.MultiIndex.from_product([['mammal'],
...                                        ['dog', 'cat', 'monkey']],
...                                       names=['type', 'name'])
>>> df
limbs          num_legs  num_arms
type   name
mammal dog            4         0
       cat            4         0
       monkey         2         2

>>> df.rename_axis(index={'type': 'class'})
limbs          num_legs  num_arms
class  name
mammal dog            4         0
       cat            4         0
       monkey         2         2

>>> df.rename_axis(columns=str.upper)
LIMBS          num_legs  num_arms
type   name
mammal dog            4         0
       cat            4         0
       monkey         2         2
r  r(  Nr   Fr  z,Use `.rename` to alter labels with a mapper.r   T)r!  rX   r   r   
no_defaultrg   rc   ra   _set_axis_namer   r   r  r  r  r%  rs   r  r  r2  )r   r  r  r(  r   r   r   r   
non_mapperr  rF  newnamesr  curnamesr8  s                  r   r  r    s~   b 3((.D%gy9'))D'"6* V$A\&-A)A  **w +   !!OPP %T$)))*>Fdnn-HHT0067&&q\Vl1o.UlSToBU
 H2215A#~~d399H4<=HD$HH=%%h4d%S . 	  >s   >F0c                    U R                  U5      nU R                  U5      R                  U5      n[        US5      nU(       a  U OU R	                  US9nUS:X  a  XVl        OXVl        U(       d  U$ g)a  
Set the name(s) of the axis.

Parameters
----------
name : str or list of str
    Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
    The axis to set the label. The value 0 or 'index' specifies index,
    and the value 1 or 'columns' specifies columns.
inplace : bool, default False
    If `True`, do operation inplace and return None.
copy:
    Whether to make a copy of the result.

Returns
-------
Series, DataFrame, or None
    The same type as the caller or `None` if `inplace` is `True`.

See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
    of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.

Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
...                   ["dog", "cat", "monkey"])
>>> df
        num_legs
dog            4
cat            4
monkey         2
>>> df._set_axis_name("animal")
        num_legs
animal
dog            4
cat            4
monkey         2
>>> df.index = pd.MultiIndex.from_product(
...                [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
               num_legs
type   name
mammal dog        4
       cat        4
       monkey     2
r   r   r   N)r!  r  	set_namesrX   r   r  r(  )r   r8  r   r   r   idxrenameds          r   r  NDFrame._set_axis_nameX  sl    n $$T*nnT",,T2%gy9!$tyydy';19M!ON r   c                F   ^ ^ [        UU 4S jT R                   5       5      $ )Nc              3     >#    U  H4  nTR                  U5      R                  TR                  U5      5      v   M6     g 7fr   )r  equals)r]  r   otherr   s     r   r^  (NDFrame._indexed_same.<locals>.<genexpr>  s5      
BSQDNN1$$U__Q%788BSs   <?)allr  r   r  s   ``r   _indexed_sameNDFrame._indexed_same  s#     
BFBSBS
 
 	
r   r   c                    [        U[        U 5      5      (       d  [        U [        U5      5      (       d  g[        [        U5      nU R                  R                  UR                  5      $ )a8	  
Test whether two objects contain the same elements.

This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal.

The row/column index do not need to have the same type, as long
as the values are considered equal. Corresponding columns and
index must be of the same dtype.

Parameters
----------
other : Series or DataFrame
    The other Series or DataFrame to be compared with the first.

Returns
-------
bool
    True if all elements are the same in both objects, False
    otherwise.

See Also
--------
Series.eq : Compare two Series objects of the same length
    and return a Series where each element is True if the element
    in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
    return a DataFrame where each element is True if the respective
    element in each DataFrame is equal, False otherwise.
testing.assert_series_equal : Raises an AssertionError if left and
    right are not equal. Provides an easy interface to ignore
    inequality in dtypes, indexes and precision among others.
testing.assert_frame_equal : Like assert_series_equal, but targets
    DataFrames.
numpy.array_equal : Return True if two arrays have the same shape
    and elements, False otherwise.

Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
    1   2
0  10  20

DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.

>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
    1   2
0  10  20
>>> df.equals(exactly_equal)
True

DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.

>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
   1.0  2.0
0   10   20
>>> df.equals(different_column_type)
True

DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.

>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
      1     2
0  10.0  20.0
>>> df.equals(different_data_type)
False
F)r   r
  r   r   r   r  r  s     r   r  NDFrame.equals  sN    ^ 5$t*--D$u+1N1NWe$yy

++r   c                    SS jnU R                   R                  U5      nU R                  X"R                  S9nUR	                  U SS9$ )Nc                    [        U R                  5      (       a  [        R                  " U 5      $ [        R                  " U 5      $ r   )r`   r   operatorinvnegr   s    r   blk_func!NDFrame.__neg__.<locals>.blk_func  s3    V\\**  ||F++
  ||F++r   r   __neg__r~  r   r   r   applyr   r   r   r   r  new_dataress       r   r  NDFrame.__neg__  sH    
	, 99??8,(((FY77r   c                    SS jnU R                   R                  U5      nU R                  X"R                  S9nUR	                  U SS9$ )Nc                    [        U R                  5      (       a  U R                  5       $ [        R                  " U 5      $ r   )r`   r   r   r  posr  s    r   r  !NDFrame.__pos__.<locals>.blk_func  s.    V\\**{{}$
  ||F++r   r   __pos__r~  r  r  r  s       r   r  NDFrame.__pos__  sH    	, 99??8,(((FY77r   c                    U R                   (       d  U R                  SS9$ U R                  R                  [        R
                  5      nU R                  XR                  S9nUR                  U SS9$ )NFr   r   
__invert__r~  )	rj  r   r   r  r  invertr   r   r   )r   r  r  s      r   r  NDFrame.__invert__  s]    yy99%9((99??8??3(((F\::r   c                F    [        S[        U 5      R                   S35      e)NzThe truth value of a zC is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().)r   r
  r   r   s    r   __nonzero__NDFrame.__nonzero__'  s-    #DJ$7$7#8 9C C
 	
r   c                v   [         R                  " [        U 5      R                   S3[        [        5       S9  U R                  5       n[        U[        [        R                  45      (       a  [        U5      $ [        U5      (       a!  [        S[        U 5      R                   35      eU R                  5         g)a  
Return the bool of a single element Series or DataFrame.

.. deprecated:: 2.1.0

   bool is deprecated and will be removed in future version of pandas.
   For ``Series`` use ``pandas.Series.item``.

This must be a boolean scalar value, either True or False. It will raise a
ValueError if the Series or DataFrame does not have exactly 1 element, or that
element is not boolean (integer values 0 and 1 will also raise an exception).

Returns
-------
bool
    The value in the Series or DataFrame.

See Also
--------
Series.astype : Change the data type of a Series, including to boolean.
DataFrame.astype : Change the data type of a DataFrame, including to boolean.
numpy.bool_ : NumPy boolean data type, used by pandas for boolean values.

Examples
--------
The method will only work for single element objects with a boolean value:

>>> pd.Series([True]).bool()  # doctest: +SKIP
True
>>> pd.Series([False]).bool()  # doctest: +SKIP
False

>>> pd.DataFrame({'col': [True]}).bool()  # doctest: +SKIP
True
>>> pd.DataFrame({'col': [False]}).bool()  # doctest: +SKIP
False

This is an alternative method and will only work
for single element objects with a boolean value:

>>> pd.Series([True]).item()  # doctest: +SKIP
True
>>> pd.Series([False]).item()  # doctest: +SKIP
False
zG.bool is now deprecated and will be removed in future version of pandasr  z0bool cannot act on a non-boolean single element T)r  r	  r
  r   r  rU   r  r   boolrh  bool_rg   r   r  )r   rF  s     r   r  NDFrame.bool0  s    ` 	Dz""# $* *')		
 LLNa$)**7Nq\\B:&&') 
 	r   c                    U R                   R                  [        R                  5      nU R	                  XR
                  S9R                  U SS9$ )a&  
Return a Series/DataFrame with absolute numeric value of each element.

This function only applies to elements that are all numeric.

Returns
-------
abs
    Series/DataFrame containing the absolute value of each element.

See Also
--------
numpy.absolute : Calculate the absolute value element-wise.

Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\sqrt{ a^2 + b^2 }`.

Examples
--------
Absolute numeric values in a Series.

>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0    1.10
1    2.00
2    3.33
3    4.00
dtype: float64

Absolute numeric values in a Series with complex numbers.

>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0    1.56205
dtype: float64

Absolute numeric values in a Series with a Timedelta element.

>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0   1 days
dtype: timedelta64[ns]

Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).

>>> df = pd.DataFrame({
...     'a': [4, 5, 6, 7],
...     'b': [10, 20, 30, 40],
...     'c': [100, 50, -30, -50]
... })
>>> df
     a    b    c
0    4   10  100
1    5   20   50
2    6   30  -30
3    7   40  -50
>>> df.loc[(df.c - 43).abs().argsort()]
     a    b    c
1    5   20   50
0    4   10  100
2    6   30  -30
3    7   40  -50
r   abs)r8  )r   r  rh  r  r   r   r   )r   res_mgrs     r   r  NDFrame.abss  sK    H ))//"&&)))')ERRu S 
 	
r   c                "    U R                  5       $ r   )r  r   s    r   __abs__NDFrame.__abs__  s    xxzr   c                @    U R                  U5      R                  U SS9$ )N	__round__r~  )roundr   )r   decimalss     r   r  NDFrame.__round__  s!    zz(#00k0JJr   c                    U R                  U5      nUSL=(       aH    [        U5      =(       a6    XR                  U   R                  ;   =(       a    U R	                  XS9(       + $ )a  
Test whether a key is a level reference for a given axis.

To be considered a level reference, `key` must be a string that:
  - (axis=0): Matches the name of an index level and does NOT match
    a column label.
  - (axis=1): Matches the name of a column level and does NOT match
    an index label.

Parameters
----------
key : Hashable
    Potential level name for the given axis
axis : int, default 0
    Axis that levels are associated with (0 for index, 1 for columns)

Returns
-------
is_level : bool
Nr   )r!  rm   r   r2  _is_label_reference)r   r9  r   axis_ints       r   _is_level_referenceNDFrame._is_level_reference  sg    , ((. tO AC Ayy*000A ,,S,@@		
r   c                   ^ ^^ T R                  U5      mU4S j[        T R                  5       5       nTSL=(       a(    [        T5      =(       a    [	        UU 4S jU 5       5      $ )a  
Test whether a key is a label reference for a given axis.

To be considered a label reference, `key` must be a string that:
  - (axis=0): Matches a column label
  - (axis=1): Matches an index label

Parameters
----------
key : Hashable
    Potential label name, i.e. Index entry.
axis : int, default 0
    Axis perpendicular to the axis that labels are associated with
    (0 means search for column labels, 1 means search for index labels)

Returns
-------
is_label: bool
c              3  6   >#    U  H  oT:w  d  M
  Uv   M     g 7fr   r  r]  r  r
  s     r   r^  .NDFrame._is_label_reference.<locals>.<genexpr>       K#8R(Nbb#8   		Nc              3  H   >#    U  H  nTTR                   U   ;   v   M     g 7fr   r   r]  r  r9  r   s     r   r^  r         >:RC499R=(:   ")r!  r  r  rm   any)r   r9  r   
other_axesr
  s   ``  @r   r	  NDFrame._is_label_reference  sV    * ((.K5#8K
 tO ?C ?>:>>	
r   c                L    U R                  XS9=(       d    U R                  XS9$ )a  
Test whether a key is a label or level reference for a given axis.

To be considered either a label or a level reference, `key` must be a
string that:
  - (axis=0): Matches a column label or an index level
  - (axis=1): Matches an index label or a column level

Parameters
----------
key : Hashable
    Potential label or level name
axis : int, default 0
    Axis that levels are associated with (0 for index, 1 for columns)

Returns
-------
bool
r   )r  r	  )r   r9  r   s      r   _is_label_or_level_reference$NDFrame._is_label_or_level_reference	  s5    * '''7 
4;S;S <T <
 	
r   c                p  ^ ^^	 T R                  U5      m	U	4S j[        T R                  5       5       nTb  [        T5      (       ao  TT R                  T	   R
                  ;   aQ  [        UU 4S jU 5       5      (       a5  T	S:X  a  SOSu  pET	S:X  a  SOSu  pgST SU S	U S
U S	U S3n[        U5      egggg)aT  
Check whether `key` is ambiguous.

By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.

Parameters
----------
key : Hashable
    Label or level name.
axis : int, default 0
    Axis that levels are associated with (0 for index, 1 for columns).

Raises
------
ValueError: `key` is ambiguous
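
Examples
--------
A minimal sketch with a small, hypothetical frame in which ``"A"`` names
both a column and the index level, so the check raises.

>>> df = pd.DataFrame({"A": [1, 2]},
...                   index=pd.Index([10, 20], name="A"))
>>> df._check_label_or_level_ambiguity("A")
Traceback (most recent call last):
...
ValueError: 'A' is both an index level and a column label, which is ambiguous.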
c              3  6   >#    U  H  oT:w  d  M
  Uv   M     g 7fr   r  r  s     r   r^  :NDFrame._check_label_or_level_ambiguity.<locals>.<genexpr>7  r  r  Nc              3  H   >#    U  H  nTTR                   U   ;   v   M     g 7fr   r   r  s     r   r^  r  =  r  r  r   )anr  )r   columnr{  z
' is both  z level and z label, which is ambiguous.)r!  r  r  rm   r   r2  r  r   )
r   r9  r   r  level_article
level_typelabel_article
label_typemsgr
  s
   ``       @r   _check_label_or_level_ambiguity'NDFrame._check_label_or_level_ambiguity"  s    ( ((.K5#8K
 OC  tyy*000>:>>> $,q=o &M
 $,q=o &M
 C5
=/:,k /:,.IK  S/! ? 1 ! r   c                \   U R                  U5      n[        U R                  5       Vs/ s H  o3U:w  d  M
  UPM     nnU R                  XS9(       a,  U R	                  XS9  U R                  XS   S9R                  nOHU R                  XS9(       a)  U R                  U   R                  U5      R                  nO[        U5      eUR                  S:  aQ  U(       a*  [        U R                  US   5      [        5      (       a  SnOSnUS:X  a  SOSn[        SU S	U S
U 35      eU$ s  snf )a  
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.

Retrieval logic:
  - (axis=0): Return column values if `key` matches a column label.
    Otherwise return index level values if `key` matches an index
    level.
  - (axis=1): Return row values if `key` matches an index label.
    Otherwise return column level values if 'key' matches a column
    level

Parameters
----------
key : Hashable
    Label or level name.
axis : int, default 0
    Axis that levels are associated with (0 for index, 1 for columns)

Returns
-------
np.ndarray or ExtensionArray

Raises
------
KeyError
    if `key` matches neither a label nor a level
ValueError
    if `key` matches multiple labels
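
Examples
--------
A minimal sketch with a small, hypothetical frame; with the default
``axis=0`` a matching column label takes precedence, otherwise an index
level of the same name is used.

>>> df = pd.DataFrame({"A": [1, 2]},
...                   index=pd.Index([10, 20], name="num"))
>>> df._get_label_or_level_values("A")
array([1, 2])
>>> df._get_label_or_level_values("num")
array([10, 20])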
r   r   r   zX
For a multi-index, the label must be a tuple with elements corresponding to each level. r"  r  zThe z label 'z' is not unique.)r!  r  r  r	  r)  xsr  r  r   r3  r  r,  r   r  r   r   )r   r9  r   r  r  r   multi_messagelabel_axis_names           r   _get_label_or_level_values"NDFrame._get_label_or_level_valuesN  s'   @ $$T*#(#8G#8R$Jb#8
G##C#3000@WWS!}W5==F%%c%5YYt_55c:BBF3- ;;?j
1)F
SSG  !#*.!)hO'xu4D]OT  5 Hs
   	D)D)c                    U R                  U5      n[        R                  " U5      nU Vs/ s H  o0R                  X2S9(       a  M  UPM     nnU(       a  [	        SU SU 35      eU Vs/ s H  o0R                  X2S9(       d  M  UPM     nnU Vs/ s H  o0R                  X2S9(       a  M  UPM     nnU R                  SS9nUS:X  a2  U(       a  UR                  USSS9  U(       a  UR                  US	SS
9  U$ U(       ad  [        UR                  [        5      (       a!  UR                  R                  U5      Ul
        O$[        UR                  R                  5      Ul
        U(       a  UR                  USSS
9  U$ s  snf s  snf s  snf )ai  
Drop labels and/or levels for the given `axis`.

For each key in `keys`:
  - (axis=0): If key matches a column label then drop the column.
    Otherwise if key matches an index level then drop the level.
  - (axis=1): If key matches an index label then drop the row.
    Otherwise if key matches a column level then drop the level.

Parameters
----------
keys : str or list of str
    labels or levels to drop
axis : int, default 0
    Axis that levels are associated with (0 for index, 1 for columns)

Returns
-------
dropped: DataFrame

Raises
------
ValueError
    if any `keys` match neither a label nor a level
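
Examples
--------
A minimal sketch with a small, hypothetical frame; ``"A"`` is dropped as a
column label, while ``"num"`` is dropped as an index level.

>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]},
...                   index=pd.Index([10, 20], name="num"))
>>> df._drop_labels_or_levels("A").columns
Index(['B'], dtype='object')
>>> df._drop_labels_or_levels("num").index
RangeIndex(start=0, stop=2, step=1)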
r   z;The following keys are not valid labels or levels for axis z: Fr   r   T)dropr   r   r   r   )r!  rs   maybe_make_listr  r   r  r   reset_indexr3  r   r(  r   r  r   rj  )r   keysr   rE  invalid_keyslevels_to_droplabels_to_dropdroppeds           r   _drop_labels_or_levelsNDFrame._drop_labels_or_levels  s{   6 $$T* %%d+
!#D#DQ#D#RAt 	 
 ##'&<.:  &*TT-E-Ea-E-S!TT%)XT1I1I!1I1W!TX
 )))'19##Nt#L ^!TB   gooz::&-oo&?&?&OGO '11E1E&FGO ^!TBW
 UXs#   FF+FFF-FzClassVar[None]__hash__c                ,    [        U R                  5      $ )z
Iterate over info axis.

Returns
-------
iterator
    Info axis as iterator.

Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
>>> for x in df:
...     print(x)
A
B
)iterrR  r   s    r   __iter__NDFrame.__iter__  s    " DOO$$r   c                    U R                   $ )aV  
Get the 'info axis' (see Indexing for more).

This is index for Series, columns for DataFrame.

Returns
-------
Index
    Info axis.

Examples
--------
>>> d = pd.DataFrame(data={'A': [1, 2, 3], 'B': [0, 4, 8]},
...                  index=['a', 'b', 'c'])
>>> d
   A  B
a  1  0
b  2  4
c  3  8
>>> d.keys()
Index(['A', 'B'], dtype='object')
rR  r   s    r   r7  NDFrame.keys  s    . r   c              #  B   #    U R                    H  nXU   4v   M     g7f)z{
Iterate over (label, values) on info axis

This is index for Series and columns for DataFrame.

Returns
-------
Generator
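
Examples
--------
A small sketch with a hypothetical frame; for a DataFrame the labels are
the column names and the values are the corresponding columns.

>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> for label, content in df.items():
...     print(label, list(content))
A [1, 2]
B [3, 4]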
NrD  )r   hs     r   r   NDFrame.items  s!      A!W* !s   c                ,    [        U R                  5      $ )zReturns length of info axis)r   rR  r   s    r   __len__NDFrame.__len__  s    4??##r   c                    XR                   ;   $ )z#True if the key is in the info axisrD  )r   r9  s     r   __contains__NDFrame.__contains__  s     oo%%r   c                B   ^  [        U 4S jT R                   5       5      $ )a[  
Indicator whether Series/DataFrame is empty.

True if Series/DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.

Returns
-------
bool
    If Series/DataFrame is empty, return True, if not return False.

See Also
--------
Series.dropna : Return series without null values.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
    where (all or any) data are missing.

Notes
-----
If Series/DataFrame contains only NaNs, it is still not considered empty. See
the example below.

Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:

>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True

If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:

>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
    A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True

>>> ser_empty = pd.Series({'A' : []})
>>> ser_empty
A    []
dtype: object
>>> ser_empty.empty
False
>>> ser_empty = pd.Series()
>>> ser_empty.empty
True
c              3  ^   >#    U  H"  n[        TR                  U5      5      S :H  v   M$     g7f)r   Nr[  r\  s     r   r^   NDFrame.empty.<locals>.<genexpr>\  s&     J8I13t~~a()Q.8Is   *-)r  r  r   s   `r   emptyNDFrame.empty"  s    t J8I8IJJJr   i  __array_priority__c                   U R                   n[        R                  " X1S9n[        UR                  UR                  5      (       a  [        5       (       a  U R                  R                  (       ax  [        U R                  R                  S   UR                  5      (       aF  [        UR                  UR                  5      (       a!  UR                  5       nSUR                  l        U$ )Nr   r   F)r  rh  asarrayr[   r   r   r   r  rN  r  viewr   	writeable)r   r   r   r   arrs        r   	__array__NDFrame.__array__e  s     jj-6<<33#%%		)) dkk..q16<<@@^ciiF F hhj&+		#
r   c                8    [         R                  " XU/UQ70 UD6$ r   )rr   array_ufunc)r   ufuncr  inputsr  s        r   __array_ufunc__NDFrame.__array_ufunc__w  s!     $$T&L6LVLLr   c           
     *   U R                    Vs0 s H  o[        XS 5      _M     nnU R                  U R                  U R                   U R                  U R
                  R                   Vs0 s H  oU R
                  U   _M     snS.UE$ s  snf s  snf )N)r   r   r   r   r   )r   r0  r   r   r   r   _keys)r   rE  metas      r   __getstate__NDFrame.__getstate__  s    37>>B>a74D))>BIIIIZZ151A1AB1AA$**Q-'1AB
 
 	
 C Cs   B+Bc           	        [        U[        5      (       a  Xl        GOf[        U[        5      (       Ga6  SU;   a  SU;  a  UR	                  S5      US'   UR                  S5      nUb  UR                  S0 5      nUc  0 n[        R                  U SU5        UR                  SSS05      n[        R                  U S[        U 40 UD65        [        U R                  U R                  -   5      n[        U5       H,  nXa;   d  M
  US:w  d  M  X   n[        R                  XU5        M.     UR                  5        H"  u  pgXe;  d  M  [        R                  XU5        M$     O%[        S5      e[        U5      S	:X  a  [        S5      e0 U l        g )
Nr  r   r   r   r   r   Tz(Pre-0.12 pickles are no longer supportedr+  )r   r   r   r   r  r  r   r   r|   setr   r   listr   r   r   r   )r   stater   r   r   rd  rE  rF  s           r   __setstate__NDFrame.__setstate__  sT   e\**It$$%F%$7 %		' 2f))F#C		(B/=E""459		(-F,MN""453G3GH 4//$..@AdAza8m!H**4A6 $
 "KKMDA}**4A6 *
 **TUUZ1_%&PQQ35r   c                ~    SSR                  [        [        U 5      5       S3n[        U 5      R                   SU S3$ )N[,]())joinmapr   r
  r   )r   preprs     r   __repr__NDFrame.__repr__  sA     CHHSt456a8t*%%&awa00r   c                X    [         R                  " S5      S:X  a  U R                  5       $ g)z}
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
zstyler.render.reprlatexN)r   
get_optionto_latexr   s    r   _repr_latex_NDFrame._repr_latex_  s'     12g===?"r   c                    [         R                  " S5      (       a\  U R                  [         R                  " S5      5      nUR                  SS9n[	        [
        U5      n[        U[        R                  S9$ g)zP
Not a real Jupyter special repr method, but we use the same
naming convention.
zdisplay.html.table_schemazdisplay.max_rowstable)orient)object_pairs_hookN)	r   rz  headto_jsonr   r   r   collectionsOrderedDict)r   r   as_jsons      r   _repr_data_resource_NDFrame._repr_data_resource_  sd     89999V../ABCDll'l2G3(GK4K4KLL :r   z3.0r   excel_writerto_excel)versionallowed_argsr8  storage_optionsz1.2.0)r   r  storage_options_versionaddedr,  c                    Uc  0 n[        U [        5      (       a  U OU R                  5       nSSKJn  U" UUUUUUUUUS9	nUR                  UUU	U
UUUUS9  g)a  
Write {klass} to an Excel sheet.

To write a single {klass} to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.

Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.

Parameters
----------
excel_writer : path-like, file-like, or ExcelWriter object
    File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
    Name of sheet which will contain DataFrame.
na_rep : str, default ''
    Missing data representation.
float_format : str, optional
    Format string for floating point numbers. For example
    ``float_format="%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
    Columns to write.
header : bool or list of str, default True
    Write out the column names. If a list of string is given it is
    assumed to be aliases for the column names.
index : bool, default True
    Write row names (index).
index_label : str or sequence, optional
    Column label for index column(s) if desired. If not specified, and
    `header` and `index` are True, then the index names are used. A
    sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
    Upper left cell row to dump data frame.
startcol : int, default 0
    Upper left cell column to dump data frame.
engine : str, optional
    Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
    via the options ``io.excel.xlsx.writer`` or
    ``io.excel.xlsm.writer``.

merge_cells : bool, default True
    Write MultiIndex and Hierarchical Rows as merged cells.
inf_rep : str, default 'inf'
    Representation for infinity (there is no native representation for
    infinity in Excel).
freeze_panes : tuple of int (length 2), optional
    Specifies the one-based bottommost row and rightmost column that
    is to be frozen.
{storage_options}

    .. versionadded:: {storage_options_versionadded}
engine_kwargs : dict, optional
    Arbitrary keyword arguments passed to excel engine.

See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
io.formats.style.Styler.to_excel : Add styles to Excel sheet.

Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.

Once a workbook has been saved it is not possible to write further
data without rewriting the whole workbook.

Examples
--------

Create, write to and save a workbook:

>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
...                    index=['row 1', 'row 2'],
...                    columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx")  # doctest: +SKIP

To specify the sheet name:

>>> df1.to_excel("output.xlsx",
...              sheet_name='Sheet_name_1')  # doctest: +SKIP

If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:

>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer:  # doctest: +SKIP
...     df1.to_excel(writer, sheet_name='Sheet_name_1')
...     df2.to_excel(writer, sheet_name='Sheet_name_2')

ExcelWriter can also be used to append to an existing Excel file:

>>> with pd.ExcelWriter('output.xlsx',
...                     mode='a') as writer:  # doctest: +SKIP
...     df1.to_excel(writer, sheet_name='Sheet_name_3')

To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):

>>> df1.to_excel('output1.xlsx', engine='xlsxwriter')  # doctest: +SKIP
Nr   )ExcelFormatter)na_repcolsheaderfloat_formatr  index_labelmerge_cellsinf_rep)
sheet_namestartrowstartcolfreeze_panesenginer  engine_kwargs)r   rk   to_framepandas.io.formats.excelr  write)r   r  r  r  r  r(  r  r  r  r  r  r  r  r  r  r  r  r   r  	formatters                       r   r  NDFrame.to_excel  s|    R  Ml33T:"%##

	 	!%+' 	 		
r   path_or_bufr  compression_options)r  r  inferc                    SSK Jn  Uc	  US:X  a  SnOUc  Sn[        R                  " U5        U=(       d    SnUR	                  UU UUUUUUUU	U
UUUS9$ )a  
Convert the object to a JSON string.

Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.

Parameters
----------
path_or_buf : str, path object, file-like object, or None, default None
    String, path object (implementing os.PathLike[str]), or file-like
    object implementing a write() function. If None, the result is
    returned as a string.
orient : str
    Indication of expected JSON string format.

    * Series:

        - default is 'index'
        - allowed values are: {{'split', 'records', 'index', 'table'}}.

    * DataFrame:

        - default is 'columns'
        - allowed values are: {{'split', 'records', 'index', 'columns',
          'values', 'table'}}.

    * The format of the JSON string:

        - 'split' : dict like {{'index' -> [index], 'columns' -> [columns],
          'data' -> [values]}}
        - 'records' : list like [{{column -> value}}, ... , {{column -> value}}]
        - 'index' : dict like {{index -> {{column -> value}}}}
        - 'columns' : dict like {{column -> {{index -> value}}}}
        - 'values' : just the values array
        - 'table' : dict like {{'schema': {{schema}}, 'data': {{data}}}}

        Describing the data, where data component is like ``orient='records'``.

date_format : {{None, 'epoch', 'iso'}}
    Type of date conversion. 'epoch' = epoch milliseconds,
    'iso' = ISO8601. The default depends on the `orient`. For
    ``orient='table'``, the default is 'iso'. For all other orients,
    the default is 'epoch'.
double_precision : int, default 10
    The number of decimal places to use when encoding
    floating point values. The possible maximal value is 15.
    Passing double_precision greater than 15 will raise a ValueError.
force_ascii : bool, default True
    Force encoded string to be ASCII.
date_unit : str, default 'ms' (milliseconds)
    The time unit to encode to, governs timestamp and ISO8601
    precision.  One of 's', 'ms', 'us', 'ns' for second, millisecond,
    microsecond, and nanosecond respectively.
default_handler : callable, default None
    Handler to call if object cannot otherwise be converted to a
    suitable format for JSON. Should receive a single argument which is
    the object to convert and return a serialisable object.
lines : bool, default False
    If 'orient' is 'records' write out line-delimited json format. Will
    throw ValueError if incorrect 'orient' since others are not
    list-like.
{compression_options}

    .. versionchanged:: 1.4.0 Zstandard support.

index : bool or None, default None
    The index is only used when 'orient' is 'split', 'index', 'column',
    or 'table'. Of these, 'index' and 'column' do not support
    `index=False`.

indent : int, optional
   Length of whitespace used to indent each record.

{storage_options}

mode : str, default 'w' (writing)
    Specify the IO mode for output when supplying a path_or_buf.
    Accepted args are 'w' (writing) and 'a' (append) only.
    mode='a' is only supported when lines is True and orient is 'records'.

Returns
-------
None or str
    If path_or_buf is None, returns the resulting json format as a
    string. Otherwise returns None.

See Also
--------
read_json : Convert a JSON string to pandas object.

Notes
-----
The behavior of ``indent=0`` varies from the stdlib, which does not
indent the output but does insert newlines. Currently, ``indent=0``
and the default ``indent=None`` are equivalent in pandas, though this
may change in a future release.

``orient='table'`` contains a 'pandas_version' field under 'schema'.
This stores the version of `pandas` used in the latest revision of the
schema.

Examples
--------
>>> from json import loads, dumps
>>> df = pd.DataFrame(
...     [["a", "b"], ["c", "d"]],
...     index=["row 1", "row 2"],
...     columns=["col 1", "col 2"],
... )

>>> result = df.to_json(orient="split")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
{{
    "columns": [
        "col 1",
        "col 2"
    ],
    "index": [
        "row 1",
        "row 2"
    ],
    "data": [
        [
            "a",
            "b"
        ],
        [
            "c",
            "d"
        ]
    ]
}}

Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.

>>> result = df.to_json(orient="records")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
[
    {{
        "col 1": "a",
        "col 2": "b"
    }},
    {{
        "col 1": "c",
        "col 2": "d"
    }}
]

Encoding/decoding a Dataframe using ``'index'`` formatted JSON:

>>> result = df.to_json(orient="index")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
{{
    "row 1": {{
        "col 1": "a",
        "col 2": "b"
    }},
    "row 2": {{
        "col 1": "c",
        "col 2": "d"
    }}
}}

Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:

>>> result = df.to_json(orient="columns")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
{{
    "col 1": {{
        "row 1": "a",
        "row 2": "c"
    }},
    "col 2": {{
        "row 1": "b",
        "row 2": "d"
    }}
}}

Encoding/decoding a Dataframe using ``'values'`` formatted JSON:

>>> result = df.to_json(orient="values")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
[
    [
        "a",
        "b"
    ],
    [
        "c",
        "d"
    ]
]

Encoding with Table Schema:

>>> result = df.to_json(orient="table")
>>> parsed = loads(result)
>>> dumps(parsed, indent=4)  # doctest: +SKIP
{{
    "schema": {{
        "fields": [
            {{
                "name": "index",
                "type": "string"
            }},
            {{
                "name": "col 1",
                "type": "string"
            }},
            {{
                "name": "col 2",
                "type": "string"
            }}
        ],
        "primaryKey": [
            "index"
        ],
        "pandas_version": "1.4.0"
    }},
    "data": [
        {{
            "index": "row 1",
            "col 1": "a",
            "col 2": "b"
        }},
        {{
            "index": "row 2",
            "col 1": "c",
            "col 2": "d"
        }}
    ]
}}
r   )jsonr  isoepoch)r  r   r  date_formatdouble_precisionforce_ascii	date_unitdefault_handlerlinescompressionr  indentr  mode)	pandas.ior  r   is_nonnegative_intr  )r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  s                  r   r  NDFrame.to_json|	  s|    P 	#6W#4K !K!!&)1||##-#+#+  
 	
r   to_hdfc                J    SSK Jn  UR                  UUU UUUUUUU	U
UUUUS9  g)ap  
Write the contained data to an HDF5 file using HDFStore.

Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.

In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.

.. warning::

   One can store a subclass of ``DataFrame`` or ``Series`` to HDF5,
   but the type of the subclass is lost upon storing.

For more information see the :ref:`user guide <io.hdf5>`.

Parameters
----------
path_or_buf : str or pandas.HDFStore
    File path or HDFStore object.
key : str
    Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
    Mode to open file:

    - 'w': write, a new file is created (an existing file with
      the same name would be deleted).
    - 'a': append, an existing file is opened for reading and
      writing, and if the file does not exist it is created.
    - 'r+': similar to 'a', but the file must already exist.
complevel : {0-9}, default None
    Specifies a compression level for data.
    A value of 0 or None disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
    Specifies the compression library to be used.
    These additional compressors for Blosc are supported
    (default if no compressor specified: 'blosc:blosclz'):
    {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
    'blosc:zlib', 'blosc:zstd'}.
    Specifying a compression library which is not available issues
    a ValueError.
append : bool, default False
    For Table formats, append the input data to the existing.
format : {'fixed', 'table', None}, default 'fixed'
    Possible values:

    - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
      nor searchable.
    - 'table': Table format. Write as a PyTables Table structure
      which may perform worse but allow more flexible operations
      like searching / selecting subsets of the data.
    - If None, pd.get_option('io.hdf.default_format') is checked,
      followed by fallback to "fixed".
index : bool, default True
    Write DataFrame index as a column.
min_itemsize : dict or int, optional
    Map column names to minimum string sizes for columns.
nan_rep : Any, optional
    How to represent null values as str.
    Not allowed with append=True.
dropna : bool, default False, optional
    Remove missing values.
data_columns : list of columns or True, optional
    List of columns to create as indexed data columns for on-disk
    queries, or True to use all columns. By default only the axes
    of the object are indexed. See
    :ref:`Query via data columns<io.hdf5-query-data-columns>`. for
    more information.
    Applicable only to format='table'.
errors : str, default 'strict'
    Specifies how encoding and decoding errors are to be handled.
    See the errors argument for :func:`open` for a full list
    of options.
encoding : str, default "UTF-8"

See Also
--------
read_hdf : Read from HDF file.
DataFrame.to_orc : Write a DataFrame to the binary orc format.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a SQL table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.

Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
...                   index=['a', 'b', 'c'])  # doctest: +SKIP
>>> df.to_hdf('data.h5', key='df', mode='w')  # doctest: +SKIP

We can add another object to the same file:

>>> s = pd.Series([1, 2, 3, 4])  # doctest: +SKIP
>>> s.to_hdf('data.h5', key='s')  # doctest: +SKIP

Reading from HDF file:

>>> pd.read_hdf('data.h5', 'df')  # doctest: +SKIP
A  B
a  1  4
b  2  5
c  3  6
>>> pd.read_hdf('data.h5', 's')  # doctest: +SKIP
0    1
1    2
2    3
3    4
dtype: int64
r   )pytables)r  	complevelcomplibappendformatr  min_itemsizenan_repdropnadata_columnsr  encodingN)r  r  r  )r   r  r9  r  r  r  r  r  r  r  r  r  r  r  r  r  s                   r   r  NDFrame.to_hdf
  sH    H 	' 	%% 	 	
r   )r   r8  conto_sqlc
                >    SSK Jn
  U
R                  U UUUUUUUUU	S9
$ )a  
Write records stored in a DataFrame to a SQL database.

Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.

Parameters
----------
name : str
    Name of SQL table.
con : sqlalchemy.engine.(Engine or Connection) or sqlite3.Connection
    Using SQLAlchemy makes it possible to use any DB supported by that
    library. Legacy support is provided for sqlite3.Connection objects. The user
    is responsible for engine disposal and connection closure for the SQLAlchemy
    connectable. See `here                 <https://docs.sqlalchemy.org/en/20/core/connections.html>`_.
    If passing a sqlalchemy.engine.Connection which is already in a transaction,
    the transaction will not be committed.  If passing a sqlite3.Connection,
    it will not be possible to roll back the record insertion.

schema : str, optional
    Specify the schema (if database flavor supports this). If None, use
    default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
    How to behave if the table already exists.

    * fail: Raise a ValueError.
    * replace: Drop the table before inserting new values.
    * append: Insert new values to the existing table.

index : bool, default True
    Write DataFrame index as a column. Uses `index_label` as the column
    name in the table. Creates a table index for this column.
index_label : str or sequence, default None
    Column label for index column(s). If None is given (default) and
    `index` is True, then the index names are used.
    A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
    Specify the number of rows in each batch to be written at a time.
    By default, all rows will be written at once.
dtype : dict or scalar, optional
    Specifying the datatype for columns. If a dictionary is used, the
    keys should be the column names and the values should be the
    SQLAlchemy types or strings for the sqlite3 legacy mode. If a
    scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
    Controls the SQL insertion clause used:

    * None : Uses standard SQL ``INSERT`` clause (one per row).
    * 'multi': Pass multiple values in a single ``INSERT`` clause.
    * callable with signature ``(pd_table, conn, keys, data_iter)``.

    Details and a sample callable implementation can be found in the
    section :ref:`insert method <io.sql.method>`.

Returns
-------
None or int
    Number of rows affected by to_sql. None is returned if the callable
    passed into ``method`` does not return an integer number of rows.

    The number of returned rows affected is the sum of the ``rowcount``
    attribute of ``sqlite3.Cursor`` or SQLAlchemy connectable which may not
    reflect the exact number of written rows as stipulated in the
    `sqlite3 <https://docs.python.org/3/library/sqlite3.html#sqlite3.Cursor.rowcount>`__ or
    `SQLAlchemy <https://docs.sqlalchemy.org/en/20/core/connections.html#sqlalchemy.engine.CursorResult.rowcount>`__.

    .. versionadded:: 1.4.0

Raises
------
ValueError
    When the table already exists and `if_exists` is 'fail' (the
    default).

See Also
--------
read_sql : Read a DataFrame from a table.

Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.

Not all datastores support ``method="multi"``. Oracle, for example,
does not support multi-value insert.

References
----------
.. [1] https://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/

Examples
--------
Create an in-memory SQLite database.

>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)

Create a table from scratch with 3 rows.

>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
     name
0  User 1
1  User 2
2  User 3

>>> df.to_sql(name='users', con=engine)
3
>>> from sqlalchemy import text
>>> with engine.connect() as conn:
...    conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]

An `sqlalchemy.engine.Connection` can also be passed to `con`:

>>> with engine.begin() as connection:
...     df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
...     df1.to_sql(name='users', con=connection, if_exists='append')
2

This is allowed to support operations that require that the same
DBAPI connection is used for the entire operation.

>>> df2 = pd.DataFrame({'name' : ['User 6', 'User 7']})
>>> df2.to_sql(name='users', con=engine, if_exists='append')
2
>>> with engine.connect() as conn:
...    conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
 (0, 'User 4'), (1, 'User 5'), (0, 'User 6'),
 (1, 'User 7')]

Overwrite the table with just ``df2``.

>>> df2.to_sql(name='users', con=engine, if_exists='replace',
...            index_label='id')
2
>>> with engine.connect() as conn:
...    conn.execute(text("SELECT * FROM users")).fetchall()
[(0, 'User 6'), (1, 'User 7')]

Use ``method`` to define a callable insertion method to do nothing
if there's a primary key conflict on a table in a PostgreSQL database.

>>> from sqlalchemy.dialects.postgresql import insert
>>> def insert_on_conflict_nothing(table, conn, keys, data_iter):
...     # "a" is the primary key in "conflict_table"
...     data = [dict(zip(keys, row)) for row in data_iter]
...     stmt = insert(table.table).values(data).on_conflict_do_nothing(index_elements=["a"])
...     result = conn.execute(stmt)
...     return result.rowcount
>>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_nothing)  # doctest: +SKIP
0

For MySQL, a callable to update columns ``b`` and ``c`` if there's a conflict
on a primary key.

>>> from sqlalchemy.dialects.mysql import insert
>>> def insert_on_conflict_update(table, conn, keys, data_iter):
...     # update columns "b" and "c" on primary key conflict
...     data = [dict(zip(keys, row)) for row in data_iter]
...     stmt = (
...         insert(table.table)
...         .values(data)
...     )
...     stmt = stmt.on_duplicate_key_update(b=stmt.inserted.b, c=stmt.inserted.c)
...     result = conn.execute(stmt)
...     return result.rowcount
>>> df_conflict.to_sql(name="conflict_table", con=conn, if_exists="append", method=insert_on_conflict_update)  # doctest: +SKIP
2

Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.

>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
     A
0  1.0
1  NaN
2  2.0

>>> from sqlalchemy.types import Integer
>>> df.to_sql(name='integers', con=engine, index=False,
...           dtype={"A": Integer()})
3

>>> with engine.connect() as conn:
...   conn.execute(text("SELECT * FROM integers")).fetchall()
[(1,), (None,), (2,)]
r   )sql)schema	if_existsr  r  	chunksizer   r  )r  r  r  )r   r8  r  r  r  r  r  r  r   r  r  s              r   r  NDFrame.to_sql9  s<    h 	"zz#  
 	
r   path	to_picklec                $    SSK Jn  U" U UUUUS9  g)as  
Pickle (serialize) object to file.

Parameters
----------
path : str, path object, or file-like object
    String, path object (implementing ``os.PathLike[str]``), or file-like
    object implementing a binary ``write()`` function. File path where
    the pickled object will be stored.
{compression_options}
protocol : int
    Int which indicates which protocol should be used by the pickler,
    default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
    values are 0, 1, 2, 3, 4, 5. A negative value for the protocol
    parameter is equivalent to setting its value to HIGHEST_PROTOCOL.

    .. [1] https://docs.python.org/3/library/pickle.html.

{storage_options}

See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

Examples
--------
>>> original_df = pd.DataFrame({{"foo": range(5), "bar": range(5, 10)}})  # doctest: +SKIP
>>> original_df  # doctest: +SKIP
   foo  bar
0    0    5
1    1    6
2    2    7
3    3    8
4    4    9
>>> original_df.to_pickle("./dummy.pkl")  # doctest: +SKIP

>>> unpickled_df = pd.read_pickle("./dummy.pkl")  # doctest: +SKIP
>>> unpickled_df  # doctest: +SKIP
   foo  bar
0    0    5
1    1    6
2    2    7
3    3    8
4    4    9
r   )r  )r  protocolr  N)pandas.io.pickler  )r   r  r  r  r  r  s         r   r  NDFrame.to_pickle  s    ~ 	/#+	
r   to_clipboardc                :    SSK Jn  UR                  " U 4XS.UD6  g)a\  
Copy object to the system clipboard.

Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.

Parameters
----------
excel : bool, default True
    Produce output in a csv format for easy pasting into excel.

    - True, use the provided separator for csv pasting.
    - False, write a string representation of the object to the clipboard.

sep : str, default ``'\t'``
    Field delimiter.
**kwargs
    These parameters will be passed to DataFrame.to_csv.

See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
    (csv) file.
read_clipboard : Read text from clipboard and pass to read_csv.

Notes
-----
Requirements for your platform.

  - Linux : `xclip`, or `xsel` (with `PyQt4` modules)
  - Windows : none
  - macOS : none

This method uses the processes developed for the package `pyperclip`. A
solution to render any output string format is given in the examples.

Examples
--------
Copy the contents of a DataFrame to the clipboard.

>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])

>>> df.to_clipboard(sep=',')  # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6

We can omit the index by passing the keyword `index` and setting
it to false.

>>> df.to_clipboard(sep=',', index=False)  # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6

Using the original `pyperclip` package for any string output format.

.. code-block:: python

   import pyperclip
   html = df.style.to_html()
   pyperclip.copy(html)
r   )
clipboards)excelsepN)r  r  r  )r   r  r  r  r  s        r   r  NDFrame.to_clipboarde  s     P 	)EEEfEr   c                    [        S5      nU R                  S:X  a  UR                  R                  U 5      $ UR                  R                  U 5      $ )al	  
Return an xarray object from the pandas object.

Returns
-------
xarray.DataArray or xarray.Dataset
    Data in the pandas structure converted to Dataset if the object is
    a DataFrame, or a DataArray if the object is a Series.

See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.

Notes
-----
See the `xarray docs <https://xarray.pydata.org/en/stable/>`__

Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
...                    ('parrot', 'bird', 24.0, 2),
...                    ('lion', 'mammal', 80.5, 4),
...                    ('monkey', 'mammal', np.nan, 4)],
...                   columns=['name', 'class', 'max_speed',
...                            'num_legs'])
>>> df
     name   class  max_speed  num_legs
0  falcon    bird      389.0         2
1  parrot    bird       24.0         2
2    lion  mammal       80.5         4
3  monkey  mammal        NaN         4

>>> df.to_xarray()  # doctest: +SKIP
<xarray.Dataset>
Dimensions:    (index: 4)
Coordinates:
  * index      (index) int64 32B 0 1 2 3
Data variables:
    name       (index) object 32B 'falcon' 'parrot' 'lion' 'monkey'
    class      (index) object 32B 'bird' 'bird' 'mammal' 'mammal'
    max_speed  (index) float64 32B 389.0 24.0 80.5 nan
    num_legs   (index) int64 32B 2 2 4 4

>>> df['max_speed'].to_xarray()  # doctest: +SKIP
<xarray.DataArray 'max_speed' (index: 4)>
array([389. ,  24. ,  80.5,   nan])
Coordinates:
  * index    (index) int64 0 1 2 3

>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
...                         '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
...                               'animal': ['falcon', 'parrot',
...                                          'falcon', 'parrot'],
...                               'speed': [350, 18, 361, 15]})
>>> df_multiindex = df_multiindex.set_index(['date', 'animal'])

>>> df_multiindex
                   speed
date       animal
2018-01-01 falcon    350
           parrot     18
2018-01-02 falcon    361
           parrot     15

>>> df_multiindex.to_xarray()  # doctest: +SKIP
<xarray.Dataset>
Dimensions:  (date: 2, animal: 2)
Coordinates:
  * date     (date) datetime64[ns] 2018-01-01 2018-01-02
  * animal   (animal) object 'falcon' 'parrot'
Data variables:
    speed    (date, animal) int64 350 18 361 15
xarrayr   )rI   r,  	DataArrayfrom_seriesDatasetfrom_dataframe)r   r  s     r   	to_xarrayNDFrame.to_xarray  sF    Z ,H599>##//55>>0066r   bufc                    g r   r  r   r  r(  r  r  r  
formattersr  sparsifyindex_names	bold_rowscolumn_format	longtableescaper  decimalmulticolumnmulticolumn_formatmultirowcaptionr  positions                         r   r{  NDFrame.to_latex      2 	r   c                    g r   r  r  s                         r   r{  r     r  r   r{  c                z  ^^& U R                   S:X  a  U R                  5       n Uc  [        R                  " S5      S:H  nUc  [        R                  " S5      S:H  nUc  [        R                  " S5      nUc  [        R                  " S5      nUc  [        R                  " S	5      nUb   [	        U[
        5      (       d  [        S
5      eUc  [        U R                  5      O
[        U5      n[	        U[        [        45      (       a*  [        U5      U:w  a  [        SU S[        U5       S35      eUU(       a  SOSUS.nSS0UEnSS0UEn[	        T[
        5      (       a  U4S jm&OTm&U&4S jnSn[	        U[        5      (       a7  [        U R                  5       VVs0 s H  u  nnU[        UUU   S9_M     nnnO[	        U[        5      (       a  UR                  SS5      nUR                  SS5      nUb  UR                  SU05        Ub  UR                  SU05        UnU R!                  SS9R                  n U  H,  n!U!UR#                  5       ;  d  M  UR                  U!T&05        M.     OUc  Tb  [        US S9nUU/n"/ n#/ n$U(       a7  U#R%                  U R                   Vs/ s H  nUU;  d  M  UPM     snSS.5        USL a  U#R%                  SS05        O2[	        U[        [        45      (       a  U$R%                  USS.5        U/n"USL a  U#R%                  SS05        U	SL a  U#R%                  SSS .5        SUUU(       a  SOSU(       a  UOS!U 3U(       a  S"OS#UUUUUU(       a!  [	        U R&                  [(        5      (       a  S$OSU
S%.n%U R+                  UU#U$SU0UEU"U%S&9$ s  snnf s  snf )'a  
Render object to a LaTeX tabular, longtable, or nested table.

Requires ``\usepackage{{booktabs}}``.  The output can be copy/pasted
into a main LaTeX document or read from an external file
with ``\input{{table.tex}}``.

.. versionchanged:: 2.0.0
   Refactored to use the Styler implementation via jinja2 templating.

Parameters
----------
buf : str, Path or StringIO-like, optional, default None
    Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
    The subset of columns to write. Writes all columns by default.
header : bool or list of str, default True
    Write out the column names. If a list of strings is given,
    it is assumed to be aliases for the column names.
index : bool, default True
    Write row names (index).
na_rep : str, default 'NaN'
    Missing data representation.
formatters : list of functions or dict of {{str: function}}, optional
    Formatter functions to apply to columns' elements by position or
    name. The result of each function must be a unicode string.
    List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
    Formatter for floating point numbers. For example
    ``float_format="%.2f"`` and ``float_format="{{:0.2f}}".format`` will
    both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
    Set to False for a DataFrame with a hierarchical index to print
    every multiindex key at each row. By default, the value will be
    read from the config module.
index_names : bool, default True
    Prints the names of the indexes.
bold_rows : bool, default False
    Make the row labels bold in the output.
column_format : str, optional
    The columns format as specified in `LaTeX table format
    <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
    columns. By default, 'l' will be used for all columns except
    columns of numbers, which default to 'r'.
longtable : bool, optional
    Use a longtable environment instead of tabular. Requires
    adding a \usepackage{{longtable}} to your LaTeX preamble.
    By default, the value will be read from the pandas config
    module, and set to `True` if the option ``styler.latex.environment`` is
    `"longtable"`.

    .. versionchanged:: 2.0.0
       The pandas option affecting this argument has changed.
escape : bool, optional
    By default, the value will be read from the pandas config
    module and set to `True` if the option ``styler.format.escape`` is
    `"latex"`. When set to False prevents from escaping latex special
    characters in column names.

    .. versionchanged:: 2.0.0
       The pandas option affecting this argument has changed, as has the
       default value to `False`.
encoding : str, optional
    A string representing the encoding to use in the output file,
    defaults to 'utf-8'.
decimal : str, default '.'
    Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
    Use \multicolumn to enhance MultiIndex columns.
    The default will be read from the config module, and is set
    as the option ``styler.sparse.columns``.

    .. versionchanged:: 2.0.0
       The pandas option affecting this argument has changed.
multicolumn_format : str, default 'r'
    The alignment for multicolumns, similar to `column_format`
    The default will be read from the config module, and is set as the option
    ``styler.latex.multicol_align``.

    .. versionchanged:: 2.0.0
       The pandas option affecting this argument has changed, as has the
       default value to "r".
multirow : bool, default True
    Use \multirow to enhance MultiIndex rows. Requires adding a
    \usepackage{{multirow}} to your LaTeX preamble. Will print
    centered labels (instead of top-aligned) across the contained
    rows, separating groups via clines. The default will be read
    from the pandas config module, and is set as the option
    ``styler.sparse.index``.

    .. versionchanged:: 2.0.0
       The pandas option affecting this argument has changed, as has the
       default value to `True`.
caption : str or tuple, optional
    Tuple (full_caption, short_caption),
    which results in ``\caption[short_caption]{{full_caption}}``;
    if a single string is passed, no short caption will be set.
label : str, optional
    The LaTeX label to be placed inside ``\label{{}}`` in the output.
    This is used with ``\ref{{}}`` in the main ``.tex`` file.

position : str, optional
    The LaTeX positional argument for tables, to be placed after
    ``\begin{{}}`` in the output.

Returns
-------
str or None
    If buf is None, returns the result as a string. Otherwise returns None.

See Also
--------
io.formats.style.Styler.to_latex : Render a DataFrame to LaTeX
    with conditional formatting.
DataFrame.to_string : Render a DataFrame to a console-friendly
    tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.

Notes
-----
As of v2.0.0 this method has changed to use the Styler implementation as
part of :meth:`.Styler.to_latex` via ``jinja2`` templating. This means
that ``jinja2`` is a requirement, and needs to be installed, for this method
to function. It is advised that users switch to using Styler, since that
implementation is more frequently updated and contains much more
flexibility with the output.

Examples
--------
Convert a general DataFrame to LaTeX with formatting:

>>> df = pd.DataFrame(dict(name=['Raphael', 'Donatello'],
...                        age=[26, 45],
...                        height=[181.23, 177.65]))
>>> print(df.to_latex(index=False,
...                   formatters={"name": str.upper},
...                   float_format="{:.1f}".format,
... ))  # doctest: +SKIP
\begin{tabular}{lrr}
\toprule
name & age & height \\
\midrule
RAPHAEL & 26 & 181.2 \\
DONATELLO & 45 & 177.7 \\
\bottomrule
\end{tabular}
r   Nzstyler.latex.environmentr  zstyler.format.escapery  zstyler.sparse.columnszstyler.latex.multicol_alignzstyler.sparse.indexz&`column_format` must be str or unicodezWriting z cols but got z aliases)r  r  r  r   r   c                   > TU -  $ r   r  )xr  s    r   <lambda>"NDFrame.to_latex.<locals>.<lambda>  s
    |a7Gr   c                `   > [        U [        [        45      (       a  Tb  T" U 5      $ U" U 5      $ r   )r   floatcomplex)r  alt_format_float_format_s     r   _wrapNDFrame.to_latex.<locals>._wrap  s/    !eW-..=3L$Q''"1~%r   )r  	__index____columns__r  r  )includec                    U $ r   r  rF  s    r   r  r  &  s    qr   r(  )subsetr   F)ro  r   r  T)r2  r   znaive-tnaivezskip-last;data)hrulessparse_indexsparse_columnsenvironmentmulticol_alignmultirow_alignr  r  r  r  r  clinesr  hiderelabel_indexr  format_indexrender_kwargs)r,  r  r   rz  r   r   r   r   r(  ri  r`  r1  r   r   r  r  select_dtypesr7  r  r  r   _to_latex_via_styler)'r   r  r(  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  lengthbase_format_index_format_column_format_r  formatters_r7  cindex_formattercolumn_formatterfloat_columnscolformat_index_hide_relabel_index_render_kwargs_r  s'          `                              @r   r{  r  ;  s   ` 99>==?D))*DETI>&&'=>'IF ++,CDK%!'!2!23P!Q(()>?H$Zs-K-KEFF&-oT\\"3w<ftUm,,V1Fxx~c&k](STT !'gT

 *0(Cl(C*0!)D|)DlC((-GM(M	& >Bj$'' &dll33DAq 75jm<<3  K 
D))(nn[$?O)~~mTB*$$k?%CD+%%{4D&EF$K ..w.?GGM$joo//&&]';< % L$<!%[AK&7 %'LL*.,,K,Q!7:Jq,K% U?LL&),-u..!!VY"GH*OME>LL&'*+%LL49: $&*3; 1,-.%-c7  *Z

J?? '"#
( (((==&( ) 
 	
}6 Ls   N2*
N88N8r  c                  SSK Jn  [        SU 5      n U" U SS9nS Hi  n	[        5       U	   n
[	        U
[
        5      (       a  [        X5      " S
0 U
D6  M8  [	        U
[        5      (       d  MO  U
 H  n[        X5      " S
0 UD6  M     Mk     Uc  0 OUnUR                  S5      (       a  UR                  S 5        UR                  " S
S	U0UD6$ )a"  
Render object to a LaTeX tabular, longtable, or nested table.

Uses the ``Styler`` implementation with the following, ordered, method chaining:

.. code-block:: python

   styler = Styler(DataFrame)
   styler.hide(**hide)
   styler.relabel_index(**relabel_index)
   styler.format(**format)
   styler.format_index(**format_index)
   styler.to_latex(buf=buf, **render_kwargs)

Parameters
----------
buf : str, Path or StringIO-like, optional, default None
    Buffer to write to. If None, the output is returned as a string.
hide : dict, list of dict
    Keyword args to pass to the method call of ``Styler.hide``. If a list is
    given, the method is called once per dict.
relabel_index : dict, list of dict
    Keyword args to pass to the method of ``Styler.relabel_index``. If a list
    is given, the method is called once per dict.
format : dict, list of dict
    Keyword args to pass to the method call of ``Styler.format``. If a list is
    given, the method is called once per dict.
format_index : dict, list of dict
    Keyword args to pass to the method call of ``Styler.format_index``. If a
    list is given, the method is called once per dict.
render_kwargs : dict
    Keyword args to pass to the method call of ``Styler.to_latex``.

Returns
-------
str or None
    If buf is None, returns the result as a string. Otherwise returns None.
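
For illustration, a single call that hides the index, formats floats to two
decimals and draws horizontal rules could look like the sketch below (the
specific keyword values are assumptions, not defaults):

.. code-block:: python

    df._to_latex_via_styler(
        hide={"axis": "index"},
        format={"precision": 2},
        render_kwargs={"bold_rows": False, "hrules": True},
    )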
r   )Stylerr   r,  )uuid)r  r  r  r  r  c                    g)Nztextbf:--rwrap;r  r  s    r   r  .NDFrame._to_latex_via_styler.<locals>.<lambda>  s    '8r   r  r  )pandas.io.formats.styler#  r   varsr   r   r0  ri  r  	map_indexr{  )r   r  r  r  r  r  r  r#  stylerkw_namekwsub_kws               r   r  NDFrame._to_latex_via_styler[  s    ` 	3K&2&JGB"d##(.2.B%% FF,6v6 ! K ,3[))89838-88r   c                    g r   r  r   r  r  r  r  r(  r  r  r  r  r  r  quoting	quotecharlineterminatorr  r  doublequote
escapecharr  r  r  s                         r   to_csvNDFrame.to_csv  r  r   c                    g r   r  r0  s                         r   r6  r7    r  r   r6  c                    [        U [        5      (       a  U OU R                  5       n[        UUUUUUS9n[	        U5      R                  UUUU
UUUUUU	UUUUUUS9$ )a  
Write object to a comma-separated values (csv) file.

Parameters
----------
path_or_buf : str, path object, file-like object, or None, default None
    String, path object (implementing os.PathLike[str]), or file-like
    object implementing a write() function. If None, the result is
    returned as a string. If a non-binary file object is passed, it should
    be opened with `newline=''`, disabling universal newlines. If a binary
    file object is passed, `mode` might need to contain a `'b'`.
sep : str, default ','
    String of length 1. Field delimiter for the output file.
na_rep : str, default ''
    Missing data representation.
float_format : str, Callable, default None
    Format string for floating point numbers. If a Callable is given, it takes
    precedence over other numeric formatting parameters, like decimal.
columns : sequence, optional
    Columns to write.
header : bool or list of str, default True
    Write out the column names. If a list of strings is given, it is
    assumed to be aliases for the column names.
index : bool, default True
    Write row names (index).
index_label : str or sequence, or False, default None
    Column label for index column(s) if desired. If None is given, and
    `header` and `index` are True, then the index names are used. A
    sequence should be given if the object uses MultiIndex. If
    False, do not print fields for index names. Use index_label=False
    for easier importing in R.
mode : {{'w', 'x', 'a'}}, default 'w'
    Forwarded to either `open(mode=)` or `fsspec.open(mode=)` to control
    the file opening. Typical values include:

    - 'w', truncate the file first.
    - 'x', exclusive creation, failing if the file already exists.
    - 'a', append to the end of file if it exists.

encoding : str, optional
    A string representing the encoding to use in the output file,
    defaults to 'utf-8'. `encoding` is not supported if `path_or_buf`
    is a non-binary file object.
{compression_options}

       May be a dict with key 'method' as compression mode
       and other entries as additional compression options if
       compression mode is 'zip'.

       Passing compression options as keys in dict is
       supported for compression modes 'gzip', 'bz2', 'zstd', and 'zip'.
quoting : optional constant from csv module
    Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
    then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
    will treat them as non-numeric.
quotechar : str, default '\"'
    String of length 1. Character used to quote fields.
lineterminator : str, optional
    The newline character or character sequence to use in the output
    file. Defaults to `os.linesep`, which depends on the OS in which
    this method is called (e.g. '\\n' for Linux, '\\r\\n' for Windows).

    .. versionchanged:: 1.5.0

        Previously was line_terminator, changed for consistency with
        read_csv and the standard library 'csv' module.

chunksize : int or None
    Rows to write at a time.
date_format : str, default None
    Format string for datetime objects.
doublequote : bool, default True
    Control quoting of `quotechar` inside a field.
escapechar : str, default None
    String of length 1. Character used to escape `sep` and `quotechar`
    when appropriate.
decimal : str, default '.'
    Character recognized as decimal separator. E.g. use ',' for
    European data.
errors : str, default 'strict'
    Specifies how encoding and decoding errors are to be handled.
    See the errors argument for :func:`open` for a full list
    of options.

{storage_options}

Returns
-------
None or str
    If path_or_buf is None, returns the resulting csv format as a
    string. Otherwise returns None.

See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.

Examples
--------
Create 'out.csv' containing 'df' without indices

>>> df = pd.DataFrame({{'name': ['Raphael', 'Donatello'],
...                    'mask': ['red', 'purple'],
...                    'weapon': ['sai', 'bo staff']}})
>>> df.to_csv('out.csv', index=False)  # doctest: +SKIP

Create 'out.zip' containing 'out.csv'

>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
>>> compression_opts = dict(method='zip',
...                         archive_name='out.csv')  # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
...           compression=compression_opts)  # doctest: +SKIP

To write a csv file to a new folder or nested folder you will first
need to create it using either Pathlib or os:

>>> from pathlib import Path  # doctest: +SKIP
>>> filepath = Path('folder/subfolder/out.csv')  # doctest: +SKIP
>>> filepath.parent.mkdir(parents=True, exist_ok=True)  # doctest: +SKIP
>>> df.to_csv(filepath)  # doctest: +SKIP

>>> import os  # doctest: +SKIP
>>> os.makedirs('folder/subfolder', exist_ok=True)  # doctest: +SKIP
>>> df.to_csv('folder/subfolder/out.csv')  # doctest: +SKIP
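
Control float formatting and the field delimiter (illustrative):

>>> df2 = pd.DataFrame({{'x': [0.123456], 'y': [1.0]}})
>>> df2.to_csv(sep=';', float_format='%.2f', index=False)
'x;y\n0.12;1.00\n'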
)framer  r  r  r  r  )r3  r  r  r  r  r1  r(  r  r  r  r2  r  r4  r5  r  )r   rk   r  r   r   r6  )r   r  r  r  r  r(  r  r  r  r  r  r  r1  r2  r3  r  r  r4  r5  r  r  r  r   r  s                           r   r6  r7    s    ~  l33T&%
	 !+22)####!+! 3 
 	
r   c                    [        U 5      e)z
Reset the cacher.
r  r   s    r   _reset_cacherNDFrame._reset_cacher  s     "$''r   c                    [        5       (       a  gU(       a  U R                  SS9  U(       a  U R                  5         gg)z
See if we need to update our parent cacher if clear, then clear our
cache.

Parameters
----------
clear : bool, default False
    Clear the item cache.
verify_is_copy : bool, default True
    Provide is_copy checks.
Nreferentr  )r   _check_setitem_copyrw  )r   clearverify_is_copyr   s       r   _maybe_update_cacherNDFrame._maybe_update_cacher  s8    "   $$z$2""$ r   c                    [        U 5      er   r  r   s    r   rw  NDFrame._clear_item_cache      !$''r   c                :   [         R                  " SU5        [        U[        5      (       dr  [        R
                  " U[        R                  S9nUS:X  aH  UR                  S:X  a8  [        5       (       a)  [        U[        U 5      5      (       a  U R                  SS9$ OU R                  S:X  a!  [        [        U 5      R                   S35      e[        R                   " S[        U 5      R                   S	3["        [%        5       S
9  [        R&                  " UR(                  UR*                  UR,                  [        R                  S9nU R.                  R1                  UU R3                  U5      SS9nU R5                  XDR6                  S9R9                  U SS9$ )a  
Return the elements in the given *positional* indices along an axis.

This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.

Parameters
----------
indices : array-like
    An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
    The axis on which to select elements. ``0`` means that we are
    selecting rows, ``1`` means that we are selecting columns.
    For `Series` this parameter is unused and defaults to 0.
**kwargs
    For compatibility with :meth:`numpy.take`. Has no effect on the
    output.

Returns
-------
same type as caller
    An array-like containing the elements taken from the object.

See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.

Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
...                    ('parrot', 'bird', 24.0),
...                    ('lion', 'mammal', 80.5),
...                    ('monkey', 'mammal', np.nan)],
...                   columns=['name', 'class', 'max_speed'],
...                   index=[0, 2, 3, 1])
>>> df
     name   class  max_speed
0  falcon    bird      389.0
2  parrot    bird       24.0
3    lion  mammal       80.5
1  monkey  mammal        NaN

Take elements at positions 0 and 3 along the axis 0 (default).

Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.

>>> df.take([0, 3])
     name   class  max_speed
0  falcon    bird      389.0
1  monkey  mammal        NaN

Take elements at indices 1 and 2 along the axis 1 (column selection).

>>> df.take([1, 2], axis=1)
    class  max_speed
0    bird      389.0
2    bird       24.0
3  mammal       80.5
1  mammal        NaN

We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.

>>> df.take([-1, -2])
     name   class  max_speed
1  monkey  mammal        NaN
3    lion  mammal       80.5
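
Because ``take`` is purely positional, it matches ``iloc`` with the same
list of positions:

>>> df.take([0, 3]).equals(df.iloc[[0, 3]])
True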
r  r   r   r   Nr   z1.take requires a sequence of integers, not slice.zPassing a slice to zq.take is deprecated and will raise in a future version. Use `obj[slicer]` or pass a sequence of integers instead.r  Tr   verifyr   taker~  )nvvalidate_taker   r  rh  rV  intpr,  r   r   r   r   r  r
  r   r  r	  r  rU   arangestartstopstepr   rL  r   r   r   r   )r   indicesr   r  r  s        r   rL  NDFrame.take  sd   X 	V$'5))jj8G	LLA%'))$Wc$i88yydy++YY!^:&&' (  
 MM%d4j&9&9%: ;2 2 +- iiw||W\\G 99>>--d3 " 

 ))()GTT U 
 	
r   c                    U R                  XS9nU R                  S:X  aE  UR                  U5      R                  U R                  U5      5      (       d  UR	                  U 5        U$ )a7  
Internal version of the `take` method that sets the `_is_copy`
attribute to keep track of the parent dataframe (using in indexing
for the SettingWithCopyWarning).

For Series this does the same as the public take (it never sets `_is_copy`).

See the docstring of `take` for full explanation of the parameters.
)rT  r   r+  )rL  r,  r  r  _set_is_copy)r   rT  r   r  s       r   _take_with_is_copyNDFrame._take_with_is_copy.  sV     7699>&"2"24"8"?"?t@T"U"U%r   c                   U R                  U5      nU R                  U5      n[        U[        5      (       a  [	        S5      eUb  [        U[
        5      (       d  [	        S5      eUR                  XUS9u  pg[        S5      /U R                  -  nXhU'   [        U5      n	U R                  U	   n
[        XR                  U5      U5        U
$ US:X  a  U(       a  X   $ U R                  nOU R                  n[        U[
        5      (       aA  UR                  USS9u  plU(       d'  [         R"                  " U5      (       a  XUS-    nOX   nOUR%                  U5      n[        U[&        R(                  5      (       aN  UR*                  [&        R,                  :X  a!  UR/                  5       u  nU R1                  XS9$ U R1                  XbS9$ [3        U5      (       d  X   n[3        U5      (       a  US:X  ay  U R                  S:X  a  U R4                  U   $ U R6                  R9                  U5      nU R;                  XR<                  S	9n
U R                  U   U
l        U
RA                  U 5      n
O`[3        U5      (       a   U R                  SS2[        XfS-   5      4   n
O0US:X  a  U R                  SS2U4   n
OU R                  U   n
WU
l        U
RC                  X
RD                  (       + S
9  U
$ )a
  
Return cross-section from the Series/DataFrame.

This method takes a `key` argument to select data at a particular
level of a MultiIndex.

Parameters
----------
key : label or tuple of label
    Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
    Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
    In case of a key partially contained in a MultiIndex, indicate
    which levels are used. Levels can be referred to by label or position.
drop_level : bool, default True
    If False, returns object with same levels as self.

Returns
-------
Series or DataFrame
    Cross-section from the original Series or DataFrame
    corresponding to the selected index levels.

See Also
--------
DataFrame.loc : Access a group of rows and columns
    by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
    for selection by position.

Notes
-----
`xs` cannot be used to set values.

MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.

Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
...      'num_wings': [0, 0, 2, 2],
...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
...      'animal': ['cat', 'dog', 'bat', 'penguin'],
...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
                           num_legs  num_wings
class  animal  locomotion
mammal cat     walks              4          0
       dog     walks              4          0
       bat     flies              2          2
bird   penguin walks              2          2

Get values at specified index

>>> df.xs('mammal')
                   num_legs  num_wings
animal locomotion
cat    walks              4          0
dog    walks              4          0
bat    flies              2          2

Get values at several indexes

>>> df.xs(('mammal', 'dog', 'walks'))
num_legs     4
num_wings    0
Name: (mammal, dog, walks), dtype: int64

Get values at specified index and level

>>> df.xs('cat', level=1)
                   num_legs  num_wings
class  locomotion
mammal walks              4          0

Get values at several indexes and levels

>>> df.xs(('bird', 'walks'),
...       level=[0, 'locomotion'])
         num_legs  num_wings
animal
penguin         2          2

Get values at specified column and axis

>>> df.xs('num_wings', axis=1)
class   animal   locomotion
mammal  cat      walks         0
        dog      walks         0
        bat      flies         2
bird    penguin  walks         2
Name: num_wings, dtype: int64
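
Unlike the default behaviour, ``drop_level=False`` keeps the level that was
selected:

>>> df.xs('mammal').index.names
FrozenList(['animal', 'locomotion'])
>>> df.xs('mammal', drop_level=False).index.names
FrozenList(['class', 'animal', 'locomotion'])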
z7list keys are not supported in xs, pass a tuple insteadNzIndex must be a MultiIndex)r:  
drop_levelr   r   r  r   r   r   )#r!  r  r   ri  r  r   get_loc_levelr  r,  r`  r  rt  r%  r(  r  _get_loc_levelr   
is_integerget_locrh  ndarrayr   r  nonzerorX  rg   r  r   fast_xs_constructor_sliced_from_mgrr   r   r   rW  _is_view)r   r9  r   r:  r[  ro  locnew_ax_indexerr  r  r  r  indsr   s                  r   r-  
NDFrame.xs?  s   T $$T*%c4  UVVfj11 <== ..sJ.WKC d}tyy0H TNHoGYYw'FF11$7@M19y LLEJJEeZ(("11#Q1?NC>>#&& %C!G 4I %
I--$C#rzz**99(!kkmGT2242CC2232BBS>>!J	S>>daiyyA~ ||C((ii'',G66w\\6RF::c?FL((.Fs^^YYq%1W"556FQYYYq#v&FYYs^F$FL 	D??':;r   c                    [        U 5      er   r  )r   r  s     r   __getitem__NDFrame.__getitem__  rH  r   c                p   U R                   R                  USS9n[        U[        R                  5      (       am  [
        R                  " UR                  [        R                  SS9[        U 5      5      n[        U[        R                  5      (       a  U R                  USS9$ UnU R                  U5      $ )z;
__getitem__ for the case where the key is a slice object.
getitem)r   Fr\  r   r   )r  _convert_slice_indexerr   rh  ra  r   maybe_indices_to_slicer   rO  r   rL  _slice)r   r9  slobjr  s       r   _getitem_sliceNDFrame._getitem_slice  s     

11#I1FeRZZ((00RWW513t9G '2::..yyqy11E{{5!!r   c                F   [        U[        5      (       d   [        U5      5       eU R                  U5      nU R                  R                  XS9nU R                  X3R                  S9nUR                  U 5      nUS:g  =(       d    UR                  nUR                  XS9  U$ )zX
Construct a slice of this container.

Slicing with this method is *always* positional.
r   r   r   r\  )r   r  r
  r   r   	get_slicer   r   r   re  rW  )r   rs  r   r   r  is_copys         r   rr  NDFrame._slice	  s     %''4e4'++D1))%%e%7++G,,+G$$T* !).vD/r   c                b    U(       d  S U l         g Uc   e[        R                  " U5      U l         g r   )r   weakrefref)r   r|  r   s      r   rW  NDFrame._set_is_copy  s&     DM?"?#KK,DMr   c                D    U R                   (       a  U R                  SS9  g)a  
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.

Should be called just near setting a value

Will return a boolean if it we are a view and are cached, but a
single-dtype meaning that the cacher should be updated following
setting.
r?  r@  F)r   rA  r   s    r   %_check_is_chained_assignment_possible-NDFrame._check_is_chained_assignment_possible#  s     ==$$z$2r   c                   [        5       (       d  [        5       (       a  gU(       d  U R                  (       d  g[        R                  " S5      nUc  gU R                  bo  [        U R                  [        5      (       dP  U R                  5       n[        R                  " U5      (       a  Ub"  UR                  U R                  :X  a  SU l        g[        U R                  [        5      (       a  U R                  nOUS:X  a  SnOSnUS:X  a  [        U5      eUS:X  a#  [        R                  " U[        [        5       S9  gg)	a  

Parameters
----------
t : str, the type of setting error
force : bool, default False
   If True, then force showing an error.

validate if we are doing a setitem on a chained copy.

It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.

df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'

# This technically need not raise SettingWithCopy if both are view
# (which is not generally guaranteed but is usually True.  However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'

Nzmode.chained_assignmentr?  z
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copya  
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copyr  r	  r  )r   r   r   r   rz  r   r   gcget_referentsra  rN   r  r	  rO   rU   )r   r  forcer   rs        r   rA  NDFrame._check_setitem_copy2  s   4   $6$8$8 !!";<= ==$Zs-K-KA##A&&1=QWW

=R $ dmmS))A*_? ?  G&q))F?MM!3@P@RS r   c                D   SnSnU R                   S:X  a8  [        U R                  [        5      (       a   XR                  R                  ;  nU(       aZ  [        U[        5      (       d  U4nU R                   H2  n[        U[        5      (       d  M  US[        U5       U:X  d  M.  X	 SnM4     U(       d>  U R                  S   R                  U5      nU R                  R                  U5      U l
         U R                  U	 g! [
         a     Nf = f! [         a     gf = f)z
Delete item
Fr+  NTr  )r,  r   r(  r   _enginer  r`  r   r   r`  r   ideleter   r  )r   r9  deletedmaybe_shortcutr  rf  s         r   __delitem__NDFrame.__delitem__}  s   
 99>jzBB "%LL,@,@!@  c5))f||c5))c*CHo.D	"G $  ))B-'',C		))#.DI	  %+  ,  		s#   D 4D 
DD
DDc                `    U(       a'  U R                   R                  (       d  [        S5      eg g )NzQCannot specify 'inplace=True' when 'self.flags.allows_duplicate_labels' is False.)r   r   r   )r   r   s     r   r  2NDFrame._check_inplace_and_allows_duplicate_labels  s+    4::==A  >7r   c                F     X   $ ! [         [        [        4 a    Us $ f = f)a  
Get item from object for given key (ex: DataFrame column).

Returns default value if not found.

Parameters
----------
key : object

Returns
-------
same type as items contained in object

Examples
--------
>>> df = pd.DataFrame(
...     [
...         [24.3, 75.7, "high"],
...         [31, 87.8, "high"],
...         [22, 71.6, "medium"],
...         [35, 95, "medium"],
...     ],
...     columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
...     index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
... )

>>> df
            temp_celsius  temp_fahrenheit windspeed
2014-02-12          24.3             75.7      high
2014-02-13          31.0             87.8      high
2014-02-14          22.0             71.6    medium
2014-02-15          35.0             95.0    medium

>>> df.get(["temp_celsius", "windspeed"])
            temp_celsius windspeed
2014-02-12          24.3      high
2014-02-13          31.0      high
2014-02-14          22.0    medium
2014-02-15          35.0    medium

>>> ser = df['windspeed']
>>> ser.get('2014-02-13')
'high'

If the key isn't found, the default value will be used.

>>> df.get(["temp_celsius", "temp_kelvin"], default="default_value")
'default_value'

>>> ser.get('2014-02-10', '[unknown]')
'[unknown]'
)r  r   
IndexError)r   r9  defaults      r   r  NDFrame.get  s+    l	9*j1 	N	s      c                .    U R                   R                  $ )z:Return boolean indicating if self is view of another array)r   is_viewr   s    r   re  NDFrame._is_view  s     yy   r   c                `    UR                  U R                  UUUUS9nU R                  " S0 UD6$ )a  
Return an object with matching indices as other object.

Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.

Parameters
----------
other : Object of the same data type
    Its row and column indices are used to define the new indices
    of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
    Method to use for filling holes in reindexed DataFrame.
    Please note: this is only applicable to DataFrames/Series with a
    monotonically increasing/decreasing index.

    * None (default): don't fill gaps
    * pad / ffill: propagate last valid observation forward to next
      valid
    * backfill / bfill: use next valid observation to fill gap
    * nearest: use nearest valid observations to fill gap.

copy : bool, default True
    Return a new object, even if the passed indexes are the same.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
limit : int, default None
    Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
    Maximum distance between original and new labels for inexact
    matches. The values of the index at the matching locations must
    satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

    Tolerance may be a scalar value, which applies the same tolerance
    to all values, or list-like, which applies variable tolerance per
    element. List-like includes list, tuple, array, Series, and must be
    the same size as the index and its dtype must exactly match the
    index's type.

Returns
-------
Series or DataFrame
    Same type as caller, but with changed indices on each axis.

See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.

Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.

Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
...                     [31, 87.8, 'high'],
...                     [22, 71.6, 'medium'],
...                     [35, 95, 'medium']],
...                    columns=['temp_celsius', 'temp_fahrenheit',
...                             'windspeed'],
...                    index=pd.date_range(start='2014-02-12',
...                                        end='2014-02-15', freq='D'))

>>> df1
            temp_celsius  temp_fahrenheit windspeed
2014-02-12          24.3             75.7      high
2014-02-13          31.0             87.8      high
2014-02-14          22.0             71.6    medium
2014-02-15          35.0             95.0    medium

>>> df2 = pd.DataFrame([[28, 'low'],
...                     [30, 'low'],
...                     [35.1, 'medium']],
...                    columns=['temp_celsius', 'windspeed'],
...                    index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
...                                            '2014-02-15']))

>>> df2
            temp_celsius windspeed
2014-02-12          28.0       low
2014-02-13          30.0       low
2014-02-15          35.1    medium

>>> df2.reindex_like(df1)
            temp_celsius  temp_fahrenheit windspeed
2014-02-12          28.0              NaN       low
2014-02-13          30.0              NaN       low
2014-02-14           NaN              NaN       NaN
2014-02-15          35.1              NaN    medium
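
As the Notes describe, this is equivalent to reindexing on both axes
explicitly:

>>> df2.reindex_like(df1).equals(
...     df2.reindex(index=df1.index, columns=df1.columns))
True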
)r   r  r   limit	tolerancer  )r  r  reindex)r   r  r  r   r  r  r  s          r   reindex_likeNDFrame.reindex_like  sC    d &&"" ' 
 || a  r   )r   r  r(  r:  r  c                   g r   r  r   ro  r   r  r(  r:  r   r  s           r   r3  NDFrame.dropk       	r   )r   r  r(  r:  r   r  c                   g r   r  r  s           r   r3  r  y  r  r   c                   g r   r  r  s           r   r3  r    r  r   r  c               V   [        US5      nUb&  Uc  Ub  [        S5      eU R                  U5      nX0n	O*Uc  Ub  SU0n	U R                  S:X  a  XIS'   O[        S5      eU n
U	R	                  5        H  u  p!Uc  M
  U
R                  XXWS9n
M     U(       a  U R                  U
5        g U
$ )Nr   z2Cannot specify both 'labels' and 'index'/'columns'r  r+  r(  z>Need to specify at least one of 'labels', 'index' or 'columns'r:  r  )rX   r   r%  r,  r   
_drop_axisr  )r   ro  r   r  r(  r:  r   r  rD  r   r   s              r   r3  r    s     &gy9 G$7 !UVV++D1I&D'"5U#DyyA~")YP   JJLLD!nnVnN )   %Jr   c           	     N   U R                  U5      nU R                  U5      nUR                  (       aV  Ub1  [        U[        5      (       d  [        S5      eUR                  XUS9nOUR                  XS9nUR                  U5      nGO[        U5      =(       d    [        U[        5      n	[        [        R                  " U5      5      nUbk  [        U[        5      (       d  [        S5      eUR                  U5      R                  U5      ) n
US:X  a#  U
R                  5       (       a  [!        U S35      eO[        U[        5      (       a9  UR"                  S:X  a)  U	(       d"  UR                  S5      R                  U5      ) n
OOUR                  U5      ) n
UR%                  U5      S	:H  R'                  5       nUS:X  a  U(       a  [!        U S35      e[        U
R"                  [(        5      (       a  U
R+                  [,        S
9n
U
R/                  5       S   nUR1                  U5      nU R2                  U-
  S-
  nU R4                  R7                  UUUSSUS9nU R9                  XR:                  S9nU R2                  S:X  a  U R<                  Ul        URA                  U 5      $ )a  
Drop labels from specified axis. Used in the ``drop`` method
internally.

Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
    For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
    If 'ignore', suppress error and existing labels are dropped.
only_slice : bool, default False
    Whether indexing along columns should be view-only.

Nzaxis must be a MultiIndexr  )r  r  r  r   r   r  r   r   T)r   
allow_dupsr   
only_slicer   )!r!  r  	is_uniquer   r   AssertionErrorr3  get_indexerrn   r`  r\   rs   index_labels_to_arrayr3  isinr  r  r   r  r  rj   to_numpyr  rb  rL  r,  r   reindex_indexerr   r   r8  r   r   )r   ro  r   r:  r  r  axis_numnew_axisr  is_tuple_labelsmasklabels_missingr   r   r  s                  r   r  NDFrame._drop_axis  sR   2 ((.~~d#>> !$
33()DEE99V9H99V9;&&x0G 2&9VZPU=VO"6#?#?#GHF !$
33()DEE--e499&AA W$"fX-?#@AA4,,LLH,'
 --a055f==		&))"&"6"6v">""D!I!I!KW$"fX-?#@AA$**n55}}4}0llnQ'Gyy)H))h&*))++! , 
 ++G,,+G99>99FL""4((r   c                    U R                  5         U R                  5         UR                  U l        U R                  USS9  g)z
Replace self internals with result.

Parameters
----------
result : same type as self
verify_is_copy : bool, default True
    Provide is_copy checks.
T)rC  r   N)_reset_cacherw  r   rD  )r   r  rC  s      r   r  NDFrame._update_inplace  s:     	 KK	!!!Nr   c                z   ^ U4S jnU R                   nUb  U R                  U5      nXC0nU R                  " S0 UD6$ )a  
Prefix labels with string `prefix`.

For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.

Parameters
----------
prefix : str
    The string to add before each label.
axis : {0 or 'index', 1 or 'columns', None}, default None
    Axis to add prefix on

    .. versionadded:: 2.0.0

Returns
-------
Series or DataFrame
    New Series or DataFrame with updated labels.

See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.

Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0    1
1    2
2    3
3    4
dtype: int64

>>> s.add_prefix('item_')
item_0    1
item_1    2
item_2    3
item_3    4
dtype: int64

>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
   A  B
0  1  3
1  2  4
2  3  5
3  4  6

>>> df.add_prefix('col_')
     col_A  col_B
0       1       3
1       2       4
2       3       5
3       4       6
c                   > T U  3$ r   r  )r  r6  s    r   r  $NDFrame.add_prefix.<locals>.<lambda>`  s    nr   r  r  r%  r  )r   r6  r   r  rD  r  s    `    r   
add_prefixNDFrame.add_prefix%  sG    v %((	++D1I ||%f%%r   c                z   ^ U4S jnU R                   nUb  U R                  U5      nXC0nU R                  " S0 UD6$ )a  
Suffix labels with string `suffix`.

For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.

Parameters
----------
suffix : str
    The string to add after each label.
axis : {0 or 'index', 1 or 'columns', None}, default None
    Axis to add suffix on

    .. versionadded:: 2.0.0

Returns
-------
Series or DataFrame
    New Series or DataFrame with updated labels.

See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.

Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0    1
1    2
2    3
3    4
dtype: int64

>>> s.add_suffix('_item')
0_item    1
1_item    2
2_item    3
3_item    4
dtype: int64

>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
   A  B
0  1  3
1  2  4
2  3  5
3  4  6

>>> df.add_suffix('_col')
     A_col  B_col
0       1       3
1       2       4
2       3       5
3       4       6
c                   > U  T 3$ r   r  )r  suffixs    r   r  $NDFrame.add_suffix.<locals>.<lambda>  s    F8nr   r  r  )r   r  r   r  rD  r  s    `    r   
add_suffixNDFrame.add_suffixo  sG    v %((	++D1I ||%f%%r   )r   	ascendingr   r   na_positionignore_indexr9  c                   g r   r  r   r   r  r   r   r  r  r9  s           r   sort_valuesNDFrame.sort_values  r  r   )r   r  r   r  r  r9  c                   g r   r  r  s           r   r  r    r  r   c                   g r   r  r  s           r   r  r    r  r   	quicksortlastc                   [        U 5      e)u  
Sort by the values along either axis.

Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
     Axis to be sorted.
ascending : bool or list of bool, default True
     Sort ascending vs. descending. Specify list for multiple sort
     orders. If this is a list of bools, it must match the length of
     ``by``.
inplace : bool, default False
     If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
     Choice of sorting algorithm. See also :func:`numpy.sort` for more
     information. `mergesort` and `stable` are the only stable algorithms. For
     DataFrames, this option is only applied when sorting on a single
     column or label.
na_position : {'first', 'last'}, default 'last'
     Puts NaNs at the beginning if `first`; `last` puts NaNs at the
     end.
ignore_index : bool, default False
     If True, the resulting axis will be labeled 0, 1, …, n - 1.
key : callable, optional
    Apply the key function to the values
    before sorting. This is similar to the `key` argument in the
    builtin :meth:`sorted` function, with the notable difference that
    this `key` function should be *vectorized*. It should expect a
    ``Series`` and return a Series with the same shape as the input.
    It will be applied to each column in `by` independently.

Returns
-------
DataFrame or None
    DataFrame with sorted values or None if ``inplace=True``.

See Also
--------
DataFrame.sort_index : Sort a DataFrame by the index.
Series.sort_values : Similar method for a Series.

Examples
--------
>>> df = pd.DataFrame({
...     'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
...     'col2': [2, 1, 9, 8, 7, 4],
...     'col3': [0, 1, 9, 4, 2, 3],
...     'col4': ['a', 'B', 'c', 'D', 'e', 'F']
... })
>>> df
  col1  col2  col3 col4
0    A     2     0    a
1    A     1     1    B
2    B     9     9    c
3  NaN     8     4    D
4    D     7     2    e
5    C     4     3    F

Sort by col1

>>> df.sort_values(by=['col1'])
  col1  col2  col3 col4
0    A     2     0    a
1    A     1     1    B
2    B     9     9    c
5    C     4     3    F
4    D     7     2    e
3  NaN     8     4    D

Sort by multiple columns

>>> df.sort_values(by=['col1', 'col2'])
  col1  col2  col3 col4
1    A     1     1    B
0    A     2     0    a
2    B     9     9    c
5    C     4     3    F
4    D     7     2    e
3  NaN     8     4    D

Sort Descending

>>> df.sort_values(by='col1', ascending=False)
  col1  col2  col3 col4
4    D     7     2    e
5    C     4     3    F
2    B     9     9    c
0    A     2     0    a
1    A     1     1    B
3  NaN     8     4    D

Putting NAs first

>>> df.sort_values(by='col1', ascending=False, na_position='first')
  col1  col2  col3 col4
3  NaN     8     4    D
4    D     7     2    e
5    C     4     3    F
2    B     9     9    c
0    A     2     0    a
1    A     1     1    B

Sorting with a key function

>>> df.sort_values(by='col4', key=lambda col: col.str.lower())
   col1  col2  col3 col4
0    A     2     0    a
1    A     1     1    B
2    B     9     9    c
3  NaN     8     4    D
4    D     7     2    e
5    C     4     3    F

Natural sort with the key argument,
using the `natsort <https://github.com/SethMMorton/natsort>` package.

>>> df = pd.DataFrame({
...    "time": ['0hr', '128hr', '72hr', '48hr', '96hr'],
...    "value": [10, 20, 30, 40, 50]
... })
>>> df
    time  value
0    0hr     10
1  128hr     20
2   72hr     30
3   48hr     40
4   96hr     50
>>> from natsort import index_natsorted
>>> df.sort_values(
...     by="time",
...     key=lambda x: np.argsort(index_natsorted(df["time"]))
... )
    time  value
0    0hr     10
3   48hr     40
2   72hr     30
4   96hr     50
1  128hr     20
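
When ``ascending`` is a list it must match the length of ``by``, one flag
per sort key (illustrative):

>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 3, 2]})
>>> df.sort_values(by=['a', 'b'], ascending=[True, False])
   a  b
1  1  3
0  1  1
2  2  2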
r  r  s           r   r  r    s    l "$''r   )r   r:  r  r   r  sort_remainingr  r9  c       	            g r   r  
r   r   r:  r  r   r   r  r  r  r9  s
             r   
sort_indexNDFrame.sort_indexz       	r   )	r   r:  r  r   r   r  r  r  r9  c       	            g r   r  r  s
             r   r  r    r  r   c       	            g r   r  r  s
             r   r  r    r  r   c       	   	        [        US5      nU R                  U5      n[        U5      nU R                  U5      n
[	        XX5XgU	5      nUcC  U(       a  U nOU R                  S S9nU(       a  [        [        U 5      5      Ul        U(       a  g U$ U R                  U5      nU R                  R                  XSS9nU(       d  UR                  U   R                  5       nO[        [        U5      5      nUR                  X5        U R                  XR                  S9nU(       a  U R!                  U5      $ UR#                  U SS9$ )Nr   r   FrJ  r   r  r~  )rX   r!  rW   r  r   r   r   r   r  r   r   rL  r   _sort_levels_monotonicrp  r   r  r   )r   r   r:  r  r   r   r  r  r  r9  targetr  r  baxisr  r  s                   r   r  r    s'    &gy9$$T*&y1	%%9K
 ?-,SY7,,T299>>'e>D }}U+BBDH$S\2H%*++H==+I''//&&tL&AAr   )r   optional_reindex)	r  r(  r   r  r   r:  
fill_valuer  r  c       	   	       ^  Ub  Ub  Ub  [        S5      eUc  Ub  Ub  [        S5      eUb  Ub  UnO$UnO!U(       a  T R                  U5      S:X  a  UnOUnUUS.n[        U5      nU(       a  [        5       (       a  Sn[	        U 4S jUR                  5        5       5      (       a  T R                  US9$ T R                  XU5      (       a  T R                  XU5      $ T R                  XXXXU5      R                  T SS	9$ )
a  
Conform {klass} to new index with optional filling logic.

Places NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.

Parameters
----------
{optional_reindex}
method : {{None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}}
    Method to use for filling holes in reindexed DataFrame.
    Please note: this is only applicable to DataFrames/Series with a
    monotonically increasing/decreasing index.

    * None (default): don't fill gaps
    * pad / ffill: Propagate last valid observation forward to next
      valid.
    * backfill / bfill: Use next valid observation to fill gap.
    * nearest: Use nearest valid observations to fill gap.

copy : bool, default True
    Return a new object, even if the passed indexes are the same.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
level : int or name
    Broadcast across a level, matching Index values on the
    passed MultiIndex level.
fill_value : scalar, default np.nan
    Value to use for missing values. Defaults to NaN, but can be any
    "compatible" value.
limit : int, default None
    Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
    Maximum distance between original and new labels for inexact
    matches. The values of the index at the matching locations must
    satisfy the equation ``abs(index[indexer] - target) <= tolerance``.

    Tolerance may be a scalar value, which applies the same tolerance
    to all values, or list-like, which applies variable tolerance per
    element. List-like includes list, tuple, array, Series, and must be
    the same size as the index and its dtype must exactly match the
    index's type.

Returns
-------
{klass} with changed index.

See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.

Examples
--------
``DataFrame.reindex`` supports two calling conventions

* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={{'index', 'columns'}}, ...)``

We *highly* recommend using keyword arguments to clarify your
intent.

Create a dataframe with some fictional data.

>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({{'http_status': [200, 200, 404, 404, 301],
...                   'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}},
...                   index=index)
>>> df
           http_status  response_time
Firefox            200           0.04
Chrome             200           0.02
Safari             404           0.07
IE10               404           0.08
Konqueror          301           1.00

Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.

>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
...              'Chrome']
>>> df.reindex(new_index)
               http_status  response_time
Safari               404.0           0.07
Iceweasel              NaN            NaN
Comodo Dragon          NaN            NaN
IE10                 404.0           0.08
Chrome               200.0           0.02

We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.

>>> df.reindex(new_index, fill_value=0)
               http_status  response_time
Safari                 404           0.07
Iceweasel                0           0.00
Comodo Dragon            0           0.00
IE10                   404           0.08
Chrome                 200           0.02

>>> df.reindex(new_index, fill_value='missing')
              http_status response_time
Safari                404          0.07
Iceweasel         missing       missing
Comodo Dragon     missing       missing
IE10                  404          0.08
Chrome                200          0.02

We can also reindex the columns.

>>> df.reindex(columns=['http_status', 'user_agent'])
           http_status  user_agent
Firefox            200         NaN
Chrome             200         NaN
Safari             404         NaN
IE10               404         NaN
Konqueror          301         NaN

Or we can use "axis-style" keyword arguments

>>> df.reindex(['http_status', 'user_agent'], axis="columns")
           http_status  user_agent
Firefox            200         NaN
Chrome             200         NaN
Safari             404         NaN
IE10               404         NaN
Konqueror          301         NaN

To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).

>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({{"prices": [100, 101, np.nan, 100, 89, 88]}},
...                    index=date_index)
>>> df2
            prices
2010-01-01   100.0
2010-01-02   101.0
2010-01-03     NaN
2010-01-04   100.0
2010-01-05    89.0
2010-01-06    88.0

Suppose we decide to expand the dataframe to cover a wider
date range.

>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
            prices
2009-12-29     NaN
2009-12-30     NaN
2009-12-31     NaN
2010-01-01   100.0
2010-01-02   101.0
2010-01-03     NaN
2010-01-04   100.0
2010-01-05    89.0
2010-01-06    88.0
2010-01-07     NaN

The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.

For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.

>>> df2.reindex(date_index2, method='bfill')
            prices
2009-12-29   100.0
2009-12-30   100.0
2009-12-31   100.0
2010-01-01   100.0
2010-01-02   101.0
2010-01-03     NaN
2010-01-04   100.0
2010-01-05    89.0
2010-01-06    88.0
2010-01-07     NaN

Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.

See the :ref:`user guide <basics.reindexing>` for more.
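
``tolerance`` limits how far an inexact match may be; an illustrative
sketch with an integer index:

>>> ser = pd.Series([1, 2, 3], index=[0, 5, 10])
>>> ser.reindex(range(0, 11, 2), method='nearest', tolerance=1)
0     1.0
2     NaN
4     2.0
6     2.0
8     NaN
10    3.0
dtype: float64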
z3Cannot specify all of 'labels', 'index', 'columns'.r  r   r  Fc              3  r   >#    U  H,  u  pUc  M
  TR                  U5      R                  U5      v   M.     g 7fr   )r  	identical)r]  rD  r  r   s      r   r^  "NDFrame.reindex.<locals>.<genexpr>  s6      
!-	 4DNN9%//33!-s   	7'7r   r  r~  )r  r!  r   r   r  r   r   _needs_reindex_multi_reindex_multi_reindex_axesr   )r   ro  r  r(  r   r  r   r:  r  r  r  r   s   `           r   r  NDFrame.reindex  s.   H !49KQRR'"5P  !$$G"E--d3q8 8
 +62 '))D 
!%
 
 

 99$9'' $$T599&&t:>> !!6t

,tI,
.	/r   c           	         U nU R                    HX  n	X   n
U
c  M  U R                  U	5      nUR                  XX4US9u  pU R                  U	5      nUR	                  XU/0UUSS9nSnMZ     U$ )z%Perform the reindex for all the axes.)r:  r  r  r  F)r  r   r  )r  r  r  r!  _reindex_with_indexers)r   r   r:  r  r  r  r  r   r   r   ro  r  r  r  r   s                  r   r  NDFrame._reindex_axes  s     ""AWF~"B!#5f ", "I ((+D,,7+,% 	 - C D% #( 
r   c                    [         R                  " UR                  5       6 U R                  :H  =(       a"    USL =(       a    USL =(       a    U R                  $ )z$Check if we do need a multi reindex.N)rs   count_not_noner   r  _can_fast_transpose)r   r   r  r:  s       r   r  NDFrame._needs_reindex_multi  sN     ""DKKM2dnnD )$))
 ((	
r   c                    [        U 5      er   r  )r   r   r   r  s       r   r  NDFrame._reindex_multi  rH  r   c           
        U R                   n[        UR                  5       5       HN  nX   u  pxU R                  U5      n	Uc  M  [	        U5      nUb  [        U5      nUR                  UUU	UUUS9nSnMP     U(       d  Uc-  XPR                   L a  [        5       (       d  UR                  US9nO,[        5       (       a  XPR                   L a  UR                  SS9nU R                  XUR                  S9R                  U 5      $ )z*allow_dups indicates an internal call here)r   r  r  r   Fr   r   )r   sortedr7  r   r   r]   r  r   r   r   r   r   )
r   
reindexersr  r   r  r  r   r  r  r  s
             r   r  NDFrame._reindex_with_indexers  s    99:??,-D'-NE006E} 'E"-g6  //%% 0 H D+ .0 T\II%'))}}$}/H ""x99'<}}%}0H))()GTT
 	
r   c                x  ^^
 [         R                  " UTU5      nUS:  a  [        S5      eUc  U R                  nU R	                  U5      nUbh  U R                  U5      n[        U5      R                  U5      n[        U5      S:X  a  UR                  UR                  5      nU R                  " S0 Xq0D6$ T(       a+  S	U4S jjnUR                  U5      n	U R                  US9U	   $ U(       aA  S	U
4S jjn[        R                  " U5      m
UR                  U5      n	U R                  US9U	   $ [        S5      e)
a  
Subset the dataframe rows or columns according to the specified index labels.

Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.

Parameters
----------
items : list-like
    Keep labels from axis which are in items.
like : str
    Keep labels from axis for which "like in label == True".
regex : str (regular expression)
    Keep labels from axis for which re.search(regex, label) == True.
axis : {0 or 'index', 1 or 'columns', None}, default None
    The axis to filter on, expressed either as an index (int)
    or axis name (str). By default this is the info axis, 'columns' for
    DataFrame. For `Series` this parameter is unused and defaults to `None`.

Returns
-------
same type as input object

See Also
--------
DataFrame.loc : Access a group of rows and columns
    by label(s) or a boolean array.

Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.

``axis`` defaults to the info axis that is used when indexing
with ``[]``.

Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
...                   index=['mouse', 'rabbit'],
...                   columns=['one', 'two', 'three'])
>>> df
        one  two  three
mouse     1    2      3
rabbit    4    5      6

>>> # select columns by name
>>> df.filter(items=['one', 'three'])
         one  three
mouse     1      3
rabbit    4      6

>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
         one  three
mouse     1      3
rabbit    4      6

>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
         one  two  three
rabbit    4    5      6
r   zDKeyword arguments `items`, `like`, or `regex` are mutually exclusiver   c                *   > Tc   eT[        U 5      ;   $ r   )r^   )r  likes    r   r  NDFrame.filter.<locals>.f  s    '''z!},,r   r   c                <   > TR                  [        U 5      5      S L$ r   )searchr^   )r  matchers    r   r  r    s    ~~jm4D@@r   z,Must pass either `items`, `like`, or `regex`r  returnbool_t)rs   r  r  r  r  r%  r~   intersectionr   r   r   r  rt  rf  recompile)r   r   r  regexr   nkwro  r8  r  r   r  s     `       @r   filterNDFrame.filterN  s#   L ##E477) 
 <''D%&&t,D%L--f5E5zQV\\2<<04-00- ZZ]F888&v..A jj'GZZ]F888&v..JKKr   c                x    [        5       (       a  U R                  SU R                  5       $ U R                  SU $ )a  
Return the first `n` rows.

This function returns the first `n` rows of the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.

For negative values of `n`, this function returns all rows except
the last `|n|` rows, equivalent to ``df[:n]``.

If n is larger than the number of rows, this function returns all rows.

Parameters
----------
n : int, default 5
    Number of rows to select.

Returns
-------
same type as caller
    The first `n` rows of the caller object.

See Also
--------
DataFrame.tail: Returns the last `n` rows.

Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
      animal
0  alligator
1        bee
2     falcon
3       lion
4     monkey
5     parrot
6      shark
7      whale
8      zebra

Viewing the first 5 lines

>>> df.head()
      animal
0  alligator
1        bee
2     falcon
3       lion
4     monkey

Viewing the first `n` lines (three in this case)

>>> df.head(3)
      animal
0  alligator
1        bee
2     falcon

For negative values of `n`

>>> df.head(-3)
      animal
0  alligator
1        bee
2     falcon
3       lion
4     monkey
5     parrot
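
If `n` exceeds the number of rows, every row is returned, as noted above
(a small check reusing the same ``df``):

>>> df.head(20)
      animal
0  alligator
1        bee
2     falcon
3       lion
4     monkey
5     parrot
6      shark
7      whale
8      zebra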
Nr   r  r   r   ns     r   r  NDFrame.head  s7    R   99Ra=%%''yy!}r   c                    [        5       (       aA  US:X  a  U R                  SS R                  5       $ U R                  U* S R                  5       $ US:X  a  U R                  SS $ U R                  U* S $ )a  
Return the last `n` rows.

This function returns the last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.

For negative values of `n`, this function returns all rows except
the first `|n|` rows, equivalent to ``df[|n|:]``.

If n is larger than the number of rows, this function returns all rows.

Parameters
----------
n : int, default 5
    Number of rows to select.

Returns
-------
type of caller
    The last `n` rows of the caller object.

See Also
--------
DataFrame.head : The first `n` rows of the caller object.

Examples
--------
>>> df = pd.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
...                    'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
      animal
0  alligator
1        bee
2     falcon
3       lion
4     monkey
5     parrot
6      shark
7      whale
8      zebra

Viewing the last 5 lines

>>> df.tail()
   animal
4  monkey
5  parrot
6   shark
7   whale
8   zebra

Viewing the last `n` lines (three in this case)

>>> df.tail(3)
  animal
6  shark
7  whale
8  zebra

For negative values of `n`

>>> df.tail(-3)
   animal
3    lion
4  monkey
5  parrot
6   shark
7   whale
8   zebra
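
Negative `n` is equivalent to slicing off the first `|n|` rows, as described
above (a small check reusing the same ``df``):

>>> df.tail(-3).equals(df[3:])
True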
r   Nr  r  s     r   tailNDFrame.tail  sr    R   Avyy1~**,,99aRS>&&((699Qq>!yy!~r   replacec                   Uc  SnU R                  U5      nU R                  U   n[        R                  " U5      n	[        R
                  " XU5      n
U
c  Uc   e[        X(-  5      n
Ub  [        R                  " XU5      n[        R                  " XX4U	5      nU R                  XS9nU(       a  [        [        U5      5      Ul        U$ )u[  
Return a random sample of items from an axis of object.

You can use `random_state` for reproducibility.

Parameters
----------
n : int, optional
    Number of items from axis to return. Cannot be used with `frac`.
    Default = 1 if `frac` = None.
frac : float, optional
    Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
    Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
    Default 'None' results in equal probability weighting.
    If passed a Series, will align with target object on index. Index
    values in weights not found in sampled object will be ignored and
    index values in sampled object not in weights will be assigned
    weights of zero.
    If called on a DataFrame, will accept the name of a column
    when axis = 0.
    Unless weights are a Series, weights must be same length as axis
    being sampled.
    If weights do not sum to 1, they will be normalized to sum to 1.
    Missing values in the weights column will be treated as zero.
    Infinite values not allowed.
random_state : int, array-like, BitGenerator, np.random.RandomState, np.random.Generator, optional
    If int, array-like, or BitGenerator, seed for random number generator.
    If np.random.RandomState or np.random.Generator, use as given.

    .. versionchanged:: 1.4.0

        np.random.Generator objects now accepted

axis : {0 or 'index', 1 or 'columns', None}, default None
    Axis to sample. Accepts axis number or name. Default is stat axis
    for given data type. For `Series` this parameter is unused and defaults to `None`.
ignore_index : bool, default False
    If True, the resulting index will be labeled 0, 1, …, n - 1.

    .. versionadded:: 1.3.0

Returns
-------
Series or DataFrame
    A new object of same type as caller containing `n` items randomly
    sampled from the caller object.

See Also
--------
DataFrameGroupBy.sample: Generates random samples from each group of a
    DataFrame object.
SeriesGroupBy.sample: Generates random samples from each group of a
    Series object.
numpy.random.choice: Generates a random sample from a given 1-D numpy
    array.

Notes
-----
If `frac` > 1, `replace` should be set to `True`.

Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
...                    'num_wings': [2, 0, 0, 0],
...                    'num_specimen_seen': [10, 2, 1, 8]},
...                   index=['falcon', 'dog', 'spider', 'fish'])
>>> df
        num_legs  num_wings  num_specimen_seen
falcon         2          2                 10
dog            4          0                  2
spider         8          0                  1
fish           0          0                  8

Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.

>>> df['num_legs'].sample(n=3, random_state=1)
fish      0
spider    8
falcon    2
Name: num_legs, dtype: int64

A random 50% sample of the ``DataFrame`` with replacement:

>>> df.sample(frac=0.5, replace=True, random_state=1)
      num_legs  num_wings  num_specimen_seen
dog          4          0                  2
fish         0          0                  8

An upsampled sample of the ``DataFrame`` with replacement:
Note that the `replace` parameter has to be `True` for `frac` > 1.

>>> df.sample(frac=2, replace=True, random_state=1)
        num_legs  num_wings  num_specimen_seen
dog            4          0                  2
fish           0          0                  8
falcon         2          2                 10
falcon         2          2                 10
fish           0          0                  8
dog            4          0                  2
fish           0          0                  8
dog            4          0                  2

Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.

>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
        num_legs  num_wings  num_specimen_seen
falcon         2          2                 10
fish           0          0                  8
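
With ``ignore_index=True`` the sampled rows are relabeled 0, 1, ..., n - 1
(a minimal check, reusing ``df`` from above):

>>> df.sample(frac=1, random_state=1, ignore_index=True).index
RangeIndex(start=0, stop=4, step=1)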
r   r   )r!  ra  rs   random_staterw   process_sampling_sizer  preprocess_weightsrL  r   r   r  )r   r  fracr  weightsr  r   r  obj_lenrsrj  sampled_indicesr  s                r   rw   NDFrame.sampleX  s    z <D$$T***T"   .++AW=<###(D//tDG --wL?6(V5FLr   c                    [        5       (       a(  [        R                  " U R                  SS9U/UQ70 UD6$ [        R                  " X/UQ70 UD6$ )a3
  
Apply chainable functions that expect Series or DataFrames.

Parameters
----------
func : function
    Function to apply to the {klass}.
    ``args``, and ``kwargs`` are passed into ``func``.
    Alternatively a ``(callable, data_keyword)`` tuple where
    ``data_keyword`` is a string indicating the keyword of
    ``callable`` that expects the {klass}.
*args : iterable, optional
    Positional arguments passed into ``func``.
**kwargs : mapping, optional
    A dictionary of keyword arguments passed into ``func``.

Returns
-------
the return type of ``func``.

See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
DataFrame.map : Apply a function elementwise on a whole DataFrame.
Series.map : Apply a mapping correspondence on a
    :class:`~pandas.Series`.

Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects.

Examples
--------
Constructing an income DataFrame from a dictionary.

>>> data = [[8000, 1000], [9500, np.nan], [5000, 2000]]
>>> df = pd.DataFrame(data, columns=['Salary', 'Others'])
>>> df
   Salary  Others
0    8000  1000.0
1    9500     NaN
2    5000  2000.0

Functions that perform tax reductions on an income DataFrame.

>>> def subtract_federal_tax(df):
...     return df * 0.9
>>> def subtract_state_tax(df, rate):
...     return df * (1 - rate)
>>> def subtract_national_insurance(df, rate, rate_increase):
...     new_rate = rate + rate_increase
...     return df * (1 - new_rate)

Instead of writing

>>> subtract_national_insurance(
...     subtract_state_tax(subtract_federal_tax(df), rate=0.12),
...     rate=0.05,
...     rate_increase=0.02)  # doctest: +SKIP

You can write

>>> (
...     df.pipe(subtract_federal_tax)
...     .pipe(subtract_state_tax, rate=0.12)
...     .pipe(subtract_national_insurance, rate=0.05, rate_increase=0.02)
... )
    Salary   Others
0  5892.48   736.56
1  6997.32      NaN
2  3682.80  1473.12

If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``subtract_national_insurance`` takes its data as ``df``
in the second argument:

>>> def subtract_national_insurance(rate, df, rate_increase):
...     new_rate = rate + rate_increase
...     return df * (1 - new_rate)
>>> (
...     df.pipe(subtract_federal_tax)
...     .pipe(subtract_state_tax, rate=0.12)
...     .pipe(
...         (subtract_national_insurance, 'df'),
...         rate=0.05,
...         rate_increase=0.02
...     )
... )
    Salary   Others
0  5892.48   736.56
1  6997.32      NaN
2  3682.80  1473.12
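
``df.pipe(func, *args, **kwargs)`` is simply ``func(df, *args, **kwargs)`` when
``func`` expects the data as its first argument (a quick sanity check using the
functions defined above):

>>> df.pipe(subtract_state_tax, rate=0.12).equals(subtract_state_tax(df, rate=0.12))
True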
Nr   )r   rs   piper   )r   funcargsr  s       r   r  NDFrame.pipe  sM    N   ;;tyydy3TKDKFKK{{47777r   c           
       ^ [        U[        5      (       a  UR                  (       a  [        UR                  5      U l        UR                  R
                  U R                  l        [        U R                  5      [        UR                  5      -   H:  n[        U[        5      (       d   e[        R                  X[        XS5      5        M<     US:X  a  [        S UR                   5       5      (       aR  UR                  S   R                  m[        U4S jUR                  SS  5       5      nU(       a  [        T5      U l        [        S UR                   5       5      nX`R                  l        U $ )a|  
Propagate metadata from other to self.

Parameters
----------
other : the object from which to get the attributes that we are going
    to propagate
method : str, optional
    A passed method name providing context on where ``__finalize__``
    was called.

    .. warning::

       The value passed as `method` is not currently considered
       stable across pandas releases.
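
One user-visible effect of this propagation is that ``attrs`` set on an object
survive operations that call ``__finalize__`` (an illustrative sketch; exactly
which operations propagate metadata can vary between pandas versions):

>>> df = pd.DataFrame({"a": [1, 2, 3]})
>>> df.attrs["source"] = "sensor-1"
>>> df.head(2).attrs
{'source': 'sensor-1'}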
Nr   c              3  L   #    U  H  n[        UR                  5      v   M     g 7fr   )r  r   )r]  r   s     r   r^  'NDFrame.__finalize__.<locals>.<genexpr>~  s     9js4		??j   "$r   c              3  @   >#    U  H  oR                   T:H  v   M     g 7fr   )r   )r]  r   r   s     r   r^  r    s     %SNSii5&8N   r   c              3  L   #    U  H  oR                   R                  v   M     g 7fr   )r   r   r]  r  s     r   r^  r    s      *9CA//r   )r   r   r   r   r   r   rh  r   r   r   r   r0  r  objs)r   r  r  r  r8  have_same_attrsr   r   s          @r   r   NDFrame.__finalize__\  s   $ eW%%{{
 &ekk2
161T1TDJJ.DNN+c%//.BB!$,,,,""4wuD/IJ C X9ejj999

1++"%%SEJJqrN%S"S"!)%DJ&) *9>* '# 2IJJ.r   c                    XR                   ;  aB  XR                  ;  a3  XR                  ;  a$  U R                  R	                  U5      (       a  X   $ [
        R                  X5      $ )zt
After regular attribute access, try looking up the name.
This allows simpler access to columns for interactive use.
)r   r   r   rR  $_can_hold_identifiers_and_holds_namer   __getattribute__)r   r8  s     r   __getattr__NDFrame.__getattr__  sR     000NN*OO+DDTJJ:&&t22r   c                    [         R                  X5        [         R                  XU5      $ ! [         a     Of = fXR                  ;   a  [         R                  XU5        gXR
                  ;   a  [         R                  XU5        g [        X5      n[        U[        5      (       a  [         R                  XU5        gXR                  ;   a  X U'   g[         R                  XU5        g! [        [        4 a[    [        U [        5      (       a-  [        U5      (       a  [        R                  " S[        5       S9  [         R                  XU5         gf = f)zq
After regular attribute access, try setting the name.
This allows simpler access to columns for interactive use.
zPandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-accessr  N)r   r*  r   AttributeErrorr   r   r0  r   r~   rR  r  rk   rc   r  r	  rU   )r   r8  r   existings       r   r   NDFrame.__setattr__  s   	##D/%%d%88 		
 +++t51^^#t516"4.h..&&t59__,!&J&&t59"I. 	6dL11|E7J7JMM@ $4#5 ""4u5	6s+   *- 
::
6C, C, C, ,A(EEc                   > [         TU ]  5       nU R                  R                  (       a%  UR	                  U R                  R
                  5        U$ )zs
add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first level values are used.
)super_dir_additionsrR  _can_hold_stringsr  _dir_additions_for_owner)r   	additions	__class__s     r   r3  NDFrame._dir_additions  s=     G*,	??,,T__EEFr   c                   [        U R                  [        [        45      (       a  U" 5       $ [	        U R                  R
                  5      nU" 5       n[	        U R                  R
                  5      U:w  a  U R                  5         U$ )zF
Consolidate _mgr -- if the blocks have changed, then clear the
cache
)r   r   r   r   r   r   rw  )r   r  blocks_beforer  s       r   _protect_consolidateNDFrame._protect_consolidate  se     dii,0B!CDD3JDII,,-tyy M1""$r   c                8   ^  SU 4S jjnT R                  U5        g)z)Consolidate data in place and return Nonec                 D   > T R                   R                  5       T l         g r   r   consolidater   s   r   r  'NDFrame._consolidate_inplace.<locals>.f  s    		--/DIr   Nr  None)r;  )r   r  s   ` r   _consolidate_inplaceNDFrame._consolidate_inplace  s    	0 	!!!$r   c                   ^  U 4S jnT R                  U5      nT R                  X"R                  S9R                  T 5      $ )z
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).

Returns
-------
consolidated : same type as caller
c                 8   > T R                   R                  5       $ r   r?  r   s   r   r  &NDFrame._consolidate.<locals>.<lambda>  s    DII))+r   r   )r;  r   r   r   )r   r  	cons_datas   `  r   _consolidateNDFrame._consolidate  sB     ,--a0	)))..)IVV
 	
r   c                    U R                   R                  (       a  gU R                   R                  (       a  gU R                  R	                  5       S:  $ )NFTr   )r   r  any_extension_typesrN  nuniquer   s    r   _is_mixed_typeNDFrame._is_mixed_type  s>     99$$99(( {{""$q((r   c                    U R                   R                  5       nU R                  XR                  S9R	                  U 5      $ Nr   )r   get_numeric_datar   r   r   r   r   s     r   _get_numeric_dataNDFrame._get_numeric_data
  s8    )),,.))')ERRSWXXr   c                    U R                   R                  5       nU R                  XR                  S9R	                  U 5      $ rR  )r   get_bool_datar   r   r   rT  s     r   _get_bool_dataNDFrame._get_bool_data  s8    ))))+))')ERRSWXXr   c                    [        U 5      er   r  r   s    r   r   NDFrame.values  s    !$''r   c                    [        U 5      e)zinternal implementationr  r   s    r   r  NDFrame._values  s     "$''r   c                    U R                   R                  5       nU R                  XR                  [        R
                  S9$ )a  
Return the dtypes in the DataFrame.

This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.

Returns
-------
pandas.Series
    The data type of each column.

Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
...                    'int': [1],
...                    'datetime': [pd.Timestamp('20180310')],
...                    'string': ['foo']})
>>> df.dtypes
float              float64
int                  int64
datetime    datetime64[ns]
string              object
dtype: object
)r  r   )r   
get_dtypes_constructor_slicedrR  rh  object_r   s     r   rN  NDFrame.dtypes   s4    8 yy##%''OO2::'VVr   c           
       ^ U(       a  [        5       (       a  Sn[        T5      (       Ga'  U R                  S:X  aK  [        T5      S:  d  U R                  T;  a  [        S5      eTU R                     nU R                  XBU5      $ SSKJn  U" T[        S9nUR                   H  nXp;  d  M
  [        SU S35      e   UR                  U R                  S	SS
9n/ n[        U R                  5       5       HY  u  n	u  pzUR                  U	   n[!        U5      (       a  U
R#                  US9nO U
R                  XUS9nUR)                  U5        M[     O[+        T5      (       a  U R                  S:  a  [-        T5      m[/        T[0        5      (       a=  [3        U4S jU R4                  R6                   5       5      (       a  U R#                  US9$ U R                  5        VVs/ s H  u  pUR                  TX#S9PM     nnnODU R4                  R                  TX#S9nU R9                  UUR:                  S9nUR=                  U SS9$ U(       d  U R#                  S	S9$ [?        USSS9nU RA                  U5      nU R                  Ul        UR=                  U SS9n[C        [D        U5      $ ! [$         a  nU SU S34Ul        e S	nAff = fs  snnf )a  
Cast a pandas object to a specified dtype ``dtype``.

Parameters
----------
dtype : str, data type, Series or Mapping of column name -> data type
    Use a str, numpy.dtype, pandas.ExtensionDtype or Python type to
    cast entire pandas object to the same type. Alternatively, use a
    mapping, e.g. {col: dtype, ...}, where col is a column label and dtype is
    a numpy.dtype or Python type to cast one or more of the DataFrame's
    columns to column-specific types.
copy : bool, default True
    Return a copy when ``copy=True`` (be very careful setting
    ``copy=False`` as changes to values then may propagate to other
    pandas objects).

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
errors : {'raise', 'ignore'}, default 'raise'
    Control raising of exceptions on invalid data for provided dtype.

    - ``raise`` : allow exceptions to be raised
    - ``ignore`` : suppress exceptions. On error return original object.

Returns
-------
same type as caller

See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.

Notes
-----
.. versionchanged:: 2.0.0

    Using ``astype`` to convert from timezone-naive dtype to
    timezone-aware dtype will raise an exception.
    Use :meth:`Series.dt.tz_localize` instead.

Examples
--------
Create a DataFrame:

>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1    int64
col2    int64
dtype: object

Cast all columns to int32:

>>> df.astype('int32').dtypes
col1    int32
col2    int32
dtype: object

Cast col1 to int32 using a dictionary:

>>> df.astype({'col1': 'int32'}).dtypes
col1    int32
col2    int64
dtype: object

Create a series:

>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0    1
1    2
dtype: int32
>>> ser.astype('int64')
0    1
1    2
dtype: int64

Convert to categorical type:

>>> ser.astype('category')
0    1
1    2
dtype: category
Categories (2, int32): [1, 2]

Convert to ordered categorical type with custom ordering:

>>> from pandas.api.types import CategoricalDtype
>>> cat_dtype = CategoricalDtype(
...     categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0    1
1    2
dtype: category
Categories (2, int64): [2 < 1]

Create a series of dates:

>>> ser_date = pd.Series(pd.date_range('20200101', periods=3))
>>> ser_date
0   2020-01-01
1   2020-01-02
2   2020-01-03
dtype: datetime64[ns]
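
As the note above says, converting a timezone-naive series to a timezone-aware
dtype goes through ``Series.dt.tz_localize`` rather than ``astype`` (an
illustrative continuation of the example):

>>> ser_date.dt.tz_localize('UTC')
0   2020-01-01 00:00:00+00:00
1   2020-01-02 00:00:00+00:00
2   2020-01-03 00:00:00+00:00
dtype: datetime64[ns, UTC]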
Fr   zFOnly the Series name can be used for the key in Series dtype mappings.r   rJ  r   zJOnly a column name can be used for the key in a dtype mappings argument. 'z' not found in columns.N)r  r   r   )r   r   r  z': Error while type casting for column 'r{  c              3  @   >#    U  H  oR                   T:H  v   M     g 7fr   r   )r]  rY  r   s     r   r^  !NDFrame.astype.<locals>.<genexpr>  s      9.>s		U".>r"  )r   r  r   r   r~  rl  )#r   ra   r,  r   r8  r  r   pandasr   r   r  r  r(  r1  r   iatro   r   r   r  r  rb   rh   r   rj   r  r   arraysr   r   r   r   r  r   r8   )r   r   r   r  new_typer   	dtype_sercol_nameresultsr7  r  cdtres_colex_serr  r  r  s    `                 r   r   NDFrame.astype?  s   r '))DyyA~u:>TYYe%;"<  !+{{86:: &uF3I%OO'"$:%<>  , "))$,,4e)TIG&/

&="?HmmA&99!hhDh1G"%**3&*"Q w' '> &e,,Q 'E%00S 9.2ii.>.>9 6 6 yydy++ KO**,JV

5t
;,  G yy''e$'NH,,XHMM,JC##D#:: 99$9'' ae4 ""6*$$T($;D&!!O & !d"I(STU# 	 s   ,J)"K
)
K3KKc                    U R                   R                  US9nU R                  5         U R                  X"R                  S9R                  U SS9$ )a  
Make a copy of this object's indices and data.

When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).

When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).

.. note::
    The ``deep=False`` behaviour as described above will change
    in pandas 3.0. `Copy-on-Write
    <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
    will be enabled by default, which means that the "shallow" copy
    that is returned with ``deep=False`` will still avoid making
    an eager copy, but changes to the data of the original will *no*
    longer be reflected in the shallow copy (or vice versa). Instead,
    it makes use of a lazy (deferred) copy mechanism that will copy
    the data only when any changes to the original or shallow copy is
    made.

    You can already get the future behavior and improvements through
    enabling copy on write ``pd.options.mode.copy_on_write = True``

Parameters
----------
deep : bool, default True
    Make a deep copy, including a copy of the data and the indices.
    With ``deep=False`` neither the indices nor the data are copied.

Returns
-------
Series or DataFrame
    Object type matches caller.

Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).

While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.

Since pandas is not thread safe, see the
:ref:`gotchas <gotchas.thread-safety>` when copying in a threading
environment.

When ``copy_on_write`` in pandas config is set to ``True``, the
``copy_on_write`` config takes effect even when ``deep=False``.
This means that any changes to the copied data would make a new copy
of the data upon write (and vice versa). Changes made to either the
original or copied variable would not be reflected in the counterpart.
See :ref:`Copy_on_Write <copy_on_write>` for more information.

Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a    1
b    2
dtype: int64

>>> s_copy = s.copy()
>>> s_copy
a    1
b    2
dtype: int64

**Shallow copy versus default (deep) copy:**

>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)

Shallow copy shares data and index with original.

>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True

Deep copy has own copy of data and index.

>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False

Updates to the data shared by shallow copy and original are reflected
in both (NOTE: this will no longer be true for pandas >= 3.0);
deep copy remains unchanged.

>>> s.iloc[0] = 3
>>> shallow.iloc[1] = 4
>>> s
a    3
b    4
dtype: int64
>>> shallow
a    3
b    4
dtype: int64
>>> deep
a    1
b    2
dtype: int64

Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.

>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0    [10, 2]
1     [3, 4]
dtype: object
>>> deep
0    [10, 2]
1     [3, 4]
dtype: object

**Copy-on-Write is set to true**, the shallow copy is not modified
when the original data is changed:

>>> with pd.option_context("mode.copy_on_write", True):
...     s = pd.Series([1, 2], index=["a", "b"])
...     copy = s.copy(deep=False)
...     s.iloc[0] = 100
...     s
a    100
b      2
dtype: int64
>>> copy
a    1
b    2
dtype: int64
r   r   r   r~  )r   r   rw  r   r   r   )r   r   r   s      r   r   NDFrame.copy  sU    j yy~~4~( ))$YY)?LL M 
 	
r   c                     U R                  US9$ rs  r\  )r   r   s     r   __copy__NDFrame.__copy__  s    yydy##r   c                     U R                  SS9$ )zI
Parameters
----------
memo, default None
    Standard signature. Unused
Tr   r\  )r   memos     r   __deepcopy__NDFrame.__deepcopy__  s     yydy##r   c                    U R                   R                  US9nU R                  X"R                  S9nUR	                  U SS9$ )a  
Attempt to infer better dtypes for object columns.

Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.

Parameters
----------
copy : bool, default True
    Whether to make a copy for non-object or non-inferable columns
    or Series.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``

Returns
-------
same type as input object

See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
convert_dtypes : Convert argument to best possible dtype.

Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
   A
1  1
2  2
3  3

>>> df.dtypes
A    object
dtype: object

>>> df.infer_objects().dtypes
A    int64
dtype: object
r\  r   infer_objectsr~  )r   convertr   r   r   )r   r   r   r  s       r   r~  NDFrame.infer_objects  sI    r ))###.((||(D_==r   c           	         [        U5        U R                  R                  UUUUUUS9nU R                  XwR                  S9nUR                  U SS9$ )a  
Convert columns to the best possible dtypes using dtypes supporting ``pd.NA``.

Parameters
----------
infer_objects : bool, default True
    Whether object dtypes should be converted to the best possible types.
convert_string : bool, default True
    Whether object dtypes should be converted to ``StringDtype()``.
convert_integer : bool, default True
    Whether, if possible, conversion can be done to integer extension types.
convert_boolean : bool, default True
    Whether object dtypes should be converted to ``BooleanDtype()``.
convert_floating : bool, default True
    Whether, if possible, conversion can be done to floating extension types.
    If `convert_integer` is also True, preference will be given to integer
    dtypes if the floats can be faithfully cast to integers.
dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
    Back-end data type applied to the resultant :class:`DataFrame`
    (still experimental). Behaviour is as follows:

    * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
      (default).
    * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
      DataFrame.

    .. versionadded:: 2.0

Returns
-------
Series or DataFrame
    Copy of input object with new dtype.

See Also
--------
infer_objects : Infer dtypes of objects.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.

Notes
-----
By default, ``convert_dtypes`` will attempt to convert a Series (or each
Series in a DataFrame) to dtypes that support ``pd.NA``. By using the options
``convert_string``, ``convert_integer``, ``convert_boolean`` and
``convert_floating``, it is possible to turn off individual conversions
to ``StringDtype``, the integer extension types, ``BooleanDtype``
or floating extension types, respectively.

For object-dtyped columns, if ``infer_objects`` is ``True``, use the inference
rules as during normal Series/DataFrame construction.  Then, if possible,
convert to ``StringDtype``, ``BooleanDtype`` or an appropriate integer
or floating extension type, otherwise leave as ``object``.

If the dtype is integer, convert to an appropriate integer extension type.

If the dtype is numeric, and consists of all integers, convert to an
appropriate integer extension type. Otherwise, convert to an
appropriate floating extension type.

In the future, as new dtypes are added that support ``pd.NA``, the results
of this method will change to support those new dtypes.

Examples
--------
>>> df = pd.DataFrame(
...     {
...         "a": pd.Series([1, 2, 3], dtype=np.dtype("int32")),
...         "b": pd.Series(["x", "y", "z"], dtype=np.dtype("O")),
...         "c": pd.Series([True, False, np.nan], dtype=np.dtype("O")),
...         "d": pd.Series(["h", "i", np.nan], dtype=np.dtype("O")),
...         "e": pd.Series([10, np.nan, 20], dtype=np.dtype("float")),
...         "f": pd.Series([np.nan, 100.5, 200], dtype=np.dtype("float")),
...     }
... )

Start with a DataFrame with default dtypes.

>>> df
   a  b      c    d     e      f
0  1  x   True    h  10.0    NaN
1  2  y  False    i   NaN  100.5
2  3  z    NaN  NaN  20.0  200.0

>>> df.dtypes
a      int32
b     object
c     object
d     object
e    float64
f    float64
dtype: object

Convert the DataFrame to use best possible dtypes.

>>> dfn = df.convert_dtypes()
>>> dfn
   a  b      c     d     e      f
0  1  x   True     h    10   <NA>
1  2  y  False     i  <NA>  100.5
2  3  z   <NA>  <NA>    20  200.0

>>> dfn.dtypes
a             Int32
b    string[python]
c           boolean
d    string[python]
e             Int64
f           Float64
dtype: object

Start with a Series of strings and missing data represented by ``np.nan``.

>>> s = pd.Series(["a", "b", np.nan])
>>> s
0      a
1      b
2    NaN
dtype: object

Obtain a Series with dtype ``StringDtype``.

>>> s.convert_dtypes()
0       a
1       b
2    <NA>
dtype: string
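
The ``dtype_backend`` keyword selects the nullable implementation used for the
result; pyarrow-backed dtypes can be requested instead of the default
numpy-nullable ones (a sketch only; it requires the optional ``pyarrow``
dependency):

>>> s_arrow = s.convert_dtypes(dtype_backend="pyarrow")  # doctest: +SKIP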
)r~  convert_stringconvert_integerconvert_booleanconvert_floatingdtype_backendr   convert_dtypesr~  )rV   r   r  r   r   r   )	r   r~  r  r  r  r  r  r   r  s	            r   r  NDFrame.convert_dtypes  sh    T 	M*))**')++-' + 
 ((||(D-=>>r   c                    U[         R                  La(  [        R                  " SU S3[        [        5       S9  U$ S nU$ )NThe 'downcast' keyword in z is deprecated and will be removed in a future version. Use res.infer_objects(copy=False) to infer non-object dtype, or pd.to_numeric with the 'downcast' keyword to downcast numeric results.r  )r   r  r  r	  r  rU   )r   downcastmethod_names      r   _deprecate_downcastNDFrame._deprecate_downcast  sI    3>>)MM,[M : 
 +-  Hr   r   r   r  
limit_arear  c          	        Uc  SnU R                  U5      n[        U5      nU R                  R                  (       d=  US:X  a7  U(       a
  [	        5       eU R
                  R                  XUS9R
                  nU$ U R                  R                  UU R                  U5      UUUUS9nU R                  XR                  S9nU(       a  U R                  U5      $ UR                  U SS9$ )Nr   r   )r  r  r  r  r   r  r  r   r  r   fillnar~  )r!  r   r   r  r   r=   _pad_or_backfillpad_or_backfillr   r   r   r  r   )	r   r  r   r   r  r  r  r  r   s	            r   r  NDFrame._pad_or_backfill  s     <D$$T*"6*yy((TQY )++VV,,z - a  M))++--d3! , 
 ++G,,+G''//&&tH&==r   )r  r   r   r  r  c                   g r   r  r   r   r  r   r   r  r  s          r   r  NDFrame.fillna  r  r   )r  r   r  r  c                   g r   r  r  s          r   r  r    r  r   c                   g r   r  r  s          r   r  r    r  r   r   )r   r   c               X   [        US5      nU(       a  [        (       dL  [        5       (       a=  [        R                  " U 5      [
        ::  a  [        R                  " [        [        SS9  O[        (       d  [        5       (       d~  U R                  5       (       ai  [        R                  " U 5      n[
        n[        U [        5      (       a  [        U 5      (       a  US-  nXx::  a  [        R                  " [        [        SS9  [!        X5      u  pUb8  [        R                  " [#        U 5      R$                   S3[        ['        5       S9  U[(        R*                  L n	U R-                  US5      nUc  SnU R/                  U5      nUc  U R1                  UUUUUS	9$ U R2                  S:X  a  [        U[4        [        45      (       a\  [7        U5      (       d  U(       a  gU R9                  SS
9$ SSKJn
  U
" U5      nUR?                  U R@                  SS9nURB                  nO3[E        U5      (       d  O"[G        S[#        U5      R$                   S35      eU RH                  RK                  XXFS9nGOz[        U[4        [        45      (       Ga  US:X  a  [M        S5      e[        5       (       a  U R9                  SS
9nOU(       a  U OU R9                  5       n[        U[4        5      nURO                  5        GH  u  pX;  a  M  U	(       a  [(        R*                  nOU(       d  UOURQ                  U5      nX   RK                  XUS9nU(       d  UX'   M^  [        U[        5      (       a7  URR                  X   RR                  :X  a  UURT                  SS2U4'   M  UX'   M  URV                  RY                  U5      n[        U[Z        5      (       a'  [\        R^                  " U R`                  S   5      U   nO[        U[\        Rb                  5      (       a.  URR                  Rd                  S:X  a  URg                  5       S   nOD[        U[\        Rb                  5      (       a  URR                  Rd                  S:X  d  [M        S5      e[i        U5       Hm  u  nnURj                  SS2U4   nU Rj                  SS2U4   nURR                  URR                  :X  a  UURj                  SS2U4'   M[  URm                  UU5        Mo     GM     U(       a  U Ro                  U5      $ U$ [E        U5      (       dQ  US:X  a0  U Rp                  RK                  XS9Rp                  nURH                  nOU RH                  RK                  XXFS9nOg[        U[r        5      (       a;  U R2                  S:X  a+  U Ru                  U Rw                  5       U5      RH                  nO[y        S[#        U5       35      eU R{                  XR|                  S9nU(       a  U Ro                  U5      $ UR                  U SS9$ )aw  
Fill NA/NaN values using the specified method.

Parameters
----------
value : scalar, dict, Series, or DataFrame
    Value to use to fill holes (e.g. 0), alternately a
    dict/Series/DataFrame of values specifying which value to use for
    each index (for a Series) or column (for a DataFrame).  Values not
    in the dict/Series/DataFrame will not be filled. This value cannot
    be a list.
method : {{'backfill', 'bfill', 'ffill', None}}, default None
    Method to use for filling holes in reindexed Series:

    * ffill: propagate last valid observation forward to next valid.
    * backfill / bfill: use next valid observation to fill gap.

    .. deprecated:: 2.1.0
        Use ffill or bfill instead.

axis : {axes_single_arg}
    Axis along which to fill missing values. For `Series`
    this parameter is unused and defaults to 0.
inplace : bool, default False
    If True, fill in-place. Note: this will modify any
    other views on this object (e.g., a no-copy slice for a column in a
    DataFrame).
limit : int, default None
    If method is specified, this is the maximum number of consecutive
    NaN values to forward/backward fill. In other words, if there is
    a gap with more than this number of consecutive NaNs, it will only
    be partially filled. If method is not specified, this is the
    maximum number of entries along the entire axis where NaNs will be
    filled. Must be greater than 0 if not None.
downcast : dict, default is None
    A dict of item->dtype of what to downcast if possible,
    or the string 'infer' which will try to downcast to an appropriate
    equal type (e.g. float64 to int64 if possible).

    .. deprecated:: 2.2.0

Returns
-------
{klass} or None
    Object with missing values filled or None if ``inplace=True``.

See Also
--------
ffill : Fill values by propagating the last valid observation to next valid.
bfill : Fill values by using the next valid observation to fill the gap.
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.

Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
...                    [3, 4, np.nan, 1],
...                    [np.nan, np.nan, np.nan, np.nan],
...                    [np.nan, 3, np.nan, 4]],
...                   columns=list("ABCD"))
>>> df
     A    B   C    D
0  NaN  2.0 NaN  0.0
1  3.0  4.0 NaN  1.0
2  NaN  NaN NaN  NaN
3  NaN  3.0 NaN  4.0

Replace all NaN elements with 0s.

>>> df.fillna(0)
     A    B    C    D
0  0.0  2.0  0.0  0.0
1  3.0  4.0  0.0  1.0
2  0.0  0.0  0.0  0.0
3  0.0  3.0  0.0  4.0

Replace all NaN elements in columns 'A', 'B', 'C', and 'D' with 0, 1,
2, and 3 respectively.

>>> values = {{"A": 0, "B": 1, "C": 2, "D": 3}}
>>> df.fillna(value=values)
     A    B    C    D
0  0.0  2.0  2.0  0.0
1  3.0  4.0  2.0  1.0
2  0.0  1.0  2.0  3.0
3  0.0  3.0  2.0  4.0

Only replace the first NaN element.

>>> df.fillna(value=values, limit=1)
     A    B    C    D
0  0.0  2.0  2.0  0.0
1  3.0  4.0  NaN  1.0
2  NaN  1.0  NaN  3.0
3  NaN  3.0  NaN  4.0

When filling using a DataFrame, replacement happens along
the same column names and same indices.

>>> df2 = pd.DataFrame(np.zeros((4, 4)), columns=list("ABCE"))
>>> df.fillna(df2)
     A    B    C    D
0  0.0  2.0  0.0  0.0
1  3.0  4.0  0.0  1.0
2  0.0  0.0  0.0  NaN
3  0.0  3.0  0.0  4.0

Note that column D is not affected since it is not present in df2.
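
A ``Series`` passed as ``value`` is mapped onto the columns of a DataFrame in
the same way as a dict (a small addition to the examples above, reusing ``df``):

>>> df.fillna(pd.Series({{"A": 0, "B": 1}}))
     A    B   C    D
0  0.0  2.0 NaN  0.0
1  3.0  4.0 NaN  1.0
2  0.0  1.0 NaN  NaN
3  0.0  3.0 NaN  4.0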
r   r+  r  r   Nzo.fillna with 'method' is deprecated and will raise in a future version. Use obj.ffill() or obj.bfill() instead.r  r   )r   r  r   r  r   rJ  Fr\  zF"value" parameter must be a scalar, dict or Series, but you passed a "")r   r  r   r  z9Currently only can fill with dict/Series column by column)r  r  br7  zVUnexpected get_loc result, please report a bug at https://github.com/pandas-dev/pandas)r   r  zinvalid fill value with a r   r~  )@rX   rG   r   sysgetrefcountrH   r  r	  rP   rL   rW  r   rl   rR   rQ   r  rY   r
  r   rU   r   r  r  r!  r  r,  r   r   r   rg  r   r  r  r  rc   r  r   r  r   r   r  r   rf  r(  r`  r  rh  rP  ra  ra  r   rb  r1  r  isetitemr  r=   rk   whererp   r   r   r   r   )r   r   r  r   r   r  r  ctr	ref_countwas_no_defaultr   r  r  is_dictrE  rF  
downcast_kres_klocsr7  rf  res_locr  s                          r   r  r    s8   z &gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..=3F3FNI#MM>%#$ /u=MM:&&' (  +- "S^^3++Hh? <D$$T*=(( 
 " )   yyA~edI%677u::"#'#yydy33-"5ME!MM$**5MAE!MME%e,,# K0014   99++g ,  ED)#45519-$ 
 '((!YYDY1F%,T$))+F$Xt4!KKMDA %%(^^
 $+ % "*a # #I,,Qj,QE"$)	 &eY77${{fioo=38

1a4 0 -2	 $*>>#9#9!#<D)$66')yyA'?'E *4 < <TWAW'+||~a'8 *4 < <TWAW ':%K'" !"
 +4D/3*/**QT*:)-1c6):#*==FLL#@:AFKK3$7$*OOC$A +:g *v //77!M!%((19!VV]]]DFFF%{{H#yy//#'  0  H E<00TYY!^::djjlE:?? #=d5k]!KLL++H==+I''//&&tH&==r   c                   g r   r  r   r   r   r  r  r  s         r   ffillNDFrame.ffill       	r   )r   r  r  r  c                   g r   r  r  s         r   r  r  $  r  r   c                   g r   r  r  s         r   r  r  0  r  r   c          	     R   U R                  US5      n[        US5      nU(       a  [        (       dL  [        5       (       a=  [        R
                  " U 5      [        ::  a  [        R                  " [        [        SS9  O[        (       d  [        5       (       d~  U R                  5       (       ai  [        R
                  " U 5      n[        n[        U [        5      (       a  [        U 5      (       a  US-  nXg::  a  [        R                  " [        [         SS9  U R#                  SUUUUUS9$ )aG  
Fill NA/NaN values by propagating the last valid observation to next valid.

Parameters
----------
axis : {axes_single_arg}
    Axis along which to fill missing values. For `Series`
    this parameter is unused and defaults to 0.
inplace : bool, default False
    If True, fill in-place. Note: this will modify any
    other views on this object (e.g., a no-copy slice for a column in a
    DataFrame).
limit : int, default None
    If method is specified, this is the maximum number of consecutive
    NaN values to forward/backward fill. In other words, if there is
    a gap with more than this number of consecutive NaNs, it will only
    be partially filled. If method is not specified, this is the
    maximum number of entries along the entire axis where NaNs will be
    filled. Must be greater than 0 if not None.
limit_area : {{`None`, 'inside', 'outside'}}, default None
    If limit is specified, consecutive NaNs will be filled with this
    restriction.

    * ``None``: No fill restriction.
    * 'inside': Only fill NaNs surrounded by valid values
      (interpolate).
    * 'outside': Only fill NaNs outside valid values (extrapolate).

    .. versionadded:: 2.2.0

downcast : dict, default is None
    A dict of item->dtype of what to downcast if possible,
    or the string 'infer' which will try to downcast to an appropriate
    equal type (e.g. float64 to int64 if possible).

    .. deprecated:: 2.2.0

Returns
-------
{klass} or None
    Object with missing values filled or None if ``inplace=True``.

Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
...                    [3, 4, np.nan, 1],
...                    [np.nan, np.nan, np.nan, np.nan],
...                    [np.nan, 3, np.nan, 4]],
...                   columns=list("ABCD"))
>>> df
     A    B   C    D
0  NaN  2.0 NaN  0.0
1  3.0  4.0 NaN  1.0
2  NaN  NaN NaN  NaN
3  NaN  3.0 NaN  4.0

>>> df.ffill()
     A    B   C    D
0  NaN  2.0 NaN  0.0
1  3.0  4.0 NaN  1.0
2  3.0  4.0 NaN  1.0
3  3.0  3.0 NaN  4.0

>>> ser = pd.Series([1, np.nan, 2, 3])
>>> ser.ffill()
0   1.0
1   1.0
2   2.0
3   3.0
dtype: float64
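
``limit_area`` restricts which runs of NaN get filled; with ``'inside'`` only
gaps surrounded by valid values are filled (a short sketch of the option
described above):

>>> s = pd.Series([np.nan, 1, np.nan, 2, np.nan])
>>> s.ffill(limit_area='inside')
0    NaN
1    1.0
2    1.0
3    2.0
4    NaN
dtype: float64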
r  r   r+  r  r   r  r  rX   rG   r   r  r  rH   r  r	  rP   rL   rW  r   rl   rR   rQ   r  r  r   r   r   r  r  r  r  r  s           r   r  r  <  s    j ++Hg>%gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..=3F3FNI#MM>%#$ $$!  % 

 
	
r   r   r   r  r  c               f    [         R                  " S[        [        5       S9  U R	                  XX4S9$ )a]  
Fill NA/NaN values by propagating the last valid observation to next valid.

.. deprecated:: 2.0

    {klass}.pad is deprecated. Use {klass}.ffill instead.

Returns
-------
{klass} or None
    Object with missing values filled or None if ``inplace=True``.

Examples
--------
Please see examples for :meth:`DataFrame.ffill` or :meth:`Series.ffill`.
zPDataFrame.pad/Series.pad is deprecated. Use DataFrame.ffill/Series.ffill insteadr  r  )r  r	  r  rU   r  r   r   r   r  r  s        r   padNDFrame.pad  3    4 	3')		
 zztEzUUr   c                   g r   r  r  s         r   bfillNDFrame.bfill  r  r   )r   r  r  c                   g r   r  r  s        r   r  r    s     	r   c                   g r   r  r  s         r   r  r    r  r   c          	     R   U R                  US5      n[        US5      nU(       a  [        (       dL  [        5       (       a=  [        R
                  " U 5      [        ::  a  [        R                  " [        [        SS9  O[        (       d  [        5       (       d~  U R                  5       (       ai  [        R
                  " U 5      n[        n[        U [        5      (       a  [        U 5      (       a  US-  nXg::  a  [        R                  " [        [         SS9  U R#                  SUUUUUS9$ )a  
Fill NA/NaN values by using the next valid observation to fill the gap.

Parameters
----------
axis : {axes_single_arg}
    Axis along which to fill missing values. For `Series`
    this parameter is unused and defaults to 0.
inplace : bool, default False
    If True, fill in-place. Note: this will modify any
    other views on this object (e.g., a no-copy slice for a column in a
    DataFrame).
limit : int, default None
    If method is specified, this is the maximum number of consecutive
    NaN values to forward/backward fill. In other words, if there is
    a gap with more than this number of consecutive NaNs, it will only
    be partially filled. If method is not specified, this is the
    maximum number of entries along the entire axis where NaNs will be
    filled. Must be greater than 0 if not None.
limit_area : {{`None`, 'inside', 'outside'}}, default None
    If limit is specified, consecutive NaNs will be filled with this
    restriction.

    * ``None``: No fill restriction.
    * 'inside': Only fill NaNs surrounded by valid values
      (interpolate).
    * 'outside': Only fill NaNs outside valid values (extrapolate).

    .. versionadded:: 2.2.0

downcast : dict, default is None
    A dict of item->dtype of what to downcast if possible,
    or the string 'infer' which will try to downcast to an appropriate
    equal type (e.g. float64 to int64 if possible).

    .. deprecated:: 2.2.0

Returns
-------
{klass} or None
    Object with missing values filled or None if ``inplace=True``.

Examples
--------
For Series:

>>> s = pd.Series([1, None, None, 2])
>>> s.bfill()
0    1.0
1    2.0
2    2.0
3    2.0
dtype: float64
>>> s.bfill(limit=1)
0    1.0
1    NaN
2    2.0
3    2.0
dtype: float64

With DataFrame:

>>> df = pd.DataFrame({{'A': [1, None, None, 4], 'B': [None, 5, None, 7]}})
>>> df
      A     B
0   1.0   NaN
1   NaN   5.0
2   NaN   NaN
3   4.0   7.0
>>> df.bfill()
      A     B
0   1.0   5.0
1   4.0   5.0
2   4.0   7.0
3   4.0   7.0
>>> df.bfill(limit=1)
      A     B
0   1.0   5.0
1   NaN   5.0
2   4.0   7.0
3   4.0   7.0
r  r   r+  r  r   r  r  r  s           r   r  r    s    @ ++Hg>%gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..=3F3FNI#MM>%#$ $$!  % 

 
	
r   c               f    [         R                  " S[        [        5       S9  U R	                  XX4S9$ )a^  
Fill NA/NaN values by using the next valid observation to fill the gap.

.. deprecated:: 2.0

    {klass}.backfill is deprecated. Use {klass}.bfill instead.

Returns
-------
{klass} or None
    Object with missing values filled or None if ``inplace=True``.

Examples
--------
Please see examples for :meth:`DataFrame.bfill` or :meth:`Series.bfill`.
zZDataFrame.backfill/Series.backfill is deprecated. Use DataFrame.bfill/Series.bfill insteadr  r  )r  r	  r  rU   r  r  s        r   backfillNDFrame.backfill  r  r   )r   r  r  r  c                   g r   r  r   
to_replacer   r   r  r  r  s          r   r  NDFrame.replace  r  r   )r  r  r  c                   g r   r  r  s          r   r  r    r  r   c                   g r   r  r  s          r   r  r    r  r   )r   r   c               <   U[         R                  La:  [        R                  " S[	        U 5      R
                   S3[        [        5       S9  O<Ub9  [        R                  " S[	        U 5      R
                   S3[        [        5       S9  U[         R                  L a`  U[         R                  L aM  [        U5      (       d=  USL a8  [        R                  " [	        U 5      R
                   S3[        [        5       S9  [        U5      (       dJ  [        U5      (       d:  [        U5      (       d*  [        S[        [	        U5      R
                  5       35      e[        US5      nU(       a  [        (       dL  [!        5       (       a=  ["        R$                  " U 5      [&        ::  a  [        R                  " [(        [*        S	S9  O[        (       d  [!        5       (       d~  U R-                  5       (       ai  ["        R$                  " U 5      n[&        n[/        U [0        5      (       a  [3        U 5      (       a  US
-  nXx::  a  [        R                  " [4        [        S	S9  [7        U5      (       d  Ub  [9        S5      eU[         R                  L d  U[         R                  LGa  U[         R                  L a  Sn[        U5      (       d  [        U5      (       d  U/n[/        U[:        [<        45      (       aS  [/        U [>        5      (       a,  SSK J!n	  U RE                  U	RF                  XX44S9n
U(       a  g U
$ U RG                  XX45      $ [        U5      (       d  [        U5      (       d  [        S5      eUnSn[=        URI                  5       5      nU(       a  [K        U6 u  pO/ / pU Vs/ s H  n[        U5      PM     nn[M        U5      (       aw  [O        U5      (       d  [        S5      e0 n0 nU HN  u  nn[=        [K        URI                  5       6 5      =(       d    / / 4u  p[=        U5      UU'   [=        U5      UU'   MP     UUp!OXp!U RQ                  XX4US9$ U RR                  (       d  U(       a  g U RU                  S S9$ [        U5      (       a  [        U5      (       aY  URW                  5        Vs0 s H,  nUURW                  5       ;   d  M  UU ;   d  M!  UUU   UU   4_M.     nnU RY                  UX55      $ [        U5      (       dU  U RZ                  S
:X  a  [9        S5      eURI                  5        VVs0 s H  u  nnUUU4_M     nnnU RY                  UX55      $ [        S5      e[        U5      (       ay  [        U5      (       d  U/[]        U5      -  n[]        U5      []        U5      :w  a$  [9        S[]        U5       S[]        U5       S35      eU R^                  Ra                  UUUUS9nGOVUck  [        U5      (       dJ  [        U5      (       d:  [        U5      (       d*  [        S[        [	        U5      R
                  5       35      eU RQ                  XRX4SS9$ [        U5      (       aU  U RZ                  S
:X  a  [9        S5      eURI                  5        VVs0 s H  u  nnUUU4_M     nnnU RY                  UX55      $ [        U5      (       dI  [c        XQ5      nU(       a  U R^                  Re                  UUUS9nOEU R^                  RQ                  XUS9nO*[        S[        [	        U5      R
                  5       35      eU Rg                  UURh                  S9n
U(       a  U Rk                  U
5      $ U
Rm                  U S S!9$ s  snf s  snf s  snnf s  snnf )"NzThe 'method' keyword in z?.replace is deprecated and will be removed in a future version.r  zThe 'limit' keyword in Fz.replace without 'value' and with non-dict-like 'to_replace' is deprecated and will raise in a future version. Explicitly specify the new values instead.zYExpecting 'to_replace' to be either a scalar, array-like, dict or None, got invalid type r   r+  r   z4'to_replace' must be 'None' if 'regex' is not a boolr  r   rJ  )r  zfIf "to_replace" and "value" are both None and "to_replace" is not a list, then regex must be a mappingTzSIf a nested mapping is passed, all values of the top level mapping must be mappings)r   r  r  r   zASeries.replace cannot use dict-like to_replace and non-None valuez.value argument must be scalar, dict, or Seriesz2Replacement lists must match in length. Expecting z got r#  )src_list	dest_listr   r  z|'regex' must be a string or a compiled regular expression or a list or dict of strings or regular expressions, you passed a z<Series.replace cannot use dict-value and non-None to_replace)r  r   r   zInvalid "to_replace" type: r   r  r~  )7r   r  r  r	  r
  r   r  rU   ra   rg   rf   rc   r  reprrX   rG   r   r  r  rH   rP   rL   rW  r   rl   rR   rQ   r_   r   r`  ri  rk   rg  r   r  _replace_singler   rL  r  r  r  rj  r   r7  _replace_columnwiser,  r   r   replace_listrx   replace_regexr   r   r  r   )r   r  r   r   r  r  r  r  r  r   r  r   r7  r   rF  are_mappingsto_rep_dict
value_dictrE  r  r  to_repr  vals                           r   r  r    sH     'MM*4:+>+>*? @F F+- MM)$t**=*=)> ?F F+- S^^##..( ,, MM:&&' (= = +- j!!
++J''2Z(11235  &gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..=3F3F NI#MM>%#$ u~~*"8STTCNN"fCNN&B
 '  
++L4G4G(\
*udm44dL11-!ZZ..('A ( F #!M++JOO
++#E**#2 
 #
))+,E"E{f !#Bf5;<VLOVL<<  <((#D 
 !
!DAq $(QWWY#8 $=LD
 &*$ZKN$(LJqM " %0E$(E<<7u    
 99yydy++J''&&
 $.??#4#4C%**,. ;36$; ;jouSz::#4  
  33GWLL &e,,yyA~(1 
 AK@P@P@R@Rffe_,@R    33GWLL#$TUUj))#E** #Gc*o5E z?c%j0$%%(_$5U3u:,aI   9911'##	 2  #$U++#E**#E**#((,T%[-A-A(B'CE 
 ||'d $  
  &&yyA~(2  GLkkmTm(#ssZ$55mGT33GWLL%e,,,U?E#'99#:#:'1"'$+ $; $ $(99#4#4'1 $5 $ $5d4
;K;T;T6U5VW  ++H8==+I''//&&tI&>>C =Rb Us$   %^0^^^5^^)r   r  r   limit_directionr  r  c                   g r   r  	r   r  r   r  r   r  r  r  r  s	            r   interpolateNDFrame.interpolate       	r   )r   r  r  r  r  c                   g r   r  r  s	            r   r  r    r  r   c                   g r   r  r  s	            r   r  r    r  r   c               V	   U[         R                  La:  [        R                  " S[	        U 5      R
                   S3[        [        5       S9  OSnUb  US:w  a  [        S5      e[        US5      nU(       a  [        (       dL  [        5       (       a=  [        R                  " U 5      [        ::  a  [        R                  " [        [         SS9  O[        (       d  [        5       (       d~  U R#                  5       (       ai  [        R                  " U 5      n	[        n
[%        U [&        5      (       a  [)        U 5      (       a  U
S	-  n
X::  a  [        R                  " [*        [        SS9  U R-                  U5      nU R.                  (       a  U(       a  gU R1                  5       $ [%        U[2        5      (       d  [        S
5      e/ SQnUR5                  5       U;   a?  [        R                  " [	        U 5      R
                   SU S3[        [        5       S9  U SpOUS	:X  a  U R6                  S4OU S4u  p[8        R:                  " UR<                  [>        :H  5      (       at  UR@                  S:X  a,  [8        RB                  " UR<                  [>        :H  5      (       d8  [        R                  " [	        U 5      R
                   S3[        [        5       S9  X;   a*  SU;   a$  [        S[	        U 5      R
                   SU 35      e[%        URD                  [F        5      (       a  US:w  a  [        S5      e[H        RJ                  " XQ5      nUR@                  S:X  a7  [8        RB                  " UR<                  [>        :H  5      (       a  [M        S5      eUR5                  5       U;   ar  U RN                  RP                  (       d)  US	:X  a#  U(       a
  [S        5       eU R6                  S	U-
  SpnURN                  RU                  UU RW                  U5      UUUUS9nOD[H        RX                  " XRD                  5      nURN                  RZ                  " SUUUUUUUS.UD6nU R]                  XR^                  S9nU(       a  UR6                  nU(       a  U Ra                  U5      $ URc                  U SS9$ )a+  
Fill NaN values using an interpolation method.

Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.

Parameters
----------
method : str, default 'linear'
    Interpolation technique to use. One of:

    * 'linear': Ignore the index and treat the values as equally
      spaced. This is the only method supported on MultiIndexes.
    * 'time': Works on daily and higher resolution data, weighting the
      interpolation by the length of the time interval between points.
    * 'index', 'values': use the actual numerical values of the index.
    * 'pad': Fill in NaNs using existing values.
    * 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
      'barycentric', 'polynomial': Passed to
      `scipy.interpolate.interp1d`, whereas 'spline' is passed to
      `scipy.interpolate.UnivariateSpline`. These methods use the numerical
      values of the index.  Both 'polynomial' and 'spline' require that
      you also specify an `order` (int), e.g.
      ``df.interpolate(method='polynomial', order=5)``. Note that the
      `slinear` method in pandas refers to the SciPy first-order `spline`
      rather than the pandas first-order `spline`.
    * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima',
      'cubicspline': Wrappers around the SciPy interpolation methods of
      similar names. See `Notes`.
    * 'from_derivatives': Refers to
      `scipy.interpolate.BPoly.from_derivatives`.

axis : {{0 or 'index', 1 or 'columns', None}}, default None
    Axis to interpolate along. For `Series` this parameter is unused
    and defaults to 0.
limit : int, optional
    Maximum number of consecutive NaNs to fill. Must be greater than
    0.
inplace : bool, default False
    Update the data in place if possible.
limit_direction : {{'forward', 'backward', 'both'}}, Optional
    Consecutive NaNs will be filled in this direction.

    If limit is specified:
        * If 'method' is 'pad' or 'ffill', 'limit_direction' must be 'forward'.
        * If 'method' is 'backfill' or 'bfill', 'limit_direction' must be
          'backward'.

    If 'limit' is not specified:
        * If 'method' is 'backfill' or 'bfill', the default is 'backward'
        * else the default is 'forward'

    Raises ValueError if `limit_direction` is 'forward' or 'both' and
        method is 'backfill' or 'bfill'.
    Raises ValueError if `limit_direction` is 'backward' or 'both' and
        method is 'pad' or 'ffill'.

limit_area : {{`None`, 'inside', 'outside'}}, default None
    If limit is specified, consecutive NaNs will be filled with this
    restriction.

    * ``None``: No fill restriction.
    * 'inside': Only fill NaNs surrounded by valid values
      (interpolate).
    * 'outside': Only fill NaNs outside valid values (extrapolate).

downcast : optional, 'infer' or None, defaults to None
    Downcast dtypes if possible.

    .. deprecated:: 2.1.0

``**kwargs`` : optional
    Keyword arguments to pass on to the interpolating function.

Returns
-------
Series or DataFrame or None
    Returns the same object type as the caller, interpolated at
    some or all ``NaN`` values or None if ``inplace=True``.

See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
    (Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
    Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
    interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
    interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.

Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<https://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__.

Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.

>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0    0.0
1    1.0
2    NaN
3    3.0
dtype: float64
>>> s.interpolate()
0    0.0
1    1.0
2    2.0
3    3.0
dtype: float64

Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).

>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0    0.000000
1    2.000000
2    4.666667
3    8.000000
dtype: float64

Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.

Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.

>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
...                    (np.nan, 2.0, np.nan, np.nan),
...                    (2.0, 3.0, np.nan, 9.0),
...                    (np.nan, 4.0, -4.0, 16.0)],
...                   columns=list('abcd'))
>>> df
     a    b    c     d
0  0.0  NaN -1.0   1.0
1  NaN  2.0  NaN   NaN
2  2.0  3.0  NaN   9.0
3  NaN  4.0 -4.0  16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
     a    b    c     d
0  0.0  NaN -1.0   1.0
1  1.0  2.0 -2.0   5.0
2  2.0  3.0 -3.0   9.0
3  2.0  4.0 -4.0  16.0

Using polynomial interpolation.

>>> df['d'].interpolate(method='polynomial', order=2)
0     1.0
1     4.0
2     9.0
3    16.0
Name: d, dtype: float64
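
A further sketch of the ``limit_area`` option described above: restricting the
fill to NaNs that are surrounded by valid values leaves leading and trailing
gaps untouched.

>>> s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
>>> s.interpolate(limit_area='inside')
0    NaN
1    1.0
2    2.0
3    3.0
4    NaN
dtype: float64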
r  z.interpolate is deprecated and will be removed in a future version. Call result.infer_objects(copy=False) on the result instead.r  Nr  z'downcast must be either None or 'infer'r   r+  r   z&'method' should be a string, not None.)r  r  r  r  z.interpolate with method=zZ is deprecated and will raise in a future version. Use obj.ffill() or obj.bfill() instead.FTz.interpolate with object dtype is deprecated and will raise in a future version. Call obj.infer_objects(copy=False) before interpolating instead.r  z('fill_value' is not a valid keyword for z.interpolate with method from linearz@Only `method=linear` interpolation is supported on MultiIndexes.zvCannot interpolate with all object-dtype columns in the DataFrame. Try setting at least one column to a numeric dtype.r  )r  r  r  r  r  r   r  r   r  r~  r  )2r   r  r  r	  r
  r   r  rU   r   rX   rG   r   r  r  rH   rP   rL   rW  r   rl   rR   rQ   r!  rR  r   r   lowerr=   rh  r  rN  r   r,  r  r  r   ru   infer_limit_directionr  r   r  r   r  r   get_interp_indexr  r   r   r  r   )r   r  r   r  r   r  r  r  r  r  r  fillna_methodsr   should_transposer  r  r  s                    r   r  r     s    l 3>>)MM,T$Z-@-@,A BO O +- HH$7FGG%gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..=3F3FNI#MM>%#$ $$T*::99;&#&&EFF><<>^+MM:&&''@ I: : +- %)%!6:aiTVVTNdE]!CvvcjjF*++A"&&v1E*F*FMM:../ 0V V &#3#5 #(>::&&''E!"$  cii,,81CR  "77P88q=RVVCJJ&$899-  <<>^+ 99,,-//.2ffa$h+xx//11$7%! 0 H ,,VYY?Exx++ 	 /%!	 	H ++H==+IXXF''//&&tM&BBr   c                   [        U[        5      (       a  [        U5      nU R                  R                  (       d  [        S5      e[        U [        5      nU(       a  Ub  [        S5      eO"Uc  U R                  n[        U5      (       d  U/n[        U5      nU(       Gd  U R                  S   n[        U R                  [        5      (       a  [        XR                  R                  S9nX:  a@  U(       d)  U R                  U R                  U[        R                  S9$ [        R                  $ U(       al  U R                  R!                  USS9nUS:  a  US-  nU R"                  nUS:  a1  [%        Xv   5      (       a  US-  nUS:  a  [%        Xv   5      (       a  M  Xv   $ [        U[&        5      (       d  U(       a  ['        U5      O['        U/5      nU(       a  U R%                  5       OX   R%                  5       R)                  SS	9nUR+                  5       (       a  U(       a4  [-        S
U 5      n U R/                  [        R                  XR0                  S9$ U(       a4  [-        SU 5      n U R/                  [        R                  XR                  S9$ [-        SU 5      n U R                  [        R                  U R                  US   S9$ U R                  R3                  XR"                  ) 5      n	U	S:H  n
U R5                  U	5      nXl        U
R)                  5       (       a  [        R                  UR6                  U
'   U(       a  U$ UR8                  S   $ )a
  
Return the last row(s) without any NaNs before `where`.

The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`)

If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame

Parameters
----------
where : date or array-like of dates
    Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
    For DataFrame, if not `None`, only use these columns to
    check for NaNs.

Returns
-------
scalar, Series, or DataFrame

    The return can be:

    * scalar : when `self` is a Series and `where` is a scalar
    * Series: when `self` is a Series and `where` is an array-like,
      or when `self` is a DataFrame and `where` is a scalar
    * DataFrame : when `self` is a DataFrame and `where` is an
      array-like

See Also
--------
merge_asof : Perform an asof merge. Similar to left join.

Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.

Examples
--------
A Series and a scalar `where`.

>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10    1.0
20    2.0
30    NaN
40    4.0
dtype: float64

>>> s.asof(20)
2.0

For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.

>>> s.asof([5, 20])
5     NaN
20    2.0
dtype: float64

Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.

>>> s.asof(30)
2.0

Take all columns into consideration

>>> df = pd.DataFrame({'a': [10., 20., 30., 40., 50.],
...                    'b': [None, None, None, None, 500]},
...                   index=pd.DatetimeIndex(['2018-02-27 09:01:00',
...                                           '2018-02-27 09:02:00',
...                                           '2018-02-27 09:03:00',
...                                           '2018-02-27 09:04:00',
...                                           '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
...                           '2018-02-27 09:04:30']))
                      a   b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN

Take a single column into consideration

>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
...                           '2018-02-27 09:04:30']),
...         subset=['a'])
                        a   b
2018-02-27 09:03:30  30.0 NaN
2018-02-27 09:04:30  40.0 NaN
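
As a rough sketch of the lookup being performed (for a Series, and ignoring
edge cases), a scalar ``where`` behaves much like forward-filling the
non-missing values onto the requested label:

>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s.asof(30)
2.0
>>> s.dropna().reindex([30], method='ffill').iloc[0]  # roughly equivalent
2.0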
zasof requires a sorted indexzsubset is not valid for Seriesr   )freq)r  r8  r   rightsider   r   r   )r  r8  r   r  r  )r   r   r   r  is_monotonic_increasingr   rl   r(  rc   r   r   r  ra  rh  float64nansearchsortedr  ro   r~   r  r  r   r  r8  	asof_locsrL  rf  r  )r   r  r  	is_seriesis_listrQ  rf  r   nullsr  r  r   s               r   asofNDFrame.asofI!  s   ~ eS!!e$Ezz11;<<tY/	! !ABB " ~'' u%JJqME$**k22u::??;} 33"llbjj 4   vv jj--e'-B71HCAg$v{"3"31HC Ag$v{"3"3{"%''$+E%LwE(		dl.?.?.A.E.E1.E.M99;;Hd+((u99(MMK.((ull(SSK.//FF$,,U1X 0   zz##E]]+;< rzyy
88::  VVDHHTNt1DIIbM1r   c                4    [        U 5      R                  U SS9$ )a  
Detect missing values.

Return a boolean same-sized object indicating if the values are NA.
NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).

Returns
-------
{klass}
    Mask of bool values for each element in {klass} that
    indicates whether an element is an NA value.

See Also
--------
{klass}.isnull : Alias of isna.
{klass}.notna : Boolean inverse of isna.
{klass}.dropna : Omit axes labels with missing values.
isna : Top-level isna.

Examples
--------
Show which entries in a DataFrame are NA.

>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
...                              pd.Timestamp('1940-04-25')],
...                        name=['Alfred', 'Batman', ''],
...                        toy=[None, 'Batmobile', 'Joker']))
>>> df
   age       born    name        toy
0  5.0        NaT  Alfred       None
1  6.0 1939-05-27  Batman  Batmobile
2  NaN 1940-04-25              Joker

>>> df.isna()
     age   born   name    toy
0  False   True  False   True
1  False  False  False  False
2   True  False  False  False

Show which entries in a Series are NA.

>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0    5.0
1    6.0
2    NaN
dtype: float64

>>> ser.isna()
0    False
1    False
2     True
dtype: bool
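
A small follow-up sketch: chaining ``sum`` onto the boolean mask is a common
way to count missing values per column (reusing the ``df`` from the example
above).

>>> df.isna().sum()
age     1
born    1
name    0
toy     1
dtype: int64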
ro   r~  ro   r   r   s    r   ro   NDFrame.isna!  s    z Dz&&tF&;;r   c                4    [        U 5      R                  U SS9$ )Nisnullr~  r  r   s    r   r  NDFrame.isnull4"  s    Dz&&tH&==r   c                4    [        U 5      R                  U SS9$ )a  
Detect existing (non-missing) values.

Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.

Returns
-------
{klass}
    Mask of bool values for each element in {klass} that
    indicates whether an element is not an NA value.

See Also
--------
{klass}.notnull : Alias of notna.
{klass}.isna : Boolean inverse of notna.
{klass}.dropna : Omit axes labels with missing values.
notna : Top-level notna.

Examples
--------
Show which entries in a DataFrame are not NA.

>>> df = pd.DataFrame(dict(age=[5, 6, np.nan],
...                        born=[pd.NaT, pd.Timestamp('1939-05-27'),
...                              pd.Timestamp('1940-04-25')],
...                        name=['Alfred', 'Batman', ''],
...                        toy=[None, 'Batmobile', 'Joker']))
>>> df
   age       born    name        toy
0  5.0        NaT  Alfred       None
1  6.0 1939-05-27  Batman  Batmobile
2  NaN 1940-04-25              Joker

>>> df.notna()
     age   born  name    toy
0   True  False  True  False
1   True   True  True   True
2  False   True  True   True

Show which entries in a Series are not NA.

>>> ser = pd.Series([5, 6, np.nan])
>>> ser
0    5.0
1    6.0
2    NaN
dtype: float64

>>> ser.notna()
0     True
1     True
2    False
dtype: bool
rp   r~  rp   r   r   s    r   rp   NDFrame.notna8"  s    z T{''W'==r   c                4    [        U 5      R                  U SS9$ )Nnotnullr~  r  r   s    r   r  NDFrame.notnullw"  s    T{''Y'??r   c                Z   Ub$  [         R                  " [        U5      5      (       d'  Ub/  [         R                  " [        U5      5      (       a  [        S5      eU nU R                  5       nUb  XPU:  -  nUR	                  XaUS9nUb"  XPU:*  -  nU(       a  U OUnUR	                  XbUS9nU$ )Nz*Cannot use an NA value as a clip thresholdr   )rh  r  ro   r   r  )r   r  upperr   r  r  conds          r   _clip_with_scalarNDFrame._clip_with_scalar{"  s    "&&e"5"5"&&e"5"5IJJyy{5=)D\\W " F 5=)D$T&F\\W " F r   c                   Ub  U R                  U5      n[        U5      (       aA  [        U5      (       a1  UR                  S:X  a  U R	                  S XS9$ U R	                  US US9$ [        U[        5      (       dR  [        U5      (       aB  [        U [        5      (       a  U R                  XR                  S9nOU R                  XS S9S   n[        U5      (       aC  UR                  S:X  a  [        R                  O[        R                  * nUR                  U5      nOUnU" XcS9[        U 5      -  nU R                  XqX4S9$ )Nler  )r  )flexr   r   r4  )r!  rg   rd   r   r  r   rl   rc   r  r  _align_for_oprh  infr  ro   r  )r   	thresholdr  r   r   r  threshold_infr  s           r   _clip_with_one_boundNDFrame._clip_with_one_bound"  s&   ((.D YIi$8$8$&--dI-OO)))T7)KK
 9i00l96M6M$	** --izz-J	 ..yT.J1M	 	""#)??d#:J%,,Z8M%M1DJ> zz&$zHHr   r4  c                   g r   r  r   r  r  r   r   r  s         r   clipNDFrame.clip"  r  r   r   c                   g r   r  r  s         r   r  r  "  r  r   c                   g r   r  r  s         r   r  r  "  r  r   c               (   [        US5      nU(       a  [        (       dL  [        5       (       a=  [        R                  " U 5      [
        ::  a  [        R                  " [        [        SS9  O[        (       d  [        5       (       d  U R                  5       (       aj  [        R                  " U 5      n[
        n[        U [        5      (       a  [        U S5      (       a  US-  nXg::  a  [        R                  " [        [        SS9  [         R"                  " USU5      nUb  U R%                  U5      n['        U5      n[)        U5      (       d  [*        R,                  " U5      (       a  SnO[*        R.                  " U5      (       a  Sn['        U5      n	[)        U5      (       d  [*        R,                  " U	5      (       a  SnO[*        R.                  " U	5      (       a  SnUb8  Ub5  [1        U5      (       a%  [1        U5      (       a  [3        X5      [5        X5      p!Ub  [7        U5      (       a#  Ub  [7        U5      (       a  U R9                  XUS9$ U n
Ub  U
R;                  XR<                  X4S	9n
Ub#  U(       a  U n
U
R;                  X R>                  X4S	9n
U
$ )
a	  
Trim values at input threshold(s).

Assigns values outside the boundaries to the boundary values. Thresholds
can be singular values or array-like, and in the latter case
the clipping is performed element-wise along the specified axis.

Parameters
----------
lower : float or array-like, default None
    Minimum threshold value. All values below this
    threshold will be set to it. A missing
    threshold (e.g. `NA`) will not clip the value.
upper : float or array-like, default None
    Maximum threshold value. All values above this
    threshold will be set to it. A missing
    threshold (e.g. `NA`) will not clip the value.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
    Align object with lower and upper along the given axis.
    For `Series` this parameter is unused and defaults to `None`.
inplace : bool, default False
    Whether to perform the operation in place on the data.
*args, **kwargs
    Additional keywords have no effect but might be accepted
    for compatibility with numpy.

Returns
-------
Series or DataFrame or None
    Same type as calling object with the values outside the
    clip boundaries replaced or None if ``inplace=True``.

See Also
--------
Series.clip : Trim values at input threshold in series.
DataFrame.clip : Trim values at input threshold in dataframe.
numpy.clip : Clip (limit) the values in an array.

Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
   col_0  col_1
0      9     -2
1     -3     -7
2      0      6
3     -1      8
4      5     -5

Clips per column using lower and upper thresholds:

>>> df.clip(-4, 6)
   col_0  col_1
0      6     -2
1     -3     -4
2      0      6
3     -1      6
4      5     -4

Clips using specific lower and upper thresholds per column:

>>> df.clip([-2, -1], [4, 5])
    col_0  col_1
0      4     -1
1     -2     -1
2      0      5
3     -1      5
4      4     -1

Clips using specific lower and upper thresholds per column element:

>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0    2
1   -4
2   -1
3    6
4    3
dtype: int64

>>> df.clip(t, t + 4, axis=0)
   col_0  col_1
0      6      2
1     -3     -4
2      0      3
3      6      8
4      5      3

Clips using specific lower threshold per column element, with missing values:

>>> t = pd.Series([2, -4, np.nan, 6, 3])
>>> t
0    2.0
1   -4.0
2    NaN
3    6.0
4    3.0
dtype: float64

>>> df.clip(t, axis=0)
col_0  col_1
0      9      2
1     -3     -4
2      0      6
3      6      8
4      5      3
r   r+  r  r   r   r  Nr  )r  r   r   ) rX   rG   r   r  r  rH   r  r	  rP   rL   rW  r   rl   hasattrrQ   r  rM  validate_clip_with_axisr!  ro   rc   rh  r  r  rg   minmaxrd   r  r  ger  )r   r  r  r   r   r  r  r  
isna_lower
isna_upperr  s              r   r  r  "  s(   l &gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..743K3KNI#MM>%#$ ))$F;((.D %[
E""vvj!!VVJE%[
E""vvj!!VVJE !%  %  u,c%.?5 MYu--EMYuEUEU))%)HH00ggD 1 F 00ggD 1 F r   c           	     $    SSK Jn  U" U UUUUUS9$ )a  
Convert time series to specified frequency.

Returns the original data conformed to a new index with the specified
frequency.

If the index of this {klass} is a :class:`~pandas.PeriodIndex`, the new index
is the result of transforming the original index with
:meth:`PeriodIndex.asfreq <pandas.PeriodIndex.asfreq>` (so the original index
will map one-to-one to the new index).

Otherwise, the new index will be equivalent to ``pd.date_range(start, end,
freq=freq)`` where ``start`` and ``end`` are, respectively, the first and
last entries in the original index (see :func:`pandas.date_range`). The
values corresponding to any timesteps in the new index which were not present
in the original index will be null (``NaN``), unless a method for filling
such unknowns is provided (see the ``method`` parameter below).

The :meth:`resample` method is more appropriate if an operation on each group of
timesteps (such as an aggregate) is necessary to represent the data at the new
frequency.

Parameters
----------
freq : DateOffset or str
    Frequency DateOffset or string.
method : {{'backfill'/'bfill', 'pad'/'ffill'}}, default None
    Method to use for filling holes in reindexed Series (note this
    does not fill NaNs that already were present):

    * 'pad' / 'ffill': propagate last valid observation forward to next
      valid
    * 'backfill' / 'bfill': use NEXT valid observation to fill.
how : {{'start', 'end'}}, default end
    For PeriodIndex only (see PeriodIndex.asfreq).
normalize : bool, default False
    Whether to reset output index to midnight.
fill_value : scalar, optional
    Value to use for missing values, applied during upsampling (note
    this does not fill NaNs that already were present).

Returns
-------
{klass}
    {klass} object reindexed to the specified frequency.

See Also
--------
reindex : Conform DataFrame to new index with optional filling logic.

Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

Examples
--------
Start by creating a series with 4 one minute timestamps.

>>> index = pd.date_range('1/1/2000', periods=4, freq='min')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({{'s': series}})
>>> df
                       s
2000-01-01 00:00:00    0.0
2000-01-01 00:01:00    NaN
2000-01-01 00:02:00    2.0
2000-01-01 00:03:00    3.0

Upsample the series into 30 second bins.

>>> df.asfreq(freq='30s')
                       s
2000-01-01 00:00:00    0.0
2000-01-01 00:00:30    NaN
2000-01-01 00:01:00    NaN
2000-01-01 00:01:30    NaN
2000-01-01 00:02:00    2.0
2000-01-01 00:02:30    NaN
2000-01-01 00:03:00    3.0

Upsample again, providing a ``fill value``.

>>> df.asfreq(freq='30s', fill_value=9.0)
                       s
2000-01-01 00:00:00    0.0
2000-01-01 00:00:30    9.0
2000-01-01 00:01:00    NaN
2000-01-01 00:01:30    9.0
2000-01-01 00:02:00    2.0
2000-01-01 00:02:30    9.0
2000-01-01 00:03:00    3.0

Upsample again, providing a ``method``.

>>> df.asfreq(freq='30s', method='bfill')
                       s
2000-01-01 00:00:00    0.0
2000-01-01 00:00:30    NaN
2000-01-01 00:01:00    NaN
2000-01-01 00:01:30    2.0
2000-01-01 00:02:00    2.0
2000-01-01 00:02:30    3.0
2000-01-01 00:03:00    3.0
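
One more sketch: downsampling with ``asfreq`` simply selects the rows that fall
on the new grid, without aggregating; use :meth:`resample` when the groups need
to be combined.

>>> df.asfreq(freq='2min')
                       s
2000-01-01 00:00:00    0.0
2000-01-01 00:02:00    2.0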
r   )asfreq)r  how	normalizer  )pandas.core.resampler  )r   r  r  r   r!  r  r  s          r   r  NDFrame.asfreq#  s'    f 	0!
 	
r   c                    Uc  SnU R                  U5      nU R                  U5      n[        U[        5      (       d  [	        S5      eUR                  XS9nU R                  XSS9$ )a  
Select values at particular time of day (e.g., 9:30AM).

Parameters
----------
time : datetime.time or str
    The values to select.
axis : {0 or 'index', 1 or 'columns'}, default 0
    For `Series` this parameter is unused and defaults to 0.

Returns
-------
Series or DataFrame

Raises
------
TypeError
    If the index is not  a :class:`DatetimeIndex`

See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
    values at particular time of the day.

Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12h')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
                     A
2018-04-09 00:00:00  1
2018-04-09 12:00:00  2
2018-04-10 00:00:00  3
2018-04-10 12:00:00  4

>>> ts.at_time('12:00')
                     A
2018-04-09 12:00:00  2
2018-04-10 12:00:00  4
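
As an additional sketch, the same selection can be written with a
``datetime.time`` object instead of a string:

>>> from datetime import time
>>> ts.at_time(time(12, 0))
                     A
2018-04-09 12:00:00  2
2018-04-10 12:00:00  4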
r   Index must be DatetimeIndex)r  r   )r!  r  r   r}   r  indexer_at_timerX  )r   timer  r   r  r  s         r   at_timeNDFrame.at_time$  sm    Z <D$$T*t$%//9::'''8&&w&::r   c                    Uc  SnU R                  U5      nU R                  U5      n[        U[        5      (       d  [	        S5      e[        U5      u  pgUR                  UUUUS9nU R                  XS9$ )a  
Select values between particular times of the day (e.g., 9:00-9:30 AM).

By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.

Parameters
----------
start_time : datetime.time or str
    Initial time as a time filter limit.
end_time : datetime.time or str
    End time as a time filter limit.
inclusive : {"both", "neither", "left", "right"}, default "both"
    Include boundaries; whether to set each bound as closed or open.
axis : {0 or 'index', 1 or 'columns'}, default 0
    Determine range time on index or columns value.
    For `Series` this parameter is unused and defaults to 0.

Returns
-------
Series or DataFrame
    Data from the original object filtered to the specified dates range.

Raises
------
TypeError
    If the index is not  a :class:`DatetimeIndex`

See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
    values between particular times of the day.

Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
                     A
2018-04-09 00:00:00  1
2018-04-10 00:20:00  2
2018-04-11 00:40:00  3
2018-04-12 01:00:00  4

>>> ts.between_time('0:15', '0:45')
                     A
2018-04-10 00:20:00  2
2018-04-11 00:40:00  3

You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:

>>> ts.between_time('0:45', '0:15')
                     A
2018-04-09 00:00:00  1
2018-04-12 01:00:00  4
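
A further sketch of the ``inclusive`` parameter: closing only the left boundary
keeps rows that fall exactly on ``start_time`` but drops rows that fall exactly
on ``end_time``.

>>> ts.between_time('0:20', '0:40', inclusive='left')
                     A
2018-04-10 00:20:00  2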
r   r%  )include_startinclude_endr   )r!  r  r   r}   r  rZ   indexer_between_timerX  )	r   
start_timeend_time	inclusiver   r  left_inclusiveright_inclusiver  s	            r   between_timeNDFrame.between_timeQ$  s    H <D$$T*t$%//9::*<Y*G',,('	 - 
 &&w&::r   	start_dayc                   SSK Jn  U[        R                  Lat  U R	                  U5      nUS:X  a#  [
        R                  " S[        [        5       S9  O<[
        R                  " S[        U 5      R                   S3[        [        5       S9  OSnU[        R                  La:  [
        R                  " S[        U 5      R                   S	3[        [        5       S9  OS
nU[        R                  La:  [
        R                  " S[        U 5      R                   S3[        [        5       S9  OSnU" [        SU 5      UUUUUUUUU	U
US9$ )a
)  
Resample time-series data.

Convenience method for frequency conversion and resampling of time series.
The object must have a datetime-like index (`DatetimeIndex`, `PeriodIndex`,
or `TimedeltaIndex`), or the caller must pass the label of a datetime-like
series/index to the ``on``/``level`` keyword parameter.

Parameters
----------
rule : DateOffset, Timedelta or str
    The offset string or object representing target conversion.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
    Which axis to use for up- or down-sampling. For `Series` this parameter
    is unused and defaults to 0. Must be
    `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.

    .. deprecated:: 2.0.0
        Use frame.T.resample(...) instead.
closed : {{'right', 'left'}}, default None
    Which side of bin interval is closed. The default is 'left'
    for all frequency offsets except for 'ME', 'YE', 'QE', 'BME',
    'BA', 'BQE', and 'W' which all have a default of 'right'.
label : {{'right', 'left'}}, default None
    Which bin edge label to label bucket with. The default is 'left'
    for all frequency offsets except for 'ME', 'YE', 'QE', 'BME',
    'BA', 'BQE', and 'W' which all have a default of 'right'.
convention : {{'start', 'end', 's', 'e'}}, default 'start'
    For `PeriodIndex` only, controls whether to use the start or
    end of `rule`.

    .. deprecated:: 2.2.0
        Convert PeriodIndex to DatetimeIndex before resampling instead.
kind : {{'timestamp', 'period'}}, optional, default None
    Pass 'timestamp' to convert the resulting index to a
    `DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
    By default the input representation is retained.

    .. deprecated:: 2.2.0
        Convert index to desired type explicitly instead.

on : str, optional
    For a DataFrame, column to use instead of index for resampling.
    Column must be datetime-like.
level : str or int, optional
    For a MultiIndex, level (name or number) to use for
    resampling. `level` must be datetime-like.
origin : Timestamp or str, default 'start_day'
    The timestamp on which to adjust the grouping. The timezone of origin
    must match the timezone of the index.
    If string, must be one of the following:

    - 'epoch': `origin` is 1970-01-01
    - 'start': `origin` is the first value of the timeseries
    - 'start_day': `origin` is the first day at midnight of the timeseries

    - 'end': `origin` is the last value of the timeseries
    - 'end_day': `origin` is the ceiling midnight of the last day

    .. versionadded:: 1.3.0

    .. note::

        Only takes effect for Tick-frequencies (i.e. fixed frequencies like
        days, hours, and minutes, rather than months or quarters).
offset : Timedelta or str, default is None
    An offset timedelta added to the origin.

group_keys : bool, default False
    Whether to include the group keys in the result index when using
    ``.apply()`` on the resampled object.

    .. versionadded:: 1.5.0

        Not specifying ``group_keys`` will retain values-dependent behavior
        from pandas 1.4 and earlier (see :ref:`pandas 1.5.0 Release notes
        <whatsnew_150.enhancements.resample_group_keys>` for examples).

    .. versionchanged:: 2.0.0

        ``group_keys`` now defaults to ``False``.

Returns
-------
pandas.api.typing.Resampler
    :class:`~pandas.core.Resampler` object.

See Also
--------
Series.resample : Resample a Series.
DataFrame.resample : Resample a DataFrame.
groupby : Group {klass} by mapping, function, label, or list of labels.
asfreq : Reindex a {klass} with the given frequency without grouping.

Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`__
for more.

To learn more about the offset strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.

Examples
--------
Start by creating a series with 9 one minute timestamps.

>>> index = pd.date_range('1/1/2000', periods=9, freq='min')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00    0
2000-01-01 00:01:00    1
2000-01-01 00:02:00    2
2000-01-01 00:03:00    3
2000-01-01 00:04:00    4
2000-01-01 00:05:00    5
2000-01-01 00:06:00    6
2000-01-01 00:07:00    7
2000-01-01 00:08:00    8
Freq: min, dtype: int64

Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.

>>> series.resample('3min').sum()
2000-01-01 00:00:00     3
2000-01-01 00:03:00    12
2000-01-01 00:06:00    21
Freq: 3min, dtype: int64

Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).

>>> series.resample('3min', label='right').sum()
2000-01-01 00:03:00     3
2000-01-01 00:06:00    12
2000-01-01 00:09:00    21
Freq: 3min, dtype: int64

To include this value close the right side of the bin interval,
as shown below.

>>> series.resample('3min', label='right', closed='right').sum()
2000-01-01 00:00:00     0
2000-01-01 00:03:00     6
2000-01-01 00:06:00    15
2000-01-01 00:09:00    15
Freq: 3min, dtype: int64

Upsample the series into 30 second bins.

>>> series.resample('30s').asfreq()[0:5]   # Select first 5 rows
2000-01-01 00:00:00   0.0
2000-01-01 00:00:30   NaN
2000-01-01 00:01:00   1.0
2000-01-01 00:01:30   NaN
2000-01-01 00:02:00   2.0
Freq: 30s, dtype: float64

Upsample the series into 30 second bins and fill the ``NaN``
values using the ``ffill`` method.

>>> series.resample('30s').ffill()[0:5]
2000-01-01 00:00:00    0
2000-01-01 00:00:30    0
2000-01-01 00:01:00    1
2000-01-01 00:01:30    1
2000-01-01 00:02:00    2
Freq: 30s, dtype: int64

Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.

>>> series.resample('30s').bfill()[0:5]
2000-01-01 00:00:00    0
2000-01-01 00:00:30    1
2000-01-01 00:01:00    1
2000-01-01 00:01:30    2
2000-01-01 00:02:00    2
Freq: 30s, dtype: int64

Pass a custom function via ``apply``

>>> def custom_resampler(arraylike):
...     return np.sum(arraylike) + 5
...
>>> series.resample('3min').apply(custom_resampler)
2000-01-01 00:00:00     8
2000-01-01 00:03:00    17
2000-01-01 00:06:00    26
Freq: 3min, dtype: int64

For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.

>>> d = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
...      'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
...                                     periods=8,
...                                     freq='W')
>>> df
   price  volume week_starting
0     10      50    2018-01-07
1     11      60    2018-01-14
2      9      40    2018-01-21
3     13     100    2018-01-28
4     14      50    2018-02-04
5     18     100    2018-02-11
6     17      40    2018-02-18
7     19      50    2018-02-25
>>> df.resample('ME', on='week_starting').mean()
               price  volume
week_starting
2018-01-31     10.75    62.5
2018-02-28     17.00    60.0

For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.

>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = {{'price': [10, 11, 9, 13, 14, 18, 17, 19],
...       'volume': [50, 60, 40, 100, 50, 100, 40, 50]}}
>>> df2 = pd.DataFrame(
...     d2,
...     index=pd.MultiIndex.from_product(
...         [days, ['morning', 'afternoon']]
...     )
... )
>>> df2
                      price  volume
2000-01-01 morning       10      50
           afternoon     11      60
2000-01-02 morning        9      40
           afternoon     13     100
2000-01-03 morning       14      50
           afternoon     18     100
2000-01-04 morning       17      40
           afternoon     19      50
>>> df2.resample('D', level=0).sum()
            price  volume
2000-01-01     21     110
2000-01-02     22     140
2000-01-03     32     150
2000-01-04     36      90

If you want to adjust the start of the bins based on a fixed timestamp:

>>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
>>> rng = pd.date_range(start, end, freq='7min')
>>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
>>> ts
2000-10-01 23:30:00     0
2000-10-01 23:37:00     3
2000-10-01 23:44:00     6
2000-10-01 23:51:00     9
2000-10-01 23:58:00    12
2000-10-02 00:05:00    15
2000-10-02 00:12:00    18
2000-10-02 00:19:00    21
2000-10-02 00:26:00    24
Freq: 7min, dtype: int64

>>> ts.resample('17min').sum()
2000-10-01 23:14:00     0
2000-10-01 23:31:00     9
2000-10-01 23:48:00    21
2000-10-02 00:05:00    54
2000-10-02 00:22:00    24
Freq: 17min, dtype: int64

>>> ts.resample('17min', origin='epoch').sum()
2000-10-01 23:18:00     0
2000-10-01 23:35:00    18
2000-10-01 23:52:00    27
2000-10-02 00:09:00    39
2000-10-02 00:26:00    24
Freq: 17min, dtype: int64

>>> ts.resample('17min', origin='2000-01-01').sum()
2000-10-01 23:24:00     3
2000-10-01 23:41:00    15
2000-10-01 23:58:00    45
2000-10-02 00:15:00    45
Freq: 17min, dtype: int64

If you want to adjust the start of the bins with an `offset` Timedelta, the two
following lines are equivalent:

>>> ts.resample('17min', origin='start').sum()
2000-10-01 23:30:00     9
2000-10-01 23:47:00    21
2000-10-02 00:04:00    54
2000-10-02 00:21:00    24
Freq: 17min, dtype: int64

>>> ts.resample('17min', offset='23h30min').sum()
2000-10-01 23:30:00     9
2000-10-01 23:47:00    21
2000-10-02 00:04:00    54
2000-10-02 00:21:00    24
Freq: 17min, dtype: int64

If you want to take the largest Timestamp as the end of the bins:

>>> ts.resample('17min', origin='end').sum()
2000-10-01 23:35:00     0
2000-10-01 23:52:00    18
2000-10-02 00:09:00    27
2000-10-02 00:26:00    63
Freq: 17min, dtype: int64

In contrast with the `start_day`, you can use `end_day` to take the ceiling
midnight of the largest Timestamp as the end of the bins and drop the bins
not containing data:

>>> ts.resample('17min', origin='end_day').sum()
2000-10-01 23:38:00     3
2000-10-01 23:55:00    15
2000-10-02 00:12:00    45
2000-10-02 00:29:00    45
Freq: 17min, dtype: int64
r   )get_resamplerr   z^DataFrame.resample with axis=1 is deprecated. Do `frame.T.resample(...)` without axis instead.r  The 'axis' keyword in z@.resample is deprecated and will be removed in a future version.zThe 'kind' keyword in zv.resample is deprecated and will be removed in a future version. Explicitly cast the index to the desired type insteadNzThe 'convention' keyword in z.resample is deprecated and will be removed in a future version. Explicitly cast PeriodIndex to DatetimeIndex before resampling instead.rQ  zSeries | DataFrame)r  r  closedr   r   
conventionr9  r:  originoffset
group_keys)r"  r7  r   r  r!  r  r	  r  rU   r
  r   r   )r   ruler   r9  r  r:  r   onr:  r;  r<  r=  r7  s                r   resampleNDFrame.resample$  sK   p
 	7s~~%((.DqyD!/1	 ,T$Z-@-@,A BJ J!/1	 Ds~~%MM(d)<)<(= >H H +- DS^^+MM.tDz/B/B.C D  +- !J%t,!!
 	
r   c                   [         R                  " S[        [        5       S9  [	        U R
                  [        5      (       d  [        S5      e[        U R
                  5      S:X  a  U R                  SS9$ [        U5      n[	        U[        5      (       dD  UR                  U R
                  S   5      (       a!  U R
                  S   UR                  -
  U-   =p#OU R
                  S   U-   =p#[	        U[        5      (       a8  X R
                  ;   a)  U R
                  R                  USS9nU R                  S	U $ U R                   S	U $ )
aX  
Select initial periods of time series data based on a date offset.

.. deprecated:: 2.1
    :meth:`.first` is deprecated and will be removed in a future version.
    Please create a mask and filter using `.loc` instead.

For a DataFrame with a sorted DatetimeIndex, this function can
select the first few rows based on a date offset.

Parameters
----------
offset : str, DateOffset or dateutil.relativedelta
    The offset length of the data that will be selected. For instance,
    '1ME' will display all the rows having their index within the first month.

Returns
-------
Series or DataFrame
    A subset of the caller.

Raises
------
TypeError
    If the index is not  a :class:`DatetimeIndex`

See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.

Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
            A
2018-04-09  1
2018-04-11  2
2018-04-13  3
2018-04-15  4

Get the rows for the first 3 days:

>>> ts.first('3D')
            A
2018-04-09  1
2018-04-11  2

Notice that the data for the first 3 calendar days were returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13 was
not returned.
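
A sketch of the mask-based replacement suggested by the deprecation note, where
``pd.Timedelta('3D')`` is one way to express the same 3-day window:

>>> ts.loc[ts.index < ts.index[0] + pd.Timedelta('3D')]
            A
2018-04-09  1
2018-04-11  2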
zqfirst is deprecated and will be removed in a future version. Please create a mask and filter using `.loc` insteadr  z+'first' only supports a DatetimeIndex indexr   Fr   leftr  N)r  r	  r  rU   r   r  r}   r  r   r   r   r   is_on_offsetbaser  r  rf  )r   r<  end_dateends       r   firstNDFrame.first:&  s   p 	C')		
 $**m44IJJtzz?a99%9((6"&$''F,?,?

1,N,N "ZZ]V[[86AAHs!ZZ]V33H fd##JJ(>**))()@C99Tc?"xx~r   c                v   [         R                  " S[        [        5       S9  [	        U R
                  [        5      (       d  [        S5      e[        U R
                  5      S:X  a  U R                  SS9$ [        U5      nU R
                  S   U-
  nU R
                  R                  USS	9nU R                  US
 $ )a  
Select final periods of time series data based on a date offset.

.. deprecated:: 2.1
    :meth:`.last` is deprecated and will be removed in a future version.
    Please create a mask and filter using `.loc` instead.

For a DataFrame with a sorted DatetimeIndex, this function
selects the last few rows based on a date offset.

Parameters
----------
offset : str, DateOffset, dateutil.relativedelta
    The offset length of the data that will be selected. For instance,
    '3D' will display all the rows having their index within the last 3 days.

Returns
-------
Series or DataFrame
    A subset of the caller.

Raises
------
TypeError
    If the index is not  a :class:`DatetimeIndex`

See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.

Notes
-----
.. deprecated:: 2.1.0
    Please create a mask and filter using `.loc` instead

Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
            A
2018-04-09  1
2018-04-11  2
2018-04-13  3
2018-04-15  4

Get the rows for the last 3 days:

>>> ts.last('3D')  # doctest: +SKIP
            A
2018-04-13  3
2018-04-15  4

Notice the data for 3 last calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
zplast is deprecated and will be removed in a future version. Please create a mask and filter using `.loc` insteadr  z*'last' only supports a DatetimeIndex indexr   Fr   r  r  r  N)r  r	  r  rU   r   r  r}   r  r   r   r   r  r  )r   r<  
start_daterQ  s       r   r  NDFrame.last&  s    z 	C')		
 $**m44HIItzz?a99%9((6"ZZ^f,


''
'Ayy  r   c                  ^ ^^^^^
 T R                  U5      m
TS;  a  Sn[        U5      eUU
UUUU 4S jnU(       aF  T R                  S:X  a%  [        T R                  5      (       d  [        S5      eT R                  5       n	OT n	U" U	5      $ )aD  
Compute numerical data ranks (1 through n) along axis.

By default, equal values are assigned a rank that is the average of the
ranks of those values.

Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
    Index to direct ranking.
    For `Series` this parameter is unused and defaults to 0.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
    How to rank the group of records that have the same value (i.e. ties):

    * average: average rank of the group
    * min: lowest rank in the group
    * max: highest rank in the group
    * first: ranks assigned in order they appear in the array
    * dense: like 'min', but rank always increases by 1 between groups.

numeric_only : bool, default False
    For DataFrame objects, rank only numeric columns if set to True.

    .. versionchanged:: 2.0.0
        The default value of ``numeric_only`` is now ``False``.

na_option : {'keep', 'top', 'bottom'}, default 'keep'
    How to rank NaN values:

    * keep: assign NaN rank to NaN values
    * top: assign lowest rank to NaN values
    * bottom: assign highest rank to NaN values

ascending : bool, default True
    Whether or not the elements should be ranked in ascending order.
pct : bool, default False
    Whether or not to display the returned rankings in percentile
    form.

Returns
-------
same type as caller
    Return a Series or DataFrame with data ranks as values.

See Also
--------
core.groupby.DataFrameGroupBy.rank : Rank of values within each group.
core.groupby.SeriesGroupBy.rank : Rank of values within each group.

Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
...                                    'spider', 'snake'],
...                         'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
    Animal  Number_legs
0      cat          4.0
1  penguin          2.0
2      dog          4.0
3   spider          8.0
4    snake          NaN

Ties are assigned the mean of the ranks (by default) for the group.

>>> s = pd.Series(range(5), index=list("abcde"))
>>> s["d"] = s["b"]
>>> s.rank()
a    1.0
b    2.5
c    4.0
d    2.5
e    5.0
dtype: float64

The following example shows how the method behaves with the above
parameters:

* default_rank: this is the default behaviour obtained without using
  any parameter.
* max_rank: setting ``method = 'max'``, the records that have the
  same values are ranked using the highest rank (e.g. since 'cat'
  and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
  with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
  percentile rank.

>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
    Animal  Number_legs  default_rank  max_rank  NA_bottom  pct_rank
0      cat          4.0           2.5       3.0        2.5     0.625
1  penguin          2.0           1.0       1.0        1.0     0.250
2      dog          4.0           2.5       3.0        2.5     0.625
3   spider          8.0           4.0       4.0        4.0     1.000
4    snake          NaN           NaN       NaN        5.0       NaN
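
``method='dense'`` (described above) never leaves gaps between ranks; an
additional illustrative call on the same ``df``:

>>> df['Number_legs'].rank(method='dense')
0    2.0
1    1.0
2    2.0
3    3.0
4    NaN
Name: Number_legs, dtype: float64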
>   topkeepbottomz3na_option must be one of 'keep', 'top', or 'bottom'c           	     <  > U R                   S:X  a  U R                  nOU R                  n[        U[        5      (       a  UR                  TTTTTS9nO[        R                  " UTTTTTS9nT	R                  " U40 U R                  5       D6nUR                  T	SS9$ )Nr+  )r   r  r  	na_optionpctrankr~  )r,  r   r  r   ry   _rankalgosrT  r  r  r   )
r   r   ranks	ranks_objr  r
  r  rR  rS  r   s
       r   rankerNDFrame.rank.<locals>.rankerP'  s    yyA~ &.11!!'' %  

!!'' ))%O43L3L3NOI))$v)>>r   r   zDSeries.rank does not allow numeric_only=True with non-numeric dtype.)r!  r   r,  re   r   r  rU  )r   r   r  numeric_onlyrR  r  rS  r(  rY  r   r
  s   ` ` ```   @r   rT  NDFrame.rank&  s    Z ((.55GCS/!	? 	?: yyA~&6tzz&B&B)  ))+DDd|r   comparer   c           	     `   [        U 5      [        U5      La>  [        U 5      R                  [        U5      R                  pv[        SU SU SU S35      eX:H  U R                  5       UR                  5       -  -  ) nUR	                  SSS9  U(       d"  U R                  U5      n UR                  U5      nU(       d]  [        U [        5      (       a@  UR                  5       n	UR                  SS9n
U R                  X4   n UR                  X4   nOX   n X   n[        U[        5      (       d  [        S	[        U5       S
35      eUS;   a  SnOU R                  U5      n[        X/UUS9nXR                  :  a  U$ UR                  U5      n[        R                   " UR"                  5      n[        R$                  " ['        U5      5      Ul        [)        [+        SUR,                  5      5      S/-   n[        U[        5      (       a  UR/                  XS9nOUR/                  U5      nX   UR                  US9l        [        R$                  " UR0                  U   5      R3                  SUR0                  U   S-  /5      R4                  R7                  5       nUR9                  UUS9nU$ )Nzcan only compare 'z' (not 'z	') with 'r{  Tr  r   r   zPassing 'result_names' as a z= is not supported. Provide 'result_names' as a tuple instead.)r   r(  )r   r7  r   r+  )r
  r   r  ro   r  r  r   rk   r  rf  r`  r!  r   r,  r  rh  arrayr2  rP  r   ri  r  nlevelsreorder_levelsra  reshaper=   flattenrL  )r   r  
align_axis
keep_shape
keep_equalresult_namescls_self	cls_otherr  cmaskrmaskr   diffr  ax_namesorderrT  s                    r   r]  NDFrame.comparez'  sy    :T%[("&t*"5"5tE{7K7Ki$XJhyk8*TUV  -DIIK%**,$>?@D$'::d#DKK%E$--
a(xx-		%,/z,...tL/A.B CH H 
 'D((4D M
 99 K^^D!88BHH% 99S]+ U1bjj)*aS0dL))&&u&8D&&u-D +3/D!' IIdjj&'//DJJt4D4I0JKMMUUW 	 yyty,r   outerc                   U[         R                  Ld&  U[         R                  Ld  U	[         R                  La9  [        R                  " S[	        U 5      R
                   S3[        [        5       S9  U	[         R                  L a  Sn	U[         R                  L a  SnU[         R                  L a  SnUb  [        U5      nU
[         R                  La  S[	        U 5      R
                   S3nU
bK  U R                  S:X  a  UR                  S	:X  a  US
-  nO%U R                  S	:X  a  UR                  S:X  a  US-  n[        R                  " U[        [        5       S9  OSn
U
S:X  a  U R                  UR                  :w  a  [        U [        5      (       aX  U R                  nU" UR                   Vs0 s H  oU _M     sn40 UR                  5       D6nUR                  UUUUUUUUU	S9	SS	 $ [        U[        5      (       aX  UR                  nU" U R                   Vs0 s H  oU_M     sn40 U R                  5       D6nU R                  UUUUUUUUU	S9	SS	 $ Ub  U R!                  U5      n[        U["        5      (       a  U R                  UUUUUUUUU	S9	u  nnnOH[        U[        5      (       a  U R%                  UUUUUUUUU	S9	u  nnnO['        S[	        U5       35      e[)        [*        U5      nU R                  S:X  d  US:X  a  [        UR,                  R.                  [0        5      (       a]  UR,                  R2                  UR,                  R2                  :w  a/  Ub,  UR5                  SS9nUR5                  SS9nUUl        UUl        UR7                  U 5      nUR7                  U5      nUU4$ s  snf s  snf )a  
Align two objects on their axes with the specified join method.

Join method is specified for each axis Index.

Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
    Type of alignment to be performed.

    * left: use only keys from left frame, preserve key order.
    * right: use only keys from right frame, preserve key order.
    * outer: use union of keys from both frames, sort keys lexicographically.
    * inner: use intersection of keys from both frames,
      preserve the order of the left keys.

axis : allowed axis of the other object, default None
    Align on index (0), columns (1), or both (None).
level : int or level name, default None
    Broadcast across a level, matching Index values on the
    passed MultiIndex level.
copy : bool, default True
    Always returns new objects. If copy=False and no reindexing is
    required then original objects are returned.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
fill_value : scalar, default np.nan
    Value to use for missing values. Defaults to NaN, but can be any
    "compatible" value.
method : {{'backfill', 'bfill', 'pad', 'ffill', None}}, default None
    Method to use for filling holes in reindexed Series:

    - pad / ffill: propagate last valid observation forward to next valid.
    - backfill / bfill: use NEXT valid observation to fill gap.

    .. deprecated:: 2.1

limit : int, default None
    If method is specified, this is the maximum number of consecutive
    NaN values to forward/backward fill. In other words, if there is
    a gap with more than this number of consecutive NaNs, it will only
    be partially filled. If method is not specified, this is the
    maximum number of entries along the entire axis where NaNs will be
    filled. Must be greater than 0 if not None.

    .. deprecated:: 2.1

fill_axis : {axes_single_arg}, default 0
    Filling axis, method and limit.

    .. deprecated:: 2.1

broadcast_axis : {axes_single_arg}, default None
    Broadcast values along this axis, if aligning two objects of
    different dimensions.

    .. deprecated:: 2.1

Returns
-------
tuple of ({klass}, type of other)
    Aligned objects.

Examples
--------
>>> df = pd.DataFrame(
...     [[1, 2, 3, 4], [6, 7, 8, 9]], columns=["D", "B", "E", "A"], index=[1, 2]
... )
>>> other = pd.DataFrame(
...     [[10, 20, 30, 40], [60, 70, 80, 90], [600, 700, 800, 900]],
...     columns=["A", "B", "C", "D"],
...     index=[2, 3, 4],
... )
>>> df
   D  B  E  A
1  1  2  3  4
2  6  7  8  9
>>> other
    A    B    C    D
2   10   20   30   40
3   60   70   80   90
4  600  700  800  900

Align on columns:

>>> left, right = df.align(other, join="outer", axis=1)
>>> left
   A  B   C  D  E
1  4  2 NaN  1  3
2  9  7 NaN  6  8
>>> right
    A    B    C    D   E
2   10   20   30   40 NaN
3   60   70   80   90 NaN
4  600  700  800  900 NaN

We can also align on the index:

>>> left, right = df.align(other, join="outer", axis=0)
>>> left
    D    B    E    A
1  1.0  2.0  3.0  4.0
2  6.0  7.0  8.0  9.0
3  NaN  NaN  NaN  NaN
4  NaN  NaN  NaN  NaN
>>> right
    A      B      C      D
1    NaN    NaN    NaN    NaN
2   10.0   20.0   30.0   40.0
3   60.0   70.0   80.0   90.0
4  600.0  700.0  800.0  900.0

Finally, the default `axis=None` will align on both index and columns:

>>> left, right = df.align(other, join="outer", axis=None)
>>> left
     A    B   C    D    E
1  4.0  2.0 NaN  1.0  3.0
2  9.0  7.0 NaN  6.0  8.0
3  NaN  NaN NaN  NaN  NaN
4  NaN  NaN NaN  NaN  NaN
>>> right
       A      B      C      D   E
1    NaN    NaN    NaN    NaN NaN
2   10.0   20.0   30.0   40.0 NaN
3   60.0   70.0   80.0   90.0 NaN
4  600.0  700.0  800.0  900.0 NaN
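
Only ``join='outer'`` is shown above; other join types keep one side's labels.
A minimal illustrative check with the same ``df`` and ``other`` (``'left'``
keeps the calling object's index):

>>> left, right = df.align(other, join="left", axis=0)
>>> list(left.index), list(right.index)
([1, 2], [1, 2])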
"""
        # Implementation notes (align): passing ``method``, ``limit`` or
        # ``fill_axis`` warns "The 'method', 'limit', and 'fill_axis' keywords
        # in <type>.align are deprecated and will be removed in a future
        # version. Call fillna directly on the returned objects instead.";
        # passing ``broadcast_axis`` warns "The 'broadcast_axis' keyword in
        # <type>.align is deprecated and will be removed in a future version."
        # An unsupported ``other`` raises TypeError("unsupported type: ...").
        # The private helpers ``_align_frame`` and ``_align_series`` implement
        # the DataFrame and Series alignment paths used by ``align``.

    def _where(self, cond, other=lib.no_default, inplace=False, axis=None, level=None):
        """
        Equivalent to public method `where`, except that `other` is not
        applied as a function even if callable. Used in __setitem__.
        """
        ...

    @doc(
        klass="Series/DataFrame",
        cond="True",
        cond_rev="False",
        name="where",
        name_other="mask",
    )
    def where(self, cond, other=lib.no_default, *, inplace=False, axis=None, level=None):
        """
Replace values where the condition is {cond_rev}.

Parameters
----------
cond : bool {klass}, array-like, or callable
    Where `cond` is {cond}, keep the original value. Where
    {cond_rev}, replace with corresponding value from `other`.
    If `cond` is callable, it is computed on the {klass} and
    should return boolean {klass} or array. The callable must
    not change input {klass} (though pandas doesn't check it).
other : scalar, {klass}, or callable
    Entries where `cond` is {cond_rev} are replaced with
    corresponding value from `other`.
    If other is callable, it is computed on the {klass} and
    should return scalar or {klass}. The callable must not
    change input {klass} (though pandas doesn't check it).
    If not specified, entries will be filled with the corresponding
    NULL value (``np.nan`` for numpy dtypes, ``pd.NA`` for extension
    dtypes).
inplace : bool, default False
    Whether to perform the operation in place on the data.
axis : int, default None
    Alignment axis if needed. For `Series` this parameter is
    unused and defaults to 0.
level : int, default None
    Alignment level if needed.

Returns
-------
Same type as caller or None if ``inplace=True``.

See Also
--------
:func:`DataFrame.{name_other}` : Return an object of same shape as
    self.

Notes
-----
The {name} method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``{cond}`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used. If the axis of ``other`` does not align with axis of
``cond`` {klass}, the misaligned index positions will be filled with
{cond_rev}.

The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.

For further details and examples see the ``{name}`` documentation in
:ref:`indexing <indexing.where_mask>`.

The dtype of the object takes precedence. The fill value is casted to
the object's dtype, if this can be done losslessly.

Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0    NaN
1    1.0
2    2.0
3    3.0
4    4.0
dtype: float64
>>> s.mask(s > 0)
0    0.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

>>> s = pd.Series(range(5))
>>> t = pd.Series([True, False])
>>> s.where(t, 99)
0     0
1    99
2    99
3    99
4    99
dtype: int64
>>> s.mask(t, 99)
0    99
1     1
2    99
3    99
4    99
dtype: int64

>>> s.where(s > 1, 10)
0    10
1    10
2    2
3    3
4    4
dtype: int64
>>> s.mask(s > 1, 10)
0     0
1     1
2    10
3    10
4    10
dtype: int64

>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
   A  B
0  0  1
1  2  3
2  4  5
3  6  7
4  8  9
>>> m = df % 3 == 0
>>> df.where(m, -df)
   A  B
0  0 -1
1 -2  3
2 -4 -5
3  6 -7
4 -8  9
>>> df.where(m, -df) == np.where(m, df, -df)
      A     B
0  True  True
1  True  True
2  True  True
3  True  True
4  True  True
>>> df.where(m, -df) == df.mask(~m, -df)
      A     B
0  True  True
1  True  True
2  True  True
3  True  True
4  True  True
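
As described above, ``cond`` (and ``other``) may also be callables computed on
the calling object; a minimal illustrative sketch:

>>> s = pd.Series(range(5))
>>> s.where(lambda x: x % 2 == 0, 9)
0    0
1    9
2    2
3    9
4    4
dtype: int64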
r   r+  r  r   r   )rX   rG   r   r  r  rH   r  r	  rP   rL   rW  r   rl   r  rQ   r  rs   r  r  r   r  r  r   r   r:  r  r  s           r   r  r  3*  s    t &gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..743K3KNI#MM>%#$ ((5{{4u==r   c                   g r   r  r  s         r   r  NDFrame.mask*  r  r   c                   g r   r  r  s         r   r  r  *  r  r   c                   g r   r  r  s         r   r  r  +  r  r   c                  [        US5      nU(       a  [        (       dL  [        5       (       a=  [        R                  " U 5      [
        ::  a  [        R                  " [        [        SS9  O[        (       d  [        5       (       d  U R                  5       (       aj  [        R                  " U 5      n[
        n[        U [        5      (       a  [        U S5      (       a  US-  nXg::  a  [        R                  " [        [        SS9  [         R"                  " X5      n[         R"                  " X 5      n[        US5      (       d  [$        R&                  " U5      nU R)                  U) UUUUS9$ )Nr   r+  r  r   r   r  )r  r   r   r:  )rX   rG   r   r  r  rH   r  r	  rP   rL   rW  r   rl   r  rQ   r  rs   r  rh  r_  r  r  s           r   r  r  +  s   $ &gy94/11??4(I5MM6.#$ D+--1133ood+%	dI..743K3KNI#MM>%#$ ''3((5 t\**88D>D{{E  
 	
r   c                `   U R                  U5      nUbE  U[        R                  La2  [        R                  " S[
        [        5       S9  [        R                  nUS:X  a  U R                  SS9$ [        U5      (       a3  [        U [        5      (       a  U R                  5       R                  XX4S9$ [        [        U5      nUcY  U R                  U5      nUS:X  d   eU R                  R                  XS9nU R!                  XfR"                  S9R%                  U S	S
9$ U R'                  XU5      $ )aa  
Shift index by desired number of periods with an optional time `freq`.

When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`. `freq` can be inferred
when specified as "infer" as long as either freq or inferred_freq
attribute is set in the index.

Parameters
----------
periods : int or Sequence
    Number of periods to shift. Can be positive or negative.
    If an iterable of ints, the data will be shifted once by each int.
    This is equivalent to shifting by one value at a time and
    concatenating all resulting frames. The resulting columns will have
    the shift suffixed to their column names. For multiple periods,
    axis must not be 1.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
    Offset to use from the tseries module or time rule (e.g. 'EOM').
    If `freq` is specified then the index values are shifted but the
    data is not realigned. That is, use `freq` if you would like to
    extend the index when shifting and preserve the original data.
    If `freq` is specified as "infer" then it will be inferred from
    the freq or inferred_freq attributes of the index. If neither of
    those attributes exist, a ValueError is thrown.
axis : {{0 or 'index', 1 or 'columns', None}}, default None
    Shift direction. For `Series` this parameter is unused and defaults to 0.
fill_value : object, optional
    The scalar value to use for newly introduced missing values.
    The default depends on the dtype of `self`.
    For numeric data, ``np.nan`` is used.
    For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
    For extension dtypes, ``self.dtype.na_value`` is used.
suffix : str, optional
    If str and periods is an iterable, this is added after the column
    name and before the shift value for each shifted column name.

Returns
-------
{klass}
    Copy of input object, shifted.

See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.

Examples
--------
>>> df = pd.DataFrame({{"Col1": [10, 20, 15, 30, 45],
...                    "Col2": [13, 23, 18, 33, 48],
...                    "Col3": [17, 27, 22, 37, 52]}},
...                   index=pd.date_range("2020-01-01", "2020-01-05"))
>>> df
            Col1  Col2  Col3
2020-01-01    10    13    17
2020-01-02    20    23    27
2020-01-03    15    18    22
2020-01-04    30    33    37
2020-01-05    45    48    52

>>> df.shift(periods=3)
            Col1  Col2  Col3
2020-01-01   NaN   NaN   NaN
2020-01-02   NaN   NaN   NaN
2020-01-03   NaN   NaN   NaN
2020-01-04  10.0  13.0  17.0
2020-01-05  20.0  23.0  27.0

>>> df.shift(periods=1, axis="columns")
            Col1  Col2  Col3
2020-01-01   NaN    10    13
2020-01-02   NaN    20    23
2020-01-03   NaN    15    18
2020-01-04   NaN    30    33
2020-01-05   NaN    45    48

>>> df.shift(periods=3, fill_value=0)
            Col1  Col2  Col3
2020-01-01     0     0     0
2020-01-02     0     0     0
2020-01-03     0     0     0
2020-01-04    10    13    17
2020-01-05    20    23    27

>>> df.shift(periods=3, freq="D")
            Col1  Col2  Col3
2020-01-04    10    13    17
2020-01-05    20    23    27
2020-01-06    15    18    22
2020-01-07    30    33    37
2020-01-08    45    48    52

>>> df.shift(periods=3, freq="infer")
            Col1  Col2  Col3
2020-01-04    10    13    17
2020-01-05    20    23    27
2020-01-06    15    18    22
2020-01-07    30    33    37
2020-01-08    45    48    52

>>> df['Col1'].shift(periods=[0, 1, 2])
            Col1_0  Col1_1  Col1_2
2020-01-01      10     NaN     NaN
2020-01-02      20    10.0     NaN
2020-01-03      15    20.0    10.0
2020-01-04      30    15.0    20.0
2020-01-05      45    30.0    15.0
NzPassing a 'freq' together with a 'fill_value' silently ignores the fill_value and is deprecated. This will raise in a future version.r  r   r   )periodsr  r   r  )r  r  r   shiftr~  )r!  r   r  r  r	  r  rU   r   rc   r   rl   r  r  r   r  r   r   r   r   _shift_with_freq)r   r  r  r   r  r  r  s          r   r  NDFrame.shiftI+  s$   r $$T*
#.. @MM +- Ja<99$9''  Zi%@%@==?(( )   sG$<((.D199yywNH--}} . l4l01 $$WD99r   c           	     n   U R                  U5      nUS:X  a.  [        USS 5      nUc  [        USS 5      nUc  Sn[        U5      eO.[        U[        5      (       a  [        U[
        5      n[        X6S9n[        U[
        5      (       a~  [        UR                  5      nX7:w  aR  Uc   e[        S[        UR                  UR                  5       S[        UR                  UR                  5       35      eUR                  U5      nOUR                  X5      nU R                  XS9n	U	R                  U S	S
9$ )Nr  r  inferred_freqz6Freq was not set in the index hence cannot be inferred)	is_periodzGiven freq z! does not match PeriodIndex freq r   r  r~  )r  r0  r   r   r   r   r   r  r   r  r8  r  rp  r   )
r   r  r   r  r  r(  r  	orig_freqrg  r  s
             r   r  NDFrame._shift_with_freq+  s/    t$7?5&$/D|uot<|N o%  c"""5+6IT7De[))!%**-I  ,,, !"8"K!L M7-ikk9>>JKM 
 [[)F[[/Fv1""4"88r   c                   Uc  SnU R                  U5      nU R                  U5      nUR                  (       d  UR                  (       d  [	        S5      eUR
                  (       a  SSKJn  U" U5      nU" U5      nUb  Ub  X:  a  [	        SU SU 35      e[        U5      S:  a'  UR                  (       a  UR                  5       S:  a  X!p![        SS5      /U R                  -  n[        X5      Xs'   U R                  [        U5         n[        U[        5      (       a*  [!        XR#                  U5      UR%                  X5      5        UR'                  U=(       a    [)        5       (       + S9nU$ )	a  
Truncate a Series or DataFrame before and after some index value.

This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.

Parameters
----------
before : date, str, int
    Truncate all rows before this index value.
after : date, str, int
    Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
    Axis to truncate. Truncates the index (rows) by default.
    For `Series` this parameter is unused and defaults to 0.
copy : bool, default True
    Return a copy of the truncated section.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``

Returns
-------
type of caller
    The truncated Series or DataFrame.

See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.

Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.

Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
...                    'B': ['f', 'g', 'h', 'i', 'j'],
...                    'C': ['k', 'l', 'm', 'n', 'o']},
...                   index=[1, 2, 3, 4, 5])
>>> df
   A  B  C
1  a  f  k
2  b  g  l
3  c  h  m
4  d  i  n
5  e  j  o

>>> df.truncate(before=2, after=4)
   A  B  C
2  b  g  l
3  c  h  m
4  d  i  n

The columns of a DataFrame can be truncated.

>>> df.truncate(before="A", after="B", axis="columns")
   A  B
1  a  f
2  b  g
3  c  h
4  d  i
5  e  j

For Series, only rows can be truncated.

>>> df['A'].truncate(before=2, after=4)
2    b
3    c
4    d
Name: A, dtype: object

The index values in ``truncate`` can be datetimes or string
dates.

>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
                     A
2016-01-31 23:59:56  1
2016-01-31 23:59:57  1
2016-01-31 23:59:58  1
2016-01-31 23:59:59  1
2016-02-01 00:00:00  1

>>> df.truncate(before=pd.Timestamp('2016-01-05'),
...             after=pd.Timestamp('2016-01-10')).tail()
                     A
2016-01-09 23:59:56  1
2016-01-09 23:59:57  1
2016-01-09 23:59:58  1
2016-01-09 23:59:59  1
2016-01-10 00:00:00  1

Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.

>>> df.truncate('2016-01-05', '2016-01-10').tail()
                     A
2016-01-09 23:59:56  1
2016-01-09 23:59:57  1
2016-01-09 23:59:58  1
2016-01-09 23:59:59  1
2016-01-10 00:00:00  1

Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.

>>> df.loc['2016-01-05':'2016-01-10', :].tail()
                     A
2016-01-10 23:59:55  1
2016-01-10 23:59:56  1
2016-01-10 23:59:57  1
2016-01-10 23:59:58  1
2016-01-10 23:59:59  1
Nr   z truncate requires a sorted index)to_datetimez
Truncate: z must be after r   r   )r!  r  r  is_monotonic_decreasingr   _is_all_datespandas.core.tools.datetimesr  r   rN  r  r  rf  r`  r   r   rt  r%  truncater   r   )	r   beforeafterr   r   r  r  slicerr  s	            r   r  NDFrame.truncate,  s<   T <D$$T*^^D! ))"2L2L?@@ ? (F&E%"3z%xHIIr7Q;255"**,:J!Ed#$t~~5V+%-(b*%%F//5r{{67QR$"D/B/D+DEr   c                  ^ ^ T R                  T5      mT R                  T5      nUU 4S jn[        U[        5      (       a7  UR	                  U5      nU" UR
                  U   U5      nUR                  XsS9nO*USSUR                  4;  a  [        SU S35      eU" XQ5      nT R                  U=(       a    [        5       (       + S9nUR                  UTSS	9nUR                  T S
S9$ )aE  
Convert tz-aware axis to target time zone.

Parameters
----------
tz : str or tzinfo object or None
    Target time zone. Passing ``None`` will convert to
    UTC and remove the timezone information.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
    The axis to convert
level : int, str, default None
    If axis is a MultiIndex, convert a specific level. Otherwise
    must be None.
copy : bool, default True
    Also make a copy of the underlying data.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``

Returns
-------
{klass}
    Object with time zone converted axis.

Raises
------
TypeError
    If the axis is tz-naive.

Examples
--------
Change to another time zone:

>>> s = pd.Series(
...     [1],
...     index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']),
... )
>>> s.tz_convert('Asia/Shanghai')
2018-09-15 07:30:00+08:00    1
dtype: int64

Pass None to convert to UTC and get a tz-naive index:

>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_convert(None)
2018-09-14 23:30:00    1
dtype: int64
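
When the axis is a ``MultiIndex``, ``level`` selects which level to convert;
a minimal sketch (``midx`` is a hypothetical two-level index whose second
level is timezone-aware):

>>> midx = pd.MultiIndex.from_product(
...     [['a'], pd.DatetimeIndex(['2018-09-15 01:30:00+02:00'])]
... )
>>> s = pd.Series([1], index=midx)
>>> s.tz_convert('UTC', level=1)  # doctest: +SKIP
a  2018-09-14 23:30:00+00:00    1
dtype: int64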
"""
        # Implementation notes (tz_convert): a naive or non-datetime axis
        # raises TypeError("... is not a valid DatetimeIndex or PeriodIndex"),
        # and an invalid ``level`` raises ValueError("The level ... is not
        # valid").

    def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous="raise", nonexistent="raise"):
        """
Localize tz-naive index of a Series or DataFrame to target time zone.

This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.

Parameters
----------
tz : str or tzinfo or None
    Time zone to localize. Passing ``None`` will remove the
    time zone information and preserve local time.
axis : {{0 or 'index', 1 or 'columns'}}, default 0
    The axis to localize
level : int, str, default None
    If axis is a MultiIndex, localize a specific level. Otherwise
    must be None.
copy : bool, default True
    Also make a copy of the underlying data.

    .. note::
        The `copy` keyword will change behavior in pandas 3.0.
        `Copy-on-Write
        <https://pandas.pydata.org/docs/dev/user_guide/copy_on_write.html>`__
        will be enabled by default, which means that all methods with a
        `copy` keyword will use a lazy copy mechanism to defer the copy and
        ignore the `copy` keyword. The `copy` keyword will be removed in a
        future version of pandas.

        You can already get the future behavior and improvements through
        enabling copy on write ``pd.options.mode.copy_on_write = True``
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
    When clocks moved backward due to DST, ambiguous times may arise.
    For example in Central European Time (UTC+01), when going from
    03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
    00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
    `ambiguous` parameter dictates how ambiguous times should be
    handled.

    - 'infer' will attempt to infer fall dst-transition hours based on
      order
    - bool-ndarray where True signifies a DST time, False designates
      a non-DST time (note that this flag is only applicable for
      ambiguous times)
    - 'NaT' will return NaT where there are ambiguous times
    - 'raise' will raise an AmbiguousTimeError if there are ambiguous
      times.
nonexistent : str, default 'raise'
    A nonexistent time does not exist in a particular timezone
    where clocks moved forward due to DST. Valid values are:

    - 'shift_forward' will shift the nonexistent time forward to the
      closest existing time
    - 'shift_backward' will shift the nonexistent time backward to the
      closest existing time
    - 'NaT' will return NaT where there are nonexistent times
    - timedelta objects will shift nonexistent times by the timedelta
    - 'raise' will raise an NonExistentTimeError if there are
      nonexistent times.

Returns
-------
{klass}
    Same type as the input.

Raises
------
TypeError
    If the TimeSeries is tz-aware and tz is not None.

Examples
--------
Localize local times:

>>> s = pd.Series(
...     [1],
...     index=pd.DatetimeIndex(['2018-09-15 01:30:00']),
... )
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00    1
dtype: int64

Pass None to convert to tz-naive index and preserve local time:

>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2018-09-15 01:30:00+02:00']))
>>> s.tz_localize(None)
2018-09-15 01:30:00    1
dtype: int64

Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:

>>> s = pd.Series(range(7),
...               index=pd.DatetimeIndex(['2018-10-28 01:30:00',
...                                       '2018-10-28 02:00:00',
...                                       '2018-10-28 02:30:00',
...                                       '2018-10-28 02:00:00',
...                                       '2018-10-28 02:30:00',
...                                       '2018-10-28 03:00:00',
...                                       '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00    0
2018-10-28 02:00:00+02:00    1
2018-10-28 02:30:00+02:00    2
2018-10-28 02:00:00+01:00    3
2018-10-28 02:30:00+01:00    4
2018-10-28 03:00:00+01:00    5
2018-10-28 03:30:00+01:00    6
dtype: int64

In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly

>>> s = pd.Series(range(3),
...               index=pd.DatetimeIndex(['2018-10-28 01:20:00',
...                                       '2018-10-28 02:36:00',
...                                       '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00    0
2018-10-28 02:36:00+02:00    1
2018-10-28 03:46:00+01:00    2
dtype: int64

If the DST transition causes nonexistent times, you can shift these
dates forward or backward with a timedelta object or `'shift_forward'`
or `'shift_backward'`.

>>> s = pd.Series(range(2),
...               index=pd.DatetimeIndex(['2015-03-29 02:30:00',
...                                       '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00    0
2015-03-29 03:30:00+02:00    1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00    0
2015-03-29 03:30:00+02:00              1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1h'))
2015-03-29 03:30:00+02:00    0
2015-03-29 03:30:00+02:00    1
dtype: int64
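
``tz_localize`` and ``tz_convert`` compose: a naive index can be localized to
its wall-clock zone and then converted to another zone (illustrative round
trip; see ``tz_convert`` above):

>>> s = pd.Series([1], index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET').tz_convert('UTC')
2018-09-14 23:30:00+00:00    1
dtype: int64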
"""
        # Implementation notes (tz_localize): ``nonexistent`` must be one of
        # 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta
        # object, otherwise ValueError("The nonexistent argument must be one of
        # 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta
        # object") is raised; the same index-validation errors as
        # ``tz_convert`` apply.

    def describe(self, percentiles=None, include=None, exclude=None):
        """
Generate descriptive statistics.

Descriptive statistics include those that summarize the central
tendency, dispersion and shape of a
dataset's distribution, excluding ``NaN`` values.

Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.

Parameters
----------
percentiles : list-like of numbers, optional
    The percentiles to include in the output. All should
    fall between 0 and 1. The default is
    ``[.25, .5, .75]``, which returns the 25th, 50th, and
    75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
    A white list of data types to include in the result. Ignored
    for ``Series``. Here are the options:

    - 'all' : All columns of the input will be included in the output.
    - A list-like of dtypes : Limits the results to the
      provided data types.
      To limit the result to numeric types submit
      ``numpy.number``. To limit it instead to object columns submit
      the ``numpy.object`` data type. Strings
      can also be used in the style of
      ``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
      select pandas categorical columns, use ``'category'``
    - None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
    A black list of data types to omit from the result. Ignored
    for ``Series``. Here are the options:

    - A list-like of dtypes : Excludes the provided data types
      from the result. To exclude numeric types submit
      ``numpy.number``. To exclude object columns submit the data
      type ``numpy.object``. Strings can also be used in the style of
      ``select_dtypes`` (e.g. ``df.describe(exclude=['O'])``). To
      exclude pandas categorical columns, use ``'category'``
    - None (default) : The result will exclude nothing.

Returns
-------
Series or DataFrame
    Summary statistics of the Series or Dataframe provided.

See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
    columns based on their dtype.

Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.

For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.

If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.

For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.

The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.

Examples
--------
Describing a numeric ``Series``.

>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count    3.0
mean     2.0
std      1.0
min      1.0
25%      1.5
50%      2.0
75%      2.5
max      3.0
dtype: float64

Describing a categorical ``Series``.

>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count     4
unique    3
top       a
freq      2
dtype: object

Describing a timestamp ``Series``.

>>> s = pd.Series([
...     np.datetime64("2000-01-01"),
...     np.datetime64("2010-01-01"),
...     np.datetime64("2010-01-01")
... ])
>>> s.describe()
count                      3
mean     2006-09-01 08:00:00
min      2000-01-01 00:00:00
25%      2004-12-31 12:00:00
50%      2010-01-01 00:00:00
75%      2010-01-01 00:00:00
max      2010-01-01 00:00:00
dtype: object

Describing a ``DataFrame``. By default only numeric fields
are returned.

>>> df = pd.DataFrame({'categorical': pd.Categorical(['d', 'e', 'f']),
...                    'numeric': [1, 2, 3],
...                    'object': ['a', 'b', 'c']
...                    })
>>> df.describe()
       numeric
count      3.0
mean       2.0
std        1.0
min        1.0
25%        1.5
50%        2.0
75%        2.5
max        3.0

Describing all columns of a ``DataFrame`` regardless of data type.

>>> df.describe(include='all')  # doctest: +SKIP
       categorical  numeric object
count            3      3.0      3
unique           3      NaN      3
top              f      NaN      a
freq             1      NaN      1
mean           NaN      2.0    NaN
std            NaN      1.0    NaN
min            NaN      1.0    NaN
25%            NaN      1.5    NaN
50%            NaN      2.0    NaN
75%            NaN      2.5    NaN
max            NaN      3.0    NaN

Describing a column from a ``DataFrame`` by accessing it as
an attribute.

>>> df.numeric.describe()
count    3.0
mean     2.0
std      1.0
min      1.0
25%      1.5
50%      2.0
75%      2.5
max      3.0
Name: numeric, dtype: float64

Including only numeric columns in a ``DataFrame`` description.

>>> df.describe(include=[np.number])
       numeric
count      3.0
mean       2.0
std        1.0
min        1.0
25%        1.5
50%        2.0
75%        2.5
max        3.0

Including only string columns in a ``DataFrame`` description.

>>> df.describe(include=[object])  # doctest: +SKIP
       object
count       3
unique      3
top         a
freq        1

Including only categorical columns from a ``DataFrame`` description.

>>> df.describe(include=['category'])
       categorical
count            3
unique           3
top              d
freq             1

Excluding numeric columns from a ``DataFrame`` description.

>>> df.describe(exclude=[np.number])  # doctest: +SKIP
       categorical object
count            3      3
unique           3      3
top              f      a
freq             1      1

Excluding object columns from a ``DataFrame`` description.

>>> df.describe(exclude=[object])  # doctest: +SKIP
       categorical  numeric
count            3      3.0
unique           3      NaN
top              f      NaN
freq             1      NaN
mean           NaN      2.0
std            NaN      1.0
min            NaN      1.0
25%            NaN      1.5
50%            NaN      2.0
75%            NaN      2.5
max            NaN      3.0
)r   r   excludepercentilesdescriber~  )r   r   )r   r  r   r  s       r   r  NDFrame.describe-  s.    d  #	

 ,tJ,
/	0r   c                   U[         R                  S4;  d  U[         R                  La9  [        R                  " S[	        U 5      R
                   S3[        [        5       S9  U[         R                  L a  U[         R                  L a  U R                  S:X  a  U R                  5       OSU 4/nU H  u  px[        U5      S:  d  M  UR                  5       R                  n	U	[        R                  " U	) 5      S n	U	R                  5       (       d  Ma  [        R                  " S[	        U 5      R
                   S3[        [        5       S9    O   S	nU[         R                  L a  SnU R!                  UR#                  S
S5      5      n
Uc  U nOU R%                  X*US9nUR&                  " SXU
S.UD6nX-  S-
  nUb9  UR(                  UR*                  R-                  5       )    nUR/                  U5      nUR1                  U SS9$ )a  
Fractional change between the current and a prior element.

Computes the fractional change from the immediately previous row by
default. This is useful in comparing the fraction of change in a time
series of elements.

.. note::

    Despite the name of this method, it calculates fractional change
    (also known as per unit change or relative change) and not
    percentage change. If you need the percentage change, multiply
    these values by 100.

Parameters
----------
periods : int, default 1
    Periods to shift for forming percent change.
fill_method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
    How to handle NAs **before** computing percent changes.

    .. deprecated:: 2.1
        All options of `fill_method` are deprecated except `fill_method=None`.

limit : int, default None
    The number of consecutive NAs to fill before stopping.

    .. deprecated:: 2.1

freq : DateOffset, timedelta, or str, optional
    Increment to use from time series API (e.g. 'ME' or BDay()).
**kwargs
    Additional keyword arguments are passed into
    `DataFrame.shift` or `Series.shift`.

Returns
-------
Series or DataFrame
    The same type as the calling object.

See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.

Examples
--------
**Series**

>>> s = pd.Series([90, 91, 85])
>>> s
0    90
1    91
2    85
dtype: int64

>>> s.pct_change()
0         NaN
1    0.011111
2   -0.065934
dtype: float64

>>> s.pct_change(periods=2)
0         NaN
1         NaN
2   -0.055556
dtype: float64

See the percentage change in a Series where NAs are filled with the last
valid observation and carried forward to the next valid one.

>>> s = pd.Series([90, 91, None, 85])
>>> s
0    90.0
1    91.0
2     NaN
3    85.0
dtype: float64

>>> s.ffill().pct_change()
0         NaN
1    0.011111
2    0.000000
3   -0.065934
dtype: float64

**DataFrame**

Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.

>>> df = pd.DataFrame({
...     'FR': [4.0405, 4.0963, 4.3149],
...     'GR': [1.7246, 1.7482, 1.8519],
...     'IT': [804.74, 810.01, 860.13]},
...     index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
                FR      GR      IT
1980-01-01  4.0405  1.7246  804.74
1980-02-01  4.0963  1.7482  810.01
1980-03-01  4.3149  1.8519  860.13

>>> df.pct_change()
                  FR        GR        IT
1980-01-01       NaN       NaN       NaN
1980-02-01  0.013810  0.013684  0.006549
1980-03-01  0.053365  0.059318  0.061876

Percentage change in GOOG and APPL stock volume. This shows how to compute
the percentage change between columns.

>>> df = pd.DataFrame({
...     '2016': [1769950, 30586265],
...     '2015': [1500923, 40912316],
...     '2014': [1371819, 41403351]},
...     index=['GOOG', 'APPL'])
>>> df
          2016      2015      2014
GOOG   1769950   1500923   1371819
APPL  30586265  40912316  41403351

>>> df.pct_change(axis='columns', periods=-1)
          2016      2015  2014
GOOG  0.179241  0.094112   NaN
APPL -0.252395 -0.011860   NaN
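
As noted under ``fill_method``, passing ``fill_method=None`` leaves NA values
untouched instead of padding them; a minimal sketch of how the NAs then
propagate through the result:

>>> s = pd.Series([90, 91, None, 85])
>>> s.pct_change(fill_method=None)  # doctest: +SKIP
0         NaN
1    0.011111
2         NaN
3         NaN
dtype: float64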
NzDThe 'fill_method' keyword being not None and the 'limit' keyword in z.pct_change are deprecated and will be removed in a future version. Either fill in any non-leading NA values prior to calling pct_change or specify 'fill_method=None' to not fill NA values.r  r+  r   z!The default fill_method='pad' in z.pct_change is deprecated and will be removed in a future version. Either fill in any non-leading NA values prior to calling pct_change or specify 'fill_method=None' to not fill NA values.r  r   r  r  )r  r  r   r   
pct_changer~  r  )r   r  r  r	  r
  r   r  rU   r,  r   r   ro   r   rh  argmaxr  r!  r  r  r  rf  r  
duplicatedr  r   )r   r  fill_methodr  r  r  r  rq  r  r  r   r   shiftedr  s                 r   r  NDFrame.pct_change.  s   T s~~t44S^^8SMMV:&&' (
 +- #..(&'+yyA~tzz|T4L>"FA3x!|"xxz00#BIIte$4$6788::$MM C#':#6#6"7 8W!W
 !.+;+= " #  KCNN"E$$VZZ%@AD((u(MD**MWdMfM^a ,,../B&BtL99r   c           	        [         R                  " SXaS9  [        USSS9  U R                  S:  a.  Uc+  U R                  " X4SXES.UD6nUR                  " X4SU0UD6$ Uc  SnU R                  S:  a  US:X  a~  [        U R                  R                  5      S:  a[  [        S	 U R                  R                   5       5      (       a0  U(       d)  U nU(       a  U R                  5       nUR                  XUS
9$ U R                  UUUUUSS9$ )Nr  fnameskipnaFnone_allowedr   r   )r   	bool_onlyr  c              3  >   #    U  H  oR                   S :H  v   M     g7f)r+  N)r,  r$  s     r   r^  (NDFrame._logical_func.<locals>.<genexpr>/  s     :)9AFFaK)9s   r  r  )r8  r   r  r[  filter_type)rM  validate_logical_funcrX   r,  _logical_funcr   r   ri  r  rY  _reduce_axis1_reduce)	r   r8  r  r   r  r  r  r  r   s	            r   r  NDFrame._logical_func/  s'    	  V8FH5A99q=T\$$!"iJPC $$#)-3  \D IIM	DII$$%):)9)9::: C))+$$T$??||"  
 	
r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr  )r  rv   nananyr   r   r  r  r  s        r   r  NDFrame.any/  ,     !!6==$6
=C
 	
r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr  )r  rv   nanallr  s        r   r  NDFrame.all/  r  r   c                h  ^^^^ [         R                  " TUTT5      mUc  SnOU R                  U5      nUS:X  a.  U R                  R                  " TT/UQ7STS.TD6R                  $ UUUU4S jnU R
                  R                  U5      nU R                  XR                  S9R                  U TS9$ )Nr   r   )r   r  c                
  > [        U S5      (       a  U R                  OU n[        U[        5      (       a  UR                  " T4ST0TD6nO[
        R                  " UTTS9n[        US5      (       a  UR                  nU$ UnU$ )Nr=   r  r  )r  r=   r   ry   _accumulaterv   na_accum_func)
blk_valuesr   r  r  r  r8  r  s      r   block_accum_func-NDFrame._accum_func.<locals>.block_accum_func/  s~    %,Z%=%=Z\\:F &.11++DJJ6J--fd6J!(!5!5VXXFM <BFMr   r   r~  )
rM  validate_cum_func_with_skipnar!  r=   _accum_funcr   r  r   r   r   )	r   r8  r  r   r  r  r  r  r  s	    `` ` `  r   r
  NDFrame._accum_func/  s     11&$M<D((.D1966%%d48!"6<Ba
	 
	 !12))&{{)CPP Q 
 	
r   c                b    U R                   " S[        R                  R                  X/UQ70 UD6$ )Ncummax)r
  rh  maximum
accumulater   r   r  r  r  s        r   r  NDFrame.cummax/  6    bjj++T
<@
DJ
 	
r   c                b    U R                   " S[        R                  R                  X/UQ70 UD6$ )Ncummin)r
  rh  minimumr  r  s        r   r  NDFrame.cummin/  r  r   c                N    U R                   " S[        R                  X/UQ70 UD6$ )Ncumsum)r
  rh  r  r  s        r   r  NDFrame.cumsum 0  s%    "))TSDSFSSr   c                N    U R                   " S[        R                  X/UQ70 UD6$ )Ncumprod)r
  rh  r  r  s        r   r  NDFrame.cumprod0  s%    	2::tUdUfUUr   c           	     2   [         R                  " SXqS9  [        USSS9  UcO  U R                  S:  a<  [        R
                  " S[        U 5      R                   SU S	3[        [        5       S
9  SnOU[        R                  L a  SnU R                  X!X6XES9$ )Nr  r  r  Fr  r   The behavior of . with axis=None is deprecated, in a future version this will reduce over both axes and return a scalar. To retain the old behavior, pass axis=0 (or do not pass axis)r  r   )r   r[  r  ddof)rM  validate_stat_ddof_funcrX   r,  r  r	  r
  r   r  rU   r   r  r  )r   r8  r  r   r  r!  r[  r  s           r   _stat_function_ddofNDFrame._stat_function_ddof0  s     	""2v:FH5A<yy1}&tDz':':&;1TF C3 3 "/1 DS^^#D||TV  
 	
r   c                J    U R                   " S[        R                  XX440 UD6$ )Nsem)r#  rv   nansemr   r   r  r!  r[  r  s         r   r&  NDFrame.sem&0  ,     ''6==$
FL
 	
r   c                J    U R                   " S[        R                  XX440 UD6$ )Nvar)r#  rv   nanvarr(  s         r   r,  NDFrame.var20  r*  r   c                J    U R                   " S[        R                  XX440 UD6$ )Nstd)r#  rv   nanstdr(  s         r   r0  NDFrame.std>0  r*  r   c                    US;   d   U5       e[         R                  " USU5        [        USSS9  U R                  X!X4US9$ )N)medianmeanr  r  kurtskewr  r  Fr  )r8  r   r  r[  )rM  validate_funcrX   r  )r   r8  r  r   r  r[  r  s          r   _stat_functionNDFrame._stat_functionJ0  sS     GGMMG
r6*FH5A||$L  
 	
r   c                L    U R                   " S[        R                  UUU40 UD6$ )Nr  )r9  rv   nanminr   r   r  r[  r  s        r   r  NDFrame.min]0  4     ""MM
 
 	
r   c                L    U R                   " S[        R                  UUU40 UD6$ )Nr  )r9  rv   nanmaxr=  s        r   r  NDFrame.maxm0  r?  r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr5  )r9  rv   nanmeanr=  s        r   r5  NDFrame.mean}0  ,     ""FNND,
BH
 	
r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr4  )r9  rv   	nanmedianr=  s        r   r4  NDFrame.median0  s.     ""f&&l
FL
 	
r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr7  )r9  rv   nanskewr=  s        r   r7  NDFrame.skew0  rF  r   c                J    U R                   " S[        R                  XU40 UD6$ )Nr6  )r9  rv   nankurtr=  s        r   r6  NDFrame.kurt0  rF  r   c           	     X   US;   d   U5       e[         R                  " USU5        [        USSS9  UcO  U R                  S:  a<  [        R
                  " S[        U 5      R                   SU S	3[        [        5       S
9  SnOU[        R                  L a  SnU R                  UUUUUUS9$ )N)sumri  r  r  Fr  r   r  r  r   r  r   )r8  r   r  r[  	min_count)rM  r8  rX   r,  r  r	  r
  r   r  rU   r   r  r  )r   r8  r  r   r  r[  rR  r  s           r   _min_count_stat_function NDFrame._min_count_stat_function0  s     &,,&
r6*FH5A<yy1}&tDz':':&;1TF C3 3 "/1 DS^^#D||%  
 	
r   c                J    U R                   " S[        R                  XX440 UD6$ )NrQ  )rS  rv   nansumr   r   r  r[  rR  r  s         r   rQ  NDFrame.sum0  s,     ,,6==$
KQ
 	
r   c                N    U R                   " S[        R                  UUUU40 UD6$ )Nri  )rS  rv   nanprodrW  s         r   ri  NDFrame.prod0  s7     ,,NN
 
 	
r   singlec
                   U[         R                  La  U R                  U5      nSn
US:X  a@  [        R                  " S[        U 5      R                   SU
 SU
 S3[        [        5       S9  O?[        R                  " S[        U 5      R                   SU
 S	3[        [        5       S9  OS
nUb  [        U UUUUUUUUU	S9
$ [        U UUUUUUUUU	S9
$ )Nrollingr   Support for axis=1 in r  B is deprecated and will be removed in a future version. Use obj.T.(...) insteadr  r8  i is deprecated and will be removed in a future version. Call the method without the axis keyword instead.r   )	windowmin_periodscenterwin_typer?  r   r9  rS  r  )r   r  r!  r  r	  r
  r   r  rU   r   r   )r   rc  rd  re  rf  r?  r   r9  rS  r  r8  s              r   r^  NDFrame.rolling0  s    s~~%((.DDqy,T$Z-@-@,A4& I!!%m5 "/1 ,T$Z-@-@,A4& IH H "/1 D'!  #
 	
r   c           	     l   U[         R                  La  U R                  U5      nSnUS:X  a@  [        R                  " S[        U 5      R                   SU SU S3[        [        5       S9  O?[        R                  " S[        U 5      R                   SU S	3[        [        5       S9  OS
n[        XX#S9$ )N	expandingr   r_  r  r`  ra  r  r8  rb  r   )rd  r   r  )
r   r  r!  r  r	  r
  r   r  rU   r   )r   rd  r   r  r8  s        r   ri  NDFrame.expanding11  s     s~~%((.DDqy,T$Z-@-@,A4& I!!%m5 "/1 ,T$Z-@-@,A4& IH H "/1 DTQQr   c                ~   U[         R                  La  U R                  U5      nSnUS:X  a@  [        R                  " S[        U 5      R                   SU SU S3[        [        5       S9  O?[        R                  " S[        U 5      R                   SU S	3[        [        5       S9  OS
n[        U UUUUUUUUU	U
S9$ )Newmr   r_  r  r`  ra  r  r8  rb  r   )
comspanhalflifealphard  adjust	ignore_nar   timesr  )
r   r  r!  r  r	  r
  r   r  rU   r   )r   rm  rn  ro  rp  rd  rq  rr  r   rs  r  r8  s               r   rl  NDFrame.ewmP1  s     s~~%((.DDqy,T$Z-@-@,A4& I!!%m5 "/1 ,T$Z-@-@,A4& IH H "/1 D&#
 	
r   c                   Sn[         (       d2  [        5       (       a#  [        R                  " U 5      [        S-   ::  a  SnU" X5      nU R
                  S:X  a  UR                  U 5      (       ao  UR                  U R                  :X  aU  [        5       (       dF  [        5       (       a  U(       a0  U R                  R                  [        S5      UR                  US9  U $ U R                  5         U R                  UR                  U SS9SS9  U $ )	z,
Wrap arithmetic method to operate inplace.
Tr+  Fr   N)r	  r\  )rC  )rG   r   r  r  rH   r,  r  r   r   r   setitem_inplacer  r  r<  r  r  )r   r  opr	  r  s        r   _inplace_methodNDFrame._inplace_method1  s    
 t*,,t$	A5D IIN$$T**

*'))'))$
 II%%dV^^$ &  K 	 	51% 	 	
 r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __add__r  s     r   __iadd__NDFrame.__iadd__1  !     ##E4:+=+=>>r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __sub__r  s     r   __isub__NDFrame.__isub__1  r~  r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __mul__r  s     r   __imul__NDFrame.__imul__1  r~  r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __truediv__r  s     r   __itruediv__NDFrame.__itruediv__1  s&     ##4:))
 	
r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __floordiv__r  s     r   __ifloordiv__NDFrame.__ifloordiv__1  s&     ##4:**
 	
r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __mod__r  s     r   __imod__NDFrame.__imod__1  r~  r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __pow__r  s     r   __ipow__NDFrame.__ipow__1  r~  r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __and__r  s     r   __iand__NDFrame.__iand__1  r~  r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __or__r  s     r   __ior__NDFrame.__ior__1  s    ##E4:+<+<==r   c                L    U R                  U[        U 5      R                  5      $ r   )rx  r
  __xor__r  s     r   __ixor__NDFrame.__ixor__1  r~  r   c               n    U R                  5       R                  n[        XS9nUc  gU R                  U   $ )z
Retrieves the index of the first or last valid value, depending on ``how``.

Parameters
----------
how : {'first', 'last'}
    Use this parameter to change between the first or last valid index.

Returns
-------
idx_first_valid : type of index
)r   is_validN)rp   r   r   r  )r   r   r  idxposs       r   _find_valid_indexNDFrame._find_valid_index1  s6     ::<&&!c=>zz&!!r   rH  )r  r   c                     U R                  SS9$ )a  
Return index for {position} non-NA value or None, if no non-NA value is found.

Returns
-------
type of index

Examples
--------
For Series:

>>> s = pd.Series([None, 3, 4])
>>> s.first_valid_index()
1
>>> s.last_valid_index()
2

>>> s = pd.Series([None, None])
>>> print(s.first_valid_index())
None
>>> print(s.last_valid_index())
None

If all elements in Series are NA/null, returns None.

>>> s = pd.Series()
>>> print(s.first_valid_index())
None
>>> print(s.last_valid_index())
None

If Series is empty, returns None.

For DataFrame:

>>> df = pd.DataFrame({{'A': [None, None, 2], 'B': [None, 3, 4]}})
>>> df
     A      B
0  NaN    NaN
1  NaN    3.0
2  2.0    4.0
>>> df.first_valid_index()
1
>>> df.last_valid_index()
2

>>> df = pd.DataFrame({{'A': [None, None, None], 'B': [None, None, None]}})
>>> df
     A      B
0  None   None
1  None   None
2  None   None
>>> print(df.first_valid_index())
None
>>> print(df.last_valid_index())
None

If all elements in DataFrame are NA/null, returns None.

>>> df = pd.DataFrame()
>>> df
Empty DataFrame
Columns: []
Index: []
>>> print(df.first_valid_index())
None
>>> print(df.last_valid_index())
None

If DataFrame is empty, returns None.
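
A common follow-up, sketched here, is to slice off the leading missing values
using the returned label:

>>> s = pd.Series([None, 3, 4])
>>> s.loc[s.first_valid_index():]  # doctest: +SKIP
1    3.0
2    4.0
dtype: float64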
rH  r   r  r   s    r   first_valid_indexNDFrame.first_valid_index1  s    T %%'%22r   c                     U R                  SS9$ )Nr  r  r  r   s    r   last_valid_indexNDFrame.last_valid_indexD2  s     %%&%11r   )r   r   r   r   r   )r   r0   r  rC  )NF)
r   r0   r   z.dict[Literal['index', 'columns'], Axes | None]r   DtypeObj | Noner   r  r  r0   )T)r   r   r   r  r  r8   )r   r0   r   list[Index]r  r8   )r  r   )r   zMapping[Hashable, Any]r  rC  )r  r|   )r   r  r   bool_t | Noner  r8   )r  r  )r  zCallable[..., Self]r   )r   zSequence[Axis] | None)r   r   r  r   )r   r   r  r  )r   r   r  r~   )r   r   r  zdict[str, Series | MultiIndex])r  z#dict[Hashable, Series | MultiIndex])r  zdict[Hashable, Series])r  r~   )r  ztuple[int, ...])r  r  )r  r  )r   r   r   r  r  r8   )r   r   r   r  r   r  )r   r   ro  zAnyArrayLike | listr  rC  )r  r   r  r   r   r  r  r8   )r   )r:  r+   r   r   r  r8   )r  r   r  zSeries | Any)r   Axis | None)r  Renamer | Noner  r  r(  r  r   r  r   r  r   r  r:  Level | Noner  r   r  Self | None).)
r  IndexLabel | lib.NoDefaultr   r   r   r  r   Literal[False]r  r8   )
r  r  r   r   r   r  r   Literal[True]r  rC  )
r  r  r   r   r   r  r   r  r  r  )r   FTr  )r  r   r  r  )r  r8   )r  r   )r  r  r  r8   )r9  r/   r   r   r  r  )r9  r/   r   r   r  r  )r9  r/   r   r   r  rC  )r9  r/   r   r   r  r   )r   r   )r  r   r~  )r   znpt.DTypeLike | Noner   r  r  z
np.ndarray)r^  znp.ufuncr  r   r_  r   r  r   )r  zdict[str, Any]rB  )r  r   )Sheet1r,  NNTTNr   r   NTr  NNN)"r  z)FilePath | WriteExcelBuffer | ExcelWriterr  r   r  r   r  
str | Noner(  Sequence[Hashable] | Noner  zSequence[Hashable] | bool_tr  r  r  IndexLabel | Noner  r  r  r  r  z(Literal['openpyxl', 'xlsxwriter'] | Noner  r  r  r   r  ztuple[int, int] | Noner  StorageOptions | Noner  zdict[str, Any] | Noner  rC  )NNN
   TmsNFr  NNNw)r  7FilePath | WriteBuffer[bytes] | WriteBuffer[str] | Noner  zILiteral['split', 'records', 'index', 'table', 'columns', 'values'] | Noner  r  r  r  r  r  r  rB   r  z(Callable[[Any], JSONSerializable] | Noner  r  r  r    r  r  r  
int | Noner  r  r  zLiteral['a', 'w']r  r  )r   NNFNTNNNNstrictzUTF-8)r  zFilePath | HDFStorer9  r   r  zLiteral['a', 'w', 'r+']r  r  r  z/Literal['zlib', 'lzo', 'bzip2', 'blosc'] | Noner  r  r  z Literal['fixed', 'table'] | Noner  r  r  zint | dict[str, int] | Noner  r  r  z Literal[True] | list[str] | Noner  r3   r  r   r  rC  )NfailTNNNN)r8  r   r  r  r  z$Literal['fail', 'replace', 'append']r  r  r  r  r  r  r   zDtypeArg | Noner  z"Literal['multi'] | Callable | Noner  r  )
r  zFilePath | WriteBuffer[bytes]r  r    r  r  r  r  r  rC  )TN)r  r  r  r  r  rC  ).....................),r  rC  r(  r  r  bool_t | SequenceNotStr[str]r  r  r  r   r  FormattersType | Noner  FloatFormatType | Noner  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  r  r  r  r  r  r  str | tuple[str, str] | Noner  r  r  r  r  r   )....................),r  zFilePath | WriteBuffer[str]r(  r  r  r  r  r  r  r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  r  r  r  r  r  r  r  r  r  r  r  r  rC  )NNTTNaNNNNTFNNNNr  NNNNNN),r  z"FilePath | WriteBuffer[str] | Noner(  r  r  r  r  r  r  r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  )
r  dict | list[dict] | Noner  r  r  r  r  r  r  dict | None),r  rC  r  r   r  r   r  str | Callable | Noner(  r  r  bool_t | list[str]r  r  r  r  r  r   r  r  r  r    r1  r  r2  r   r3  r  r  r  r  r  r4  r  r5  r  r  r   r  r3   r  r;   r  r   ),r  z0FilePath | WriteBuffer[bytes] | WriteBuffer[str]r  r   r  r   r  r  r(  r  r  r  r  r  r  r  r  r   r  r  r  r    r1  r  r2  r   r3  r  r  r  r  r  r4  r  r5  r  r  r   r  r3   r  r;   r  rC  )Nro  r,  NNTTNr  Nr  Nr  NNNTNr  r  N),r  r  r  r   r  r   r  r  r(  r  r  r  r  r  r  r  r  r   r  r  r  r    r1  r  r2  r   r3  r  r  r  r  r  r4  r  r5  r  r  r   r  r3   r  r  r  r  )FTF)rB  r  rC  r  r   r  r  rC  )r   r   r  r8   )r   NT)
r9  r+   r   r   r:  r  r[  r  r  r8   )r9  r  r  r8   )rs  r  r   r   r  r8   )r|  r   r   r  r  rC  )settingF)r  r   r  r  )r   r  )NNNN)r  z>Literal['backfill', 'bfill', 'pad', 'ffill', 'nearest'] | Noner   r  r  r  r  r8   )ro  r+   r   r   r  r+   r(  r+   r:  r  r   r  r  r)   r  rC  )ro  r+   r   r   r  r+   r(  r+   r:  r  r   r  r  r)   r  r8   )ro  r+   r   r   r  r+   r(  r+   r:  r  r   r  r  r)   r  r  )ro  r  r   r   r  r  r(  r  r:  r  r   r  r  r)   r  r  )Nr  F)r  r)   r  r  r  r8   )rC  r  r  rC  )r6  r   r   r  r  r8   )r  r   r   r  r  r8   )r   r   r  bool_t | Sequence[bool_t]r   r  r   r:   r  r1   r  r  r9  rC   r  r8   )r   r   r  r  r   r  r   r:   r  r1   r  r  r9  rC   r  rC  )r   r   r  r  r   r  r   r:   r  r1   r  r  r9  rC   r  r  )r   r   r  r  r   r  r   r:   r  r1   r  r  r9  zValueKeyFunc | Noner  r  )r   r   r:  r+   r  r  r   r  r   r:   r  r1   r  r  r  r  r9  r*   r  rC  )r   r   r:  r+   r  r  r   r  r   r:   r  r1   r  r  r  r  r9  r*   r  r8   )r   r   r:  r+   r  r  r   r  r   r:   r  r1   r  r  r  r  r9  r*   r  r  )r   r   r:  r  r  r  r   r  r   r:   r  r1   r  r  r  r  r9  zIndexKeyFunc | Noner  r  )r   r  r  zReindexMethod | Noner   r  r:  r  r  Scalar | Noner  r  r  r8   )
r:  r  r  r  r  r  r   r  r  r8   )r:  r  r  r  )NFF)r   r  r  r  r  r8   )r  r  r  r  r   r  r  r8   )   )r  r  r  r8   )NNFNNNF)r  r  r  float | Noner  r  r  zRandomState | Noner   r  r  r  r  r8   )r  z/Callable[..., T] | tuple[Callable[..., T], str]r  r=   )r  r  r  r8   )r8  r   )r8  r   r  rC  )r  r   )r  r   )Nr  )r   r  r  r)   r  r8   )r   r  r  r8   )r   r  r  r8   )r   r  r  r8   )TTTTTnumpy_nullable)r~  r  r  r  r  r  r  r  r  r  r  r"   r  r8   )r  r   )r  z,Literal['ffill', 'bfill', 'pad', 'backfill']r   None | Axisr   r  r  
None | intr  #Literal['inside', 'outside'] | Noner  r  )r   'Hashable | Mapping | Series | DataFramer  FillnaOptions | Noner   r  r   r  r  r  r  r  r  r8   )r   r  r  r  r   r  r   r  r  r  r  r  r  rC  )r   r  r  r  r   r  r   r  r  r  r  r  r  r  )r   z.Hashable | Mapping | Series | DataFrame | Noner  r  r   r  r   r  r  r  r  dict | None | lib.NoDefaultr  r  )r   r  r   r  r  r  r  r  r  r  r  r8   )r   r  r   r  r  r  r  r  r  r  r  rC  )r   r  r   r  r  r  r  r  r  r  r  r  )
r   r  r   r  r  r  r  r  r  r  )
r   r  r   r  r  r  r  r  r  rC  )..)
r   r  r  r  r  r  r  0Literal['pad', 'ffill', 'bfill'] | lib.NoDefaultr  r8   )
r   r  r  r  r  r  r  r  r  rC  )
r   r  r  r  r  r  r  r  r  r  )r  r,   r   r   r  r  r   r  r  -Literal['forward', 'backward', 'both'] | Noner  r  r  'Literal['infer'] | None | lib.NoDefaultr  r8   )r  r,   r   r   r  r  r   r  r  r  r  r  r  r  r  rC  )r  r,   r   r   r  r  r   r  r  r  r  r  r  r  r  r  )r  )F)r   r  r   r  r  r8   )r   r  r   r  r  rC  )r   r  r   r  r  r  )NNFN)r  r(   r  r  r   zLiteral['start', 'end'] | Noner!  r  r  Hashable | Noner  r8   )FN)r  r  r   r  r  r8   )bothN)r0  r-   r   r  r  r8   )r   Axis | lib.NoDefaultr9  Literal['right', 'left'] | Noner  r  r:  z1Literal['start', 'end', 's', 'e'] | lib.NoDefaultr   z5Literal['timestamp', 'period'] | None | lib.NoDefaultr?  r  r:  r  r;  zstr | TimestampConvertibleTypesr<  z TimedeltaConvertibleTypes | Noner=  r  r  r   )r   averageFrO  TF)r   r   r  z2Literal['average', 'min', 'max', 'first', 'dense']r[  r  rR  z Literal['keep', 'top', 'bottom']r  r  rS  r  r  r8   )r   FFr  )rd  r   re  r  rf  r  rg  r<   )r  r2   rs  r   r   r  r:  r  r   r  r  r  r  $FillnaOptions | None | lib.NoDefaultr  int | None | lib.NoDefaultrr  r  rw  Axis | None | lib.NoDefaultr  ztuple[Self, NDFrameT])rp  NNNNNNr   )r  r   rs  r   r   r  r   r  r  r  rr  r   r  z$tuple[Self, DataFrame, Index | None])r  r   rs  r   r   r  r   r  r  r  rr  r   r  z!tuple[Self, Series, Index | None])r   r  r   r  r	  r  )r   r  r   r  r:  r/   r  r8   )r   r  r   r  r:  r/   r  rC  )r   r  r   r  r:  r/   r  r  )r   r  r   r  r:  r  r  r  )
r  zint | Sequence[int]r   r   r  r   r  r  r  zSelf | DataFrame)r  r  r   r  r  r8   )r   r  r   r  r  r8   )r   NN)r   NNr  r  )
r   r   r   r  r  r>   r  r@   r  r8   r  )r  r  r  r  r  r  r  r8   )
r8  r   r   r  r  r  r  r  r  Series | bool_t)r   r  r  r  r  r  r  r  )r   r   r  r  r  r  r  r  )NT)r8  r   r   r  r  r  )r   r  r  r  )r8  r   r   r  r  r  r!  r  r[  r  r  Series | float)r   Tr   F)
r   r  r  r  r!  r  r[  r  r  r  )r   TF)r8  r   r   r  r  r  r[  r  )r   r  r  r  r[  r  )r   r  r  r  r[  r  r  r  )
r8  r   r   r  r  r  r[  r  rR  r  )r   TFr   )r   r  r  r  r[  r  rR  r  )rc  z3int | dt.timedelta | str | BaseOffset | BaseIndexerrd  r  re  r  rf  r  r?  r  r   r  r9  zIntervalClosedType | NonerS  r  r  r   r  zWindow | Rolling)rd  r  r   r  r  Literal['single', 'table']r  r   )rm  r  rn  r  ro  z(float | TimedeltaConvertibleTypes | Nonerp  r  rd  r  rq  r  rr  r  r   r  rs  z&np.ndarray | DataFrame | Series | Noner  r  r  r   )r   r   r  r  )r  r  )r   
__module____qualname____firstlineno____doc__r   __annotations__rh  r   r   	frozensetr   r   r   r   r   classmethodr   r   r   propertyr   setterr   r   r   r  r  r  r  r!  r%  r  r   r>  rG  rO  rR  rW  ra  r   r,  rj  rp  rn  rx  r}  rT   _shared_doc_kwargsr  r  r  r  r   r  r   r  r  r  r  r  r  r  r  __bool__r  r  r  r  r  r	  r  r)  r0  r<  rA  r7  r   rJ  rM  rR  rT  rZ  r`  re  rk  rv  r|  r  rS   r   r  r  r  r  pickleHIGHEST_PROTOCOLr  r  r  r{  r  r6  r<  rD  rw  rL  rX  r-  rl  rt  rr  rW  r  rA  r  r  r  re  r  r3  r  r  r  r  r  r  rh  r  r  r  r  r  r  r  r  r  rw   r  r   r+  r   r3  r;  rD  rJ  rO  rU  rY  r   r  rN  r   r   rw  r{  r~  r  r  r  r  r  r  r  r  r  r  r  ro   r  rp   r  r  r  r  r  r(  r3  r@  rH  r  rT  r]  r{  rt  ru  r  r  r  r  r  r  r  r  r  r  r  r  r  r
  r  r  r  r  r#  r&  r,  r0  r9  r  r  r5  r4  r7  r6  kurtosisrS  rQ  ri  productr   r^  r   ri  r   rl  rx  r|  r  r  r  r  r  r  r  r  r  r  r  r  __static_attributes____classcell__)r7  s   @r   r   r      s?   		"OY 	 %($885J $-bMM>1Iy<@H9@
M
I
V 
 "& = 	
  
  : Y Y,   , # #J \\" " %  %N  15	> > "/	>
 
> >@   " ( ( 
  
 434510M-M00N
  U  U -  - @ @
     < Y Y 
 
, 3  38 H H > >   , (  (8 ".N 	.N
 .N 
.N` 
 
+1
9F
 
 ! ! =0 =0~ !'*+=? , =?~ p pj  "&E> !%"& ""E>E> 	E>
  E> E> E> E> E> E> 
E> E>N  .1
 !"%
*
 
 
  
 

 
  .1
 !
*
 
 
 
 

 
  .1
 !
*
 
 
 
 

 
 .1^^y nn"y*y y y y 
yv SWAA-3ACPA AL 
 

 Q, Q,l 8 8" 8 8 ; ; 
 
 H
@ @D F
 F
P   K K 
 
< 
 
: 
 
0 )" )"V : :x I Ib %(2$ & & 9K 9K@ #" IM)8E	$ MM'*M58MDGM M 	
 	
 #6 #6P1   
M 
M #V^$<: 	$%67%, ##'-1.2)-;?"/315/3#[
?[
 [
 	[

 ![
 +[
 ,[
 [
 '[
 [
 [
 9[
 [
 [
 -[
  /![
" -#[
$ 
%[
 [
z #V]$;) 	$%67()>?-O PT"& """DH*1#!15"%Y
LY
Y

  Y
 Y
 Y
 Y
 BY
 Y
 (Y
 Y
 Y
 /Y
  Y
  
!Y
	 Y
v #V]$;( ), $CG3748 $9=!)T
(T
 T
 &	T

 T
 AT
 T
 1T
 T
 2T
 T
 7T
 T
 T
  
!T
 T
l #$;( ":@)- $!%59]
]
 	]

 8]
 ]
 ']
 ]
 ]
 3]
 
]
 ]
~ #VV$4; 	$%67()>?&H +2//15?
+?
 (?
 	?

 /?
 
?
	 ?
B #VH> 7;FFFF)3FF	FF FFP Q7 Q7f  -0/2,//2"%!$'#& #"%(),"%03"- + -	
   * -     " !    !" ##$ '%&  '( .)* +, -. 
/ 4  .1/2,//2"%!$'#& #"%(),"%03"-( + -	
   * -     " !    !" ##$ '%&  '( .)* +, -. 
/ 4 #VUO*
 37-1/3,0/3"&"!$(#' $#%))-"&04 #-Z
/Z
 +Z
 -	Z

 Z
 Z
 *Z
 -Z
  Z
 Z
 Z
 "Z
 !Z
 Z
 Z
  !Z
" ##Z
$ '%Z
&  'Z
( .)Z
* +Z
, -Z
. 
/Z
 Z
x  A9 *.26+/15%)A9 '	A9
 0A9 )A9 /A9 #A9 A9F   .1-0%(),"*-!%( #"%!!$!$*--  	
 , + #  '   (   #  !"  #$ %& '( )* +, (-. 
/ 4  .1-0%(),"*-!%( #"%!!$!$*--E  	
 , + #  '   (   #  !"  #$ %& '( )* +, (-. 
/ 4 #V]$;( 	$%67()>?-O PT.2-1%))-#*1"%) $"&"!%!)15-s
Ls
 s
 	s

 ,s
 +s
 #s
 s
 's
 s
 s
 (s
 s
 s
 #s
  !s
"  #s
$ %s
& 's
( )s
* +s
, /-s
. 
/s
	 s
p( !%	%% % 	%
 
%4( p
 p
d     #'!qq q !	q
 q 
q qf( " "$ $ - - HT HTT # #P   8 8t !  !  RV" y! Oy! 	y!
 y! 
y! y!v  ! !!! 	
      
   ! !!"%! 	
       
   ! !!! 	
      
  %)% #'%)"%%!% 	%
 !% #% % % % 
%N 
 %"T)
 T) T) 
T) T)l O O" G& G&R F& F&P  /2"%"%"  -	
        
   /2"%"  -	
       
   /2"%"  -	
       
   /3$"($#'V( V( -	V(
 V( V(  V( V( !V( 
V(p  /2"%!$"  	
 -        
   /2"%"%!$"  	
 -         
   /2"%!$"  	
 -        
 $ #'/3$"(!%$#'3B 3B !	3B
 -3B 3B 3B  3B 3B 3B !3B 
3Bj 	 ) I/  '+""$&FF I/ I/ %I/ I/ I/ "I/ I/ 
I/	I/V     	  "    
   D	
(  #",
 	,

 ,
 
,
 ,
`   jL jL 	jL
 jL 
jLX J JX N N`  !+/ $SS S 	S )S S S 
S Sj !'*+g8=g8
 
g8 , g8X - -^ 3 3  '6 '6R     % % 
 
 
)  
) Y Y Y Y ( ( ( ( W W< GND"(D"9DD"	D" D"L X
 X
t $ $ $ $ :> :>x  !%!%"&"&#'&6S?S? S?  	S?
  S? !S? $S? 
S? S?p  
 ! :> $'><'> 	'>
 '> '> 8'> '> '>R  :=
 (+"% #
6
 %	

 
  
 
 
 

 
  :=
 (+ #
6
 %	

 
 
 
 
 

 
  :=
 (+ #
6
 %	

 
 
 
 
 

 
  )*+<= AEk> (,  03k>=k> %	k>
 k> k> k> .k> 
k>	 
k>Z	   "%:=03	 	  		
 	 8	 .	 
	 	   :=03	 	 		
 	 8	 .	 
	 	   :=03	 	 		
 	 8	 .	 
	 	  )*+<= ! :>03u
 u
 	u

 u
 8u
 .u
 
u
	 
u
n !'*+ ! 03V V 	V
 V .V 
V , V@   "%:=03	 	  		
 	 8	 .	 
	 	   03  	
  . 
    :=03	 	 		
 	 8	 .	 
	 	  )*+<= ! :>03@
 @
 	@

 @
 8@
 .@
 
@
	 
@
D !'*+ ! 03V V 	V
 V .V 
V , V@  

 #&CF

  
 
 
 A
 

 
  
  CF

 
 
 
 A
 

 
  

 CF

 
 
 
 A
 

 
 Y )"9- nnF?
   CF>>F?
 F? F? F? AF? 
F? F?P  &) "%IL:=<?" 	
    G 8 : 
   &) IL:=<?" 	
   G 8 : 
   &) IL:=<?" 	
   G 8 : 
   &.{C  IM:><?NN{C"{C 	{C
 {C {C G{C 8{C :{C 
{C {C@
 f2 f2V 	!'*+<< ,<<| 	'01> 2> 	!'*+<> ,<>| 	(12@ 3@   . I I@  	
  "%	
 	  	 
	 	  	
  	
 	 	 
	 	  	
  	
 	 	 
	 	  
 !
   
 B !'*+ (,.2!&*z
z
 %z
 ,	z

 z
 $z
 
z
 , z
x 6; 6; 6;p 
 )/ R; &	R;
 R; 
R; R;h !'*+ &)^^2615HKFInn"2=37"P
 #P
 0	P

 /P
 FP
 DP
 P
 P
 0P
 1P
 P
 
P
 , P
d P Pd M! M!^  EN$6< ZZ CZ 	Z
 4Z Z Z 
Z Zx 	i	 (:7(CD ""!2L L 	L
 L L EL\  )*+<= " ""&*7:~~,/NN*-..69nn__ _ 	_
 _ _ $_ 5_ *_ (_ 4_ 
_	 
_B	  " " 5'5' 5' 	5' 5' 5' 5' 
.5' 5'n  " " M'M' M' 	M' M' M' M' 
+M' M'^  nn U- 	U-
 U- U- U-n  	
 #&	
  	 	 	 
	 	  	  	
 	 	 	 
	 	  	
 	
 	 	 	 
	 	  ) ffm>
   "m>
 m> m> m> 
m> m>^  	
 #&	
  	 	 	 
	 	  	  	
 	 	 	 
	 	  	
 	
 	 	 	 
	 	  ) nn0

   "0

 0
 0
 0
 
0
 0
d 	!'*+ ()"~~!W:$W: 	W:
 W: W: 
W: ,W:r !9 !9F   "j 	j
 j 
j jX !'*+DHY>Y>4AY>	Y> , Y>v !'*+ "#*'.~? ~?
 ~? !~? %~? 
~? , ~?F  	v0
 
v0 v0p  <?NN,/NNw:w: :w: *	w: 
w: w:r 
 !-
-
 	-

 -
 -
 
-
 -
b !		
	
 	
 		
 
	
 !		
	
 	
 		
 
	
 
 !$
$
 	$

 $
 $
L
 


 

T TV V 
 -0NN$

 *	

 
 
 
 

 
B $



 

 	


 

 


 $



 

 	


 

 


 $



 

 	


 

 


 
 $

 	

 
 
 
( $	

 
 	
$ $	

 
 	
$ $		
	
 	
 		
 
	
 $		
	
 	
 		
 
	
 $		
	
 	
 		
 
	
 $		
	
 	
 		
 
	
 H

 -0NN$$
$
 *	$

 $
 $
 $
 $
P $



 

 	


 

 $

 
 	

 
$ G
\ #'#%(^^,0;
C;
  ;
 	;

 ;
 ;
 #;
 *;
 ;
 ;
 
;
  ;
z ^ %(^^-5	RR #R +	R
 
R  R: 	 ! !!=A""#!%(^^8<-5/
/
 /
 ;	/

 /
  /
 /
 /
 #/
 6/
 +/
 
!/
 " /
h # #J ? ? ? ? ? ? 
 
 
 
 ? ? ? ? ? ? > > ? ? " "& '!3G!<=H3 > H3T 	V3Eg3NO2 P 2r   r   ac  
{desc}

Parameters
----------
axis : {axis_descr}
    Axis for the function to be applied on.
    For `Series` this parameter is unused and defaults to 0.

    For DataFrames, specifying ``axis=None`` will apply the aggregation
    across both axes.

    .. versionadded:: 2.0.0

skipna : bool, default True
    Exclude NA/null values when computing the result.
numeric_only : bool, default False
    Include only float, int, boolean columns. Not implemented for Series.

{min_count}**kwargs
    Additional keyword arguments to be passed to the function.

Returns
-------
{name1} or scalar{see_also}{examples}
a  
{desc}

Parameters
----------
axis : {axis_descr}
    Axis for the function to be applied on.
    For `Series` this parameter is unused and defaults to 0.

    .. warning::

        The behavior of DataFrame.{name} with ``axis=None`` is deprecated,
        in a future version this will reduce over both axes and return a scalar.
        To retain the old behavior, pass axis=0 (or do not pass axis).

    .. versionadded:: 2.0.0

skipna : bool, default True
    Exclude NA/null values when computing the result.
numeric_only : bool, default False
    Include only float, int, boolean columns. Not implemented for Series.

{min_count}**kwargs
    Additional keyword arguments to be passed to the function.

Returns
-------
{name1} or scalar{see_also}{examples}
a!  
{desc}

Parameters
----------
axis : {axis_descr}
    For `Series` this parameter is unused and defaults to 0.

    .. warning::

        The behavior of DataFrame.{name} with ``axis=None`` is deprecated,
        in a future version this will reduce over both axes and return a scalar
        To retain the old behavior, pass axis=0 (or do not pass axis).

skipna : bool, default True
    Exclude NA/null values. If an entire row/column is NA, the result
    will be NA.
ddof : int, default 1
    Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
    where N represents the number of elements.
numeric_only : bool, default False
    Include only float, int, boolean columns. Not implemented for Series.

Returns
-------
{name1} or {name2} (if level specified) {notes}{examples}
zg

Notes
-----
To have the same behaviour as `numpy.std`, use `ddof=0` (instead of the
default `ddof=1`)ay  

Examples
--------
>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3],
...                    'age': [21, 25, 62, 43],
...                    'height': [1.61, 1.87, 1.49, 2.01]}
...                   ).set_index('person_id')
>>> df
           age  height
person_id
0           21    1.61
1           25    1.87
2           62    1.49
3           43    2.01

The standard deviation of the columns can be found as follows:

>>> df.std()
age       18.786076
height     0.237417
dtype: float64

Alternatively, `ddof=0` can be set to normalize by N instead of N-1:

>>> df.std(ddof=0)
age       16.269219
height     0.205609
dtype: float64a?  
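
The same computation applies to a single column; as a sketch, selecting the
column first returns the standard deviation as a scalar (roughly 18.79 for
the ``age`` column above):

>>> df['age'].std()  # doctest: +SKIP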

Examples
--------
>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3],
...                    'age': [21, 25, 62, 43],
...                    'height': [1.61, 1.87, 1.49, 2.01]}
...                   ).set_index('person_id')
>>> df
           age  height
person_id
0           21    1.61
1           25    1.87
2           62    1.49
3           43    2.01

>>> df.var()
age       352.916667
height      0.056367
dtype: float64

Alternatively, ``ddof=0`` can be set to normalize by N instead of N-1:

>>> df.var(ddof=0)
age       264.687500
height      0.042275
dtype: float64aA  
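
As a consistency sketch, the standard deviations shown for ``std`` are the
square roots of these variances:

>>> np.sqrt(df.var())  # doctest: +SKIP
age       18.786076
height     0.237417
dtype: float64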
{desc}

Parameters
----------
axis : {{0 or 'index', 1 or 'columns', None}}, default 0
    Indicate which axis or axes should be reduced. For `Series` this parameter
    is unused and defaults to 0.

    * 0 / 'index' : reduce the index, return a Series whose index is the
      original column labels.
    * 1 / 'columns' : reduce the columns, return a Series whose index is the
      original index.
    * None : reduce all axes, return a scalar.

bool_only : bool, default False
    Include only boolean columns. Not implemented for Series.
skipna : bool, default True
    Exclude NA/null values. If the entire row/column is NA and skipna is
    True, then the result will be {empty_value}, as for an empty row/column.
    If skipna is False, then NA are treated as True, because these are not
    equal to zero.
**kwargs : any, default None
    Additional keywords have no effect but might be accepted for
    compatibility with NumPy.

Returns
-------
{name1} or {name2}
    If level is specified, then {name2} is returned; otherwise, {name1}
    is returned.

{see_also}
{examples}zReturn whether all elements are True, potentially over an axis.

Returns True unless there is at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty).a  Examples
--------
**Series**

>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([], dtype="float64").all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True

**DataFrames**

Create a dataframe from a dictionary.

>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
   col1   col2
0  True   True
1  True  False

Default behaviour checks if values in each column all return True.

>>> df.all()
col1     True
col2    False
dtype: bool

Specify ``axis='columns'`` to check if values in each row all return True.

>>> df.all(axis='columns')
0     True
1    False
dtype: bool

Or ``axis=None`` for whether every value is True.

>>> df.all(axis=None)
False
zSee Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
aZ  
Return cumulative {desc} over a DataFrame or Series axis.

Returns a DataFrame or Series of the same size containing the cumulative
{desc}.

Parameters
----------
axis : {{0 or 'index', 1 or 'columns'}}, default 0
    The index or the name of the axis. 0 is equivalent to None or 'index'.
    For `Series` this parameter is unused and defaults to 0.
skipna : bool, default True
    Exclude NA/null values. If an entire row/column is NA, the result
    will be NA.
*args, **kwargs
    Additional keywords have no effect but might be accepted for
    compatibility with NumPy.

Returns
-------
{name1} or {name2}
    Return cumulative {desc} of {name1} or {name2}.

See Also
--------
core.window.expanding.Expanding.{accum_func_name} : Similar functionality
    but ignores ``NaN`` values.
{name2}.{accum_func_name} : Return the {desc} over
    {name2} axis.
{name2}.cummax : Return cumulative maximum over {name2} axis.
{name2}.cummin : Return cumulative minimum over {name2} axis.
{name2}.cumsum : Return cumulative sum over {name2} axis.
{name2}.cumprod : Return cumulative product over {name2} axis.

{examples}a  Examples
--------
**Series**

>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0    2.0
1    NaN
2    5.0
3   -1.0
4    0.0
dtype: float64

By default, NA values are ignored.

>>> s.cummin()
0    2.0
1    NaN
2    2.0
3   -1.0
4   -1.0
dtype: float64

To include NA values in the operation, use ``skipna=False``

>>> s.cummin(skipna=False)
0    2.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

**DataFrame**

>>> df = pd.DataFrame([[2.0, 1.0],
...                    [3.0, np.nan],
...                    [1.0, 0.0]],
...                   columns=list('AB'))
>>> df
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0

By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.

>>> df.cummin()
     A    B
0  2.0  1.0
1  2.0  NaN
2  1.0  0.0

To iterate over columns and find the minimum in each row,
use ``axis=1``

>>> df.cummin(axis=1)
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0
a  Examples
--------
**Series**

>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0    2.0
1    NaN
2    5.0
3   -1.0
4    0.0
dtype: float64

By default, NA values are ignored.

>>> s.cumsum()
0    2.0
1    NaN
2    7.0
3    6.0
4    6.0
dtype: float64

To include NA values in the operation, use ``skipna=False``

>>> s.cumsum(skipna=False)
0    2.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

**DataFrame**

>>> df = pd.DataFrame([[2.0, 1.0],
...                    [3.0, np.nan],
...                    [1.0, 0.0]],
...                   columns=list('AB'))
>>> df
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0

By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.

>>> df.cumsum()
     A    B
0  2.0  1.0
1  5.0  NaN
2  6.0  1.0

To iterate over columns and find the sum in each row,
use ``axis=1``

>>> df.cumsum(axis=1)
     A    B
0  2.0  3.0
1  3.0  NaN
2  1.0  1.0
a  Examples
--------
**Series**

>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0    2.0
1    NaN
2    5.0
3   -1.0
4    0.0
dtype: float64

By default, NA values are ignored.

>>> s.cumprod()
0     2.0
1     NaN
2    10.0
3   -10.0
4    -0.0
dtype: float64

To include NA values in the operation, use ``skipna=False``

>>> s.cumprod(skipna=False)
0    2.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

**DataFrame**

>>> df = pd.DataFrame([[2.0, 1.0],
...                    [3.0, np.nan],
...                    [1.0, 0.0]],
...                   columns=list('AB'))
>>> df
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0

By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.

>>> df.cumprod()
     A    B
0  2.0  1.0
1  6.0  NaN
2  6.0  0.0

To iterate over columns and find the product in each row,
use ``axis=1``

>>> df.cumprod(axis=1)
     A    B
0  2.0  2.0
1  3.0  NaN
2  1.0  0.0
a  Examples
--------
**Series**

>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0    2.0
1    NaN
2    5.0
3   -1.0
4    0.0
dtype: float64

By default, NA values are ignored.

>>> s.cummax()
0    2.0
1    NaN
2    5.0
3    5.0
4    5.0
dtype: float64

To include NA values in the operation, use ``skipna=False``

>>> s.cummax(skipna=False)
0    2.0
1    NaN
2    NaN
3    NaN
4    NaN
dtype: float64

**DataFrame**

>>> df = pd.DataFrame([[2.0, 1.0],
...                    [3.0, np.nan],
...                    [1.0, 0.0]],
...                   columns=list('AB'))
>>> df
     A    B
0  2.0  1.0
1  3.0  NaN
2  1.0  0.0

By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.

>>> df.cummax()
     A    B
0  2.0  1.0
1  3.0  NaN
2  3.0  1.0

To iterate over columns and find the maximum in each row,
use ``axis=1``

>>> df.cummax(axis=1)
     A    B
0  2.0  2.0
1  3.0  NaN
2  1.0  1.0
a2  See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
zReturn whether any element is True, potentially over an axis.

Returns False unless there is at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty).a\  Examples
--------
**Series**

For Series input, the output is a scalar indicating whether any element
is True.

>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([], dtype="float64").any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True

**DataFrame**

Whether each column contains at least one True element (the default).

>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
   A  B  C
0  1  0  0
1  2  2  0

>>> df.any()
A     True
B     True
C    False
dtype: bool

Aggregating over the columns.

>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
       A  B
0   True  1
1  False  2

>>> df.any(axis='columns')
0    True
1    True
dtype: bool

>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
       A  B
0   True  1
1  False  0

>>> df.any(axis='columns')
0    True
1    False
dtype: bool

Aggregating over the entire DataFrame with ``axis=None``.

>>> df.any(axis=None)
True

`any` for an empty DataFrame is an empty Series.

>>> pd.DataFrame([]).any()
Series([], dtype: bool)
a  

Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
...     ['warm', 'warm', 'cold', 'cold'],
...     ['dog', 'falcon', 'fish', 'spider']],
...     names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded  animal
warm     dog       4
         falcon    2
cold     fish      0
         spider    8
Name: legs, dtype: int64

>>> s.{stat_func}()
{default_output}stat_func_examplerQ  Sum         )	stat_funcverbdefault_outputlevel_output_0level_output_1a  

By default, the sum of an empty or all-NA Series is ``0``.

>>> pd.Series([], dtype="float64").sum()  # min_count=0 is the default
0.0

This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.

>>> pd.Series([], dtype="float64").sum(min_count=1)
nan

Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.

>>> pd.Series([np.nan]).sum()
0.0

>>> pd.Series([np.nan]).sum(min_count=1)
nanr  Max   r   _max_examplesr  Minr+  _min_examplesa  
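
The same ``min_count`` semantics apply to non-empty input; a brief sketch:

>>> pd.Series([1.0, np.nan]).sum(min_count=2)  # doctest: +SKIP
nan
>>> pd.Series([1.0, np.nan]).sum(min_count=1)  # doctest: +SKIP
1.0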

See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis.a  

Examples
--------
By default, the product of an empty or all-NA Series is ``1``

>>> pd.Series([], dtype="float64").prod()
1.0

This can be controlled with the ``min_count`` parameter

>>> pd.Series([], dtype="float64").prod(min_count=1)
nan

Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.

>>> pd.Series([np.nan]).prod()
1.0

>>> pd.Series([np.nan]).prod(min_count=1)
nan

min_count : int, default 0
    The required number of valid values to perform the operation. If fewer than
    ``min_count`` non-NA values are present the result will be NA.

Generate the docstring for a Series/DataFrame reduction.

Return the minimum of the values over the requested axis.

If you want the *index* of the minimum, use ``idxmin``. This is the
equivalent of the ``numpy.ndarray`` method ``argmin``.

Return the maximum of the values over the requested axis.

If you want the *index* of the maximum, use ``idxmax``. This is the
equivalent of the ``numpy.ndarray`` method ``argmax``.

Return the sum of the values over the requested axis.

This is equivalent to the method ``numpy.sum``.

Return the product of the values over the requested axis.

Return the median of the values over the requested axis.

            Examples
            --------
            >>> s = pd.Series([1, 2, 3])
            >>> s.median()
            2.0

            With a DataFrame

            >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])
            >>> df
                   a   b
            tiger  1   2
            zebra  2   3
            >>> df.median()
            a   1.5
            b   2.5
            dtype: float64

            Using axis=1

            >>> df.median(axis=1)
            tiger   1.5
            zebra   2.5
            dtype: float64

            In this case, `numeric_only` should be set to `True`
            to avoid getting an error.

            >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},
            ...                   index=['tiger', 'zebra'])
            >>> df.median(numeric_only=True)
            a   1.5
            dtype: float64

Return the mean of the values over the requested axis.

            Examples
            --------
            >>> s = pd.Series([1, 2, 3])
            >>> s.mean()
            2.0

            With a DataFrame

            >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])
            >>> df
                   a   b
            tiger  1   2
            zebra  2   3
            >>> df.mean()
            a   1.5
            b   2.5
            dtype: float64

            Using axis=1

            >>> df.mean(axis=1)
            tiger   1.5
            zebra   2.5
            dtype: float64

            In this case, `numeric_only` should be set to `True` to avoid
            getting an error.

            >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},
            ...                   index=['tiger', 'zebra'])
            >>> df.mean(numeric_only=True)
            a   1.5
            dtype: float64

Return unbiased variance over requested axis.

Normalized by N-1 by default. This can be changed using the ddof argument.

Return sample standard deviation over requested axis.

Normalized by N-1 by default. This can be changed using the ddof argument.

Return unbiased standard error of the mean over requested axis.

Normalized by N-1 by default. This can be changed using the ddof argument.
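
As an illustration not taken from the original docstrings, the ``ddof`` argument
switches between the sample and the population estimate:

>>> s = pd.Series([1, 2, 3])
>>> s.std()
1.0
>>> s.std(ddof=0).round(6)
0.816497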

            Examples
            --------
            >>> s = pd.Series([1, 2, 3])
            >>> s.sem().round(6)
            0.57735

            With a DataFrame

            >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])
            >>> df
                   a   b
            tiger  1   2
            zebra  2   3
            >>> df.sem()
            a   0.5
            b   0.5
            dtype: float64

            Using axis=1

            >>> df.sem(axis=1)
            tiger   0.5
            zebra   0.5
            dtype: float64

            In this case, `numeric_only` should be set to `True`
            to avoid getting an error.

            >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},
            ...                   index=['tiger', 'zebra'])
            >>> df.sem(numeric_only=True)
            a   0.5
            dtype: float64

Return unbiased skew over requested axis.

Normalized by N-1.

            Examples
            --------
            >>> s = pd.Series([1, 2, 3])
            >>> s.skew()
            0.0

            With a DataFrame

            >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4], 'c': [1, 3, 5]},
            ...                   index=['tiger', 'zebra', 'cow'])
            >>> df
                    a   b   c
            tiger   1   2   1
            zebra   2   3   3
            cow     3   4   5
            >>> df.skew()
            a   0.0
            b   0.0
            c   0.0
            dtype: float64

            Using axis=1

            >>> df.skew(axis=1)
            tiger   1.732051
            zebra  -1.732051
            cow     0.000000
            dtype: float64

            In this case, `numeric_only` should be set to `True` to avoid
            getting an error.

            >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['T', 'Z', 'X']},
            ...                   index=['tiger', 'zebra', 'cow'])
            >>> df.skew(numeric_only=True)
            a   0.0
            dtype: float64

Return unbiased kurtosis over requested axis.

Kurtosis obtained using Fisher's definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.

            Examples
            --------
            >>> s = pd.Series([1, 2, 2, 3], index=['cat', 'dog', 'dog', 'mouse'])
            >>> s
            cat    1
            dog    2
            dog    2
            mouse  3
            dtype: int64
            >>> s.kurt()
            1.5

            With a DataFrame

            >>> df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [3, 4, 4, 4]},
            ...                   index=['cat', 'dog', 'dog', 'mouse'])
            >>> df
                   a   b
              cat  1   3
              dog  2   4
              dog  2   4
            mouse  3   4
            >>> df.kurt()
            a   1.5
            b   4.0
            dtype: float64

            With axis=None

            >>> df.kurt(axis=None).round(6)
            -0.988693

            Using axis=1

            >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [3, 4], 'd': [1, 2]},
            ...                   index=['cat', 'dog'])
            >>> df.kurt(axis=1)
            cat   -6.0
            dog   -6.0
            dtype: float64
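
A minimal, self-contained sketch of the docstring-factory pattern these blocks feed
into. The stub templates and most names below are assumptions for illustration, not the
recovered pandas source:

_see_also_stub = """
See Also
--------
Series.sum : Return the sum."""

_examples_stub = {
    "sum": "\nExamples\n--------\n>>> pd.Series([1, 2]).sum()\n3",
    "prod": "\nExamples\n--------\n>>> pd.Series([2, 3]).prod()\n6",
}

# Tiny stand-in for a base template such as ``_num_doc``.
_num_doc_stub = """\
{desc}

Returns
-------
{name1} or {name2}
{see_also}
{examples}
"""


def make_doc(name: str, ndim: int) -> str:
    """Generate the docstring for a Series/DataFrame reduction."""
    if ndim == 1:
        name1, name2 = "scalar", "Series"
    else:
        name1, name2 = "Series", "DataFrame"
    if name == "sum":
        desc = "Return the sum of the values over the requested axis."
    elif name == "prod":
        desc = "Return the product of the values over the requested axis."
    else:
        raise ValueError(f"no docstring template for {name!r}")
    return _num_doc_stub.format(
        desc=desc,
        name1=name1,
        name2=name2,
        see_also=_see_also_stub,
        examples=_examples_stub[name],
    )


# Plausibly consumed as, e.g.:  DataFrame.sum.__doc__ = make_doc("sum", ndim=2)
print(make_doc("sum", ndim=2))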
[The remaining compiled fragment is not recoverable; the surviving names (base_doc,
desc, see_also, examples, docstr, make_doc) indicate only the final
``base_doc.format(...)`` call that assembles and returns each docstring.]