Participer au site avec un Tip
Rechercher
 

Améliorations / Corrections

Vous avez des améliorations (ou des corrections) à proposer pour ce document : je vous remercie par avance de m'en faire part, cela m'aide à améliorer le site.

Emplacement :

Description des améliorations :

Contenu du module « pandas »

Liste des classes du module pandas

Nom de la classe Description
BooleanDtype
Categorical
CategoricalDtype
CategoricalIndex
DataFrame
DateOffset
DatetimeIndex
DatetimeTZDtype
ExcelFile
ExcelWriter
Flags
Float32Dtype
Float64Dtype
Float64Index
Grouper
HDFStore
Index
Int16Dtype
Int32Dtype
Int64Dtype
Int64Index
Int8Dtype
Interval
IntervalDtype
IntervalIndex
MultiIndex
NamedAgg NamedAgg(column, aggfunc) [extrait de NamedAgg.__doc__]
option_context
Period
PeriodDtype
PeriodIndex
RangeIndex
Series
SparseDtype
StringDtype
Timedelta
TimedeltaIndex
Timestamp
UInt16Dtype
UInt32Dtype
UInt64Dtype
UInt64Index
UInt8Dtype

Liste des fonctions du module pandas

Signature de la fonction Description
array(data: 'Union[Sequence[object], AnyArrayLike]', dtype: 'Optional[Dtype]' = None, copy: 'bool' = True) -> 'ExtensionArray'
bdate_range(start=None, end=None, periods=None, freq='B', tz=None, normalize=True, name=None, weekmask=None, holidays=None, closed=None, **kwargs) -> pandas.core.indexes.datetimes.DatetimeIndex
concat(objs: Union[Iterable[ForwardRef('NDFrame')], Mapping[Optional[Hashable], ForwardRef('NDFrame')]], axis=0, join='outer', ignore_index: bool = False, keys=None, levels=None, names=None, verify_integrity: bool = False, sort: bool = False, copy: bool = True) -> Union[ForwardRef('DataFrame'), ForwardRef('Series')]
crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins=False, margins_name: str = 'All', dropna: bool = True, normalize=False) -> 'DataFrame'
cut(x, bins, right: bool = True, labels=None, retbins: bool = False, precision: int = 3, include_lowest: bool = False, duplicates: str = 'raise', ordered: bool = True)
date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=None, **kwargs) -> pandas.core.indexes.datetimes.DatetimeIndex
describe_option(*args, **kwds)
eval(expr, parser='pandas', engine: Optional[str] = None, truediv=<object object at 0x7f5051439e10>, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False)
factorize(values, sort: 'bool' = False, na_sentinel: 'Optional[int]' = -1, size_hint: 'Optional[int]' = None) -> "Tuple[np.ndarray, Union[np.ndarray, 'Index']]"
get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False, columns=None, sparse=False, drop_first=False, dtype=None) -> 'DataFrame'
get_option(*args, **kwds)
infer_freq(index, warn: bool = True) -> Optional[str]
interval_range(start=None, end=None, periods=None, freq=None, name=None, closed='right')
isna(obj)
lreshape(data: 'DataFrame', groups, dropna: bool = True, label=None) -> 'DataFrame'
melt(frame: 'DataFrame', id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None, ignore_index: bool = True) -> 'DataFrame'
merge(left, right, how: str = 'inner', on=None, left_on=None, right_on=None, left_index: bool = False, right_index: bool = False, sort: bool = False, suffixes=('_x', '_y'), copy: bool = True, indicator: bool = False, validate=None) -> 'DataFrame'
merge_asof(left, right, on=None, left_on=None, right_on=None, left_index: bool = False, right_index: bool = False, by=None, left_by=None, right_by=None, suffixes=('_x', '_y'), tolerance=None, allow_exact_matches: bool = True, direction: str = 'backward') -> 'DataFrame'
merge_ordered(left, right, on=None, left_on=None, right_on=None, left_by=None, right_by=None, fill_method=None, suffixes=('_x', '_y'), how: str = 'outer') -> 'DataFrame'
notna(obj)
period_range(start=None, end=None, periods=None, freq=None, name=None) -> pandas.core.indexes.period.PeriodIndex
pivot(data: 'DataFrame', index: Union[Hashable, NoneType, Sequence[Optional[Hashable]]] = None, columns: Union[Hashable, NoneType, Sequence[Optional[Hashable]]] = None, values: Union[Hashable, NoneType, Sequence[Optional[Hashable]]] = None) -> 'DataFrame'
pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False, dropna=True, margins_name='All', observed=False) -> 'DataFrame'
qcut(x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = 'raise')
read_clipboard(sep='\\s+', **kwargs)
read_csv(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], sep=<object object at 0x7f5051439e10>, delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, skipfooter=0, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, cache_dates=True, iterator=False, chunksize=None, compression='infer', thousands=None, decimal: str = '.', lineterminator=None, quotechar='"', quoting=0, doublequote=True, escapechar=None, comment=None, encoding=None, dialect=None, error_bad_lines=True, warn_bad_lines=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None, storage_options: Optional[Dict[str, Any]] = None)
read_excel(io, sheet_name=0, header=0, names=None, index_col=None, usecols=None, squeeze=False, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, parse_dates=False, date_parser=None, thousands=None, comment=None, skipfooter=0, convert_float=True, mangle_dupe_cols=True, storage_options: Optional[Dict[str, Any]] = None)
read_feather(path, columns=None, use_threads: bool = True, storage_options: Optional[Dict[str, Any]] = None)
read_fwf(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], colspecs='infer', widths=None, infer_nrows=100, **kwds)
read_gbq(query: str, project_id: Optional[str] = None, index_col: Optional[str] = None, col_order: Optional[List[str]] = None, reauth: bool = False, auth_local_webserver: bool = False, dialect: Optional[str] = None, location: Optional[str] = None, configuration: Optional[Dict[str, Any]] = None, credentials=None, use_bqstorage_api: Optional[bool] = None, max_results: Optional[int] = None, progress_bar_type: Optional[str] = None) -> 'DataFrame'
read_hdf(path_or_buf, key=None, mode: str = 'r', errors: str = 'strict', where=None, start: Optional[int] = None, stop: Optional[int] = None, columns=None, iterator=False, chunksize: Optional[int] = None, **kwargs)
read_html(io: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], match: Union[str, Pattern] = '.+', flavor: Optional[str] = None, header: Union[int, Sequence[int], NoneType] = None, index_col: Union[int, Sequence[int], NoneType] = None, skiprows: Union[int, Sequence[int], slice, NoneType] = None, attrs: Optional[Dict[str, str]] = None, parse_dates: bool = False, thousands: Optional[str] = ',', encoding: Optional[str] = None, decimal: str = '.', converters: Optional[Dict] = None, na_values=None, keep_default_na: bool = True, displayed_only: bool = True) -> List[pandas.core.frame.DataFrame]
read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, convert_axes=None, convert_dates=True, keep_default_dates: bool = True, numpy: bool = False, precise_float: bool = False, date_unit=None, encoding=None, lines: bool = False, chunksize: Optional[int] = None, compression: Union[str, Dict[str, Any], NoneType] = 'infer', nrows: Optional[int] = None, storage_options: Optional[Dict[str, Any]] = None)
read_orc(path: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], columns: Optional[List[str]] = None, **kwargs) -> 'DataFrame'
read_parquet(path, engine: str = 'auto', columns=None, use_nullable_dtypes: bool = False, **kwargs)
read_pickle(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], compression: Union[str, Dict[str, Any], NoneType] = 'infer', storage_options: Optional[Dict[str, Any]] = None)
read_sas(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], format: Optional[str] = None, index: Optional[Hashable] = None, encoding: Optional[str] = None, chunksize: Optional[int] = None, iterator: bool = False) -> Union[ForwardRef('DataFrame'), pandas.io.sas.sasreader.ReaderBase]
read_spss(path: Union[str, pathlib.Path], usecols: Optional[Sequence[str]] = None, convert_categoricals: bool = True) -> pandas.core.frame.DataFrame
read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize: Optional[int] = None) -> Union[pandas.core.frame.DataFrame, Iterator[pandas.core.frame.DataFrame]]
read_sql_query(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize: Optional[int] = None) -> Union[pandas.core.frame.DataFrame, Iterator[pandas.core.frame.DataFrame]]
read_sql_table(table_name, con, schema=None, index_col=None, coerce_float=True, parse_dates=None, columns=None, chunksize: Optional[int] = None) -> Union[pandas.core.frame.DataFrame, Iterator[pandas.core.frame.DataFrame]]
read_stata(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], convert_dates: bool = True, convert_categoricals: bool = True, index_col: Optional[str] = None, convert_missing: bool = False, preserve_dtypes: bool = True, columns: Optional[Sequence[str]] = None, order_categoricals: bool = True, chunksize: Optional[int] = None, iterator: bool = False, storage_options: Optional[Dict[str, Any]] = None) -> Union[pandas.core.frame.DataFrame, pandas.io.stata.StataReader]
read_table(filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], sep=<object object at 0x7f5051439e10>, delimiter=None, header='infer', names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, skipfooter=0, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, cache_dates=True, iterator=False, chunksize=None, compression='infer', thousands=None, decimal: str = '.', lineterminator=None, quotechar='"', quoting=0, doublequote=True, escapechar=None, comment=None, encoding=None, dialect=None, error_bad_lines=True, warn_bad_lines=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None)
reset_option(*args, **kwds)
set_eng_float_format(accuracy: int = 3, use_eng_prefix: bool = False) -> None
set_option(*args, **kwds)
show_versions(as_json: Union[str, bool] = False) -> None
test(extra_args=None)
timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None) -> pandas.core.indexes.timedeltas.TimedeltaIndex
to_datetime(arg: Union[~DatetimeScalar, List, Tuple, ~ArrayLike, ForwardRef('Series')], errors: str = 'raise', dayfirst: bool = False, yearfirst: bool = False, utc: Optional[bool] = None, format: Optional[str] = None, exact: bool = True, unit: Optional[str] = None, infer_datetime_format: bool = False, origin='unix', cache: bool = True) -> Union[pandas.core.indexes.datetimes.DatetimeIndex, ForwardRef('Series'), ~DatetimeScalar, ForwardRef('NaTType')]
to_numeric(arg, errors='raise', downcast=None)
to_pickle(obj: Any, filepath_or_buffer: Union[ForwardRef('PathLike[str]'), str, IO[~T], io.RawIOBase, io.BufferedIOBase, io.TextIOBase, _io.TextIOWrapper, mmap.mmap], compression: Union[str, Dict[str, Any], NoneType] = 'infer', protocol: int = 5, storage_options: Optional[Dict[str, Any]] = None)
to_timedelta(arg, unit=None, errors='raise')
unique(values)
value_counts(values, sort: 'bool' = True, ascending: 'bool' = False, normalize: 'bool' = False, bins=None, dropna: 'bool' = True) -> 'Series'
wide_to_long(df: 'DataFrame', stubnames, i, j, sep: str = '', suffix: str = '\\d+') -> 'DataFrame'

Liste des variables globales du module pandas

Nom de la variable globale Valeur
IndexSlice <pandas.core.indexing._IndexSlice object at 0x7f504b64c940>
NA <NA>
NaT NaT
options <pandas._config.config.DictWrapper object at 0x7f504e55c5e0>

Liste des alias du module pandas

Nom de l'alias Définition ciblée
isnull isna
json_normalize _json_normalize
notnull notna