mirror of https://github.com/jellyfin/jellyfin-kodi.git, synced 2025-05-06 01:18:48 +00:00
Move libraries import

commit b2bc90cb06 (parent 3ea6890c34)
209 changed files with 36 additions and 14,941 deletions
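Every hunk below makes the same mechanical change: the add-on stops importing third-party modules from its vendored libraries package and resolves them from the normal Python path instead. A minimal before/after sketch (it assumes requests and dateutil are now supplied externally, e.g. by Kodi module add-ons; the diff itself does not show where they come from):

# Before this commit, the add-on shipped its own copies:
#     from libraries import requests
#     from libraries.dateutil import tz, parser
# After, the same names come from the interpreter's search path:
import requests
from dateutil import tz, parser

print(requests.__version__)
print(parser.parse("2018-05-09 12:00:00").replace(tzinfo=tz.tzutc()))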
@@ -13,7 +13,7 @@ import xbmc
 import xbmcvfs
 import xbmcaddon
 
-from libraries import requests
+import requests
 from helper.utils import should_stop, delete_folder
 from helper import settings, stop, event, window, kodi_version, unzip, create_id
 from emby import Emby
@@ -6,7 +6,7 @@ import json
 import logging
 import time
 
-from libraries import requests
+import requests
 from exceptions import HTTPException
 
 #################################################################################################
@@ -17,7 +17,7 @@ import client
 import library
 import setup
 import monitor
-from libraries import requests
+import requests
 from views import Views, verify_kodi_defaults
 from helper import _, window, settings, event, dialog, find, compare_version
 from downloader import get_objects
@@ -14,8 +14,8 @@ import api
 import database
 import client
 import collections
+import requests
 from . import _, settings, window, dialog
-from libraries import requests
 from downloader import TheVoid
 from emby import Emby
 
@@ -17,6 +17,7 @@ import xbmcgui
 import xbmcvfs
 
 from . import _
+from dateutil import tz, parser
 
 #################################################################################################
 
@@ -449,8 +450,6 @@ def convert_to_local(date):
 
     ''' Convert the local datetime to local.
     '''
-    from libraries.dateutil import tz, parser
-
     try:
         date = parser.parse(date) if type(date) in (unicode, str) else date
         date = date.replace(tzinfo=tz.tzutc())
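The hunk above shows only the top of convert_to_local. For readers following the change, here is a self-contained Python 3 sketch of the same UTC-to-local conversion; the original is Python 2 (hence the bare unicode check), and the final astimezone step is an assumption, since the hunk cuts off before the function returns:

from dateutil import tz, parser

def convert_to_local(date):
    # Accept either a string or a datetime, treat the naive value as UTC,
    # then shift it into the machine's local time zone.
    date = parser.parse(date) if isinstance(date, str) else date
    date = date.replace(tzinfo=tz.tzutc())
    return date.astimezone(tz.tzlocal())

print(convert_to_local("2018-05-09 12:00:00"))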
@@ -1,2 +0,0 @@
import requests
import dateutil
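That deleted two-line __init__.py is what made from libraries import requests resolve: a package's __init__ runs on first import, and every module it imports becomes an attribute of the package object. A runnable toy of the same mechanism (every name here is a hypothetical stand-in):

import sys
import types

pkg = types.ModuleType("libraries_demo")
pkg.requests = __import__("json")      # stand-in for the vendored package
sys.modules["libraries_demo"] = pkg

from libraries_demo import requests    # resolves to the package attribute
print(requests.dumps({"ok": True}))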
@@ -1,54 +0,0 @@
Copyright 2017- Paul Ganssle <paul@ganssle.io>
Copyright 2017- dateutil contributors (see AUTHORS file)

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

The above license applies to all contributions after 2017-12-01, as well as
all contributions that have been re-licensed (see AUTHORS file for the list of
contributors who have re-licensed their code).
--------------------------------------------------------------------------------
dateutil - Extensions to the standard Python datetime module.

Copyright (c) 2003-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
Copyright (c) 2012-2014 - Tomi Pieviläinen <tomi.pievilainen@iki.fi>
Copyright (c) 2014-2016 - Yaron de Leeuw <me@jarondl.net>
Copyright (c) 2015-     - Paul Ganssle <paul@ganssle.io>
Copyright (c) 2015-     - dateutil contributors (see AUTHORS file)

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.
    * Neither the name of the copyright holder nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

The above BSD License Applies to all code, even that also covered by Apache 2.0.
@@ -1,701 +0,0 @@
[deleted: the bundled dateutil NEWS changelog, release notes for every version from 0.5 through 2.7.3 (2018-05-09)]
@@ -1,158 +0,0 @@
dateutil - powerful extensions to datetime
==========================================

|pypi| |support| |licence|

|gitter| |readthedocs|

|travis| |appveyor| |coverage|

.. |pypi| image:: https://img.shields.io/pypi/v/python-dateutil.svg?style=flat-square
    :target: https://pypi.org/project/python-dateutil/
    :alt: pypi version

.. |support| image:: https://img.shields.io/pypi/pyversions/python-dateutil.svg?style=flat-square
    :target: https://pypi.org/project/python-dateutil/
    :alt: supported Python version

.. |travis| image:: https://img.shields.io/travis/dateutil/dateutil/master.svg?style=flat-square&label=Travis%20Build
    :target: https://travis-ci.org/dateutil/dateutil
    :alt: travis build status

.. |appveyor| image:: https://img.shields.io/appveyor/ci/dateutil/dateutil/master.svg?style=flat-square&logo=appveyor
    :target: https://ci.appveyor.com/project/dateutil/dateutil
    :alt: appveyor build status

.. |coverage| image:: https://codecov.io/github/dateutil/dateutil/coverage.svg?branch=master
    :target: https://codecov.io/github/dateutil/dateutil?branch=master
    :alt: Code coverage

.. |gitter| image:: https://badges.gitter.im/dateutil/dateutil.svg
    :alt: Join the chat at https://gitter.im/dateutil/dateutil
    :target: https://gitter.im/dateutil/dateutil

.. |licence| image:: https://img.shields.io/pypi/l/python-dateutil.svg?style=flat-square
    :target: https://pypi.org/project/python-dateutil/
    :alt: licence

.. |readthedocs| image:: https://img.shields.io/readthedocs/dateutil/latest.svg?style=flat-square&label=Read%20the%20Docs
    :alt: Read the documentation at https://dateutil.readthedocs.io/en/latest/
    :target: https://dateutil.readthedocs.io/en/latest/

The `dateutil` module provides powerful extensions to
the standard `datetime` module, available in Python.

Download
========
dateutil is available on PyPI
https://pypi.org/project/python-dateutil/

The documentation is hosted at:
https://dateutil.readthedocs.io/en/stable/

Code
====
The code and issue tracker are hosted on GitHub:
https://github.com/dateutil/dateutil/

Features
========

* Computing of relative deltas (next month, next year,
  next monday, last week of month, etc);
* Computing of relative deltas between two given
  date and/or datetime objects;
* Computing of dates based on very flexible recurrence rules,
  using a superset of the `iCalendar <https://www.ietf.org/rfc/rfc2445.txt>`_
  specification. Parsing of RFC strings is supported as well.
* Generic parsing of dates in almost any string format;
* Timezone (tzinfo) implementations for tzfile(5) format
  files (/etc/localtime, /usr/share/zoneinfo, etc), TZ
  environment string (in all known formats), iCalendar
  format files, given ranges (with help from relative deltas),
  local machine timezone, fixed offset timezone, UTC timezone,
  and Windows registry-based time zones.
* Internal up-to-date world timezone information based on
  Olson's database.
* Computing of Easter Sunday dates for any given year,
  using Western, Orthodox or Julian algorithms;
* A comprehensive test suite.

Quick example
=============
Here's a snapshot, just to give an idea about the power of the
package. For more examples, look at the documentation.

Suppose you want to know how much time is left, in
years/months/days/etc, before the next easter happening on a
year with a Friday 13th in August, and you want to get today's
date out of the "date" unix system command. Here is the code:

.. doctest:: readmeexample

    >>> from dateutil.relativedelta import *
    >>> from dateutil.easter import *
    >>> from dateutil.rrule import *
    >>> from dateutil.parser import *
    >>> from datetime import *
    >>> now = parse("Sat Oct 11 17:13:46 UTC 2003")
    >>> today = now.date()
    >>> year = rrule(YEARLY,dtstart=now,bymonth=8,bymonthday=13,byweekday=FR)[0].year
    >>> rdelta = relativedelta(easter(year), today)
    >>> print("Today is: %s" % today)
    Today is: 2003-10-11
    >>> print("Year with next Aug 13th on a Friday is: %s" % year)
    Year with next Aug 13th on a Friday is: 2004
    >>> print("How far is the Easter of that year: %s" % rdelta)
    How far is the Easter of that year: relativedelta(months=+6)
    >>> print("And the Easter of that year is: %s" % (today+rdelta))
    And the Easter of that year is: 2004-04-11

Being exactly 6 months ahead was **really** a coincidence :)

Contributing
============

We welcome many types of contributions - bug reports, pull requests (code, infrastructure or documentation fixes). For more information about how to contribute to the project, see the ``CONTRIBUTING.md`` file in the repository.

Author
======
The dateutil module was written by Gustavo Niemeyer <gustavo@niemeyer.net>
in 2003.

It is maintained by:

* Gustavo Niemeyer <gustavo@niemeyer.net> 2003-2011
* Tomi Pieviläinen <tomi.pievilainen@iki.fi> 2012-2014
* Yaron de Leeuw <me@jarondl.net> 2014-2016
* Paul Ganssle <paul@ganssle.io> 2015-

Starting with version 2.4.1, all source and binary distributions will be signed
by a PGP key that has, at the very least, been signed by the key which made the
previous release. A table of release signing keys can be found below:

===========  ============================
Releases     Signing key fingerprint
===========  ============================
2.4.1-       `6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB`_ (|pgp_mirror|_)
===========  ============================

Contact
=======
Our mailing list is available at `dateutil@python.org <https://mail.python.org/mailman/listinfo/dateutil>`_. As it is hosted by the PSF, it is subject to the `PSF code of
conduct <https://www.python.org/psf/codeofconduct/>`_.

License
=======

All contributions after December 1, 2017 released under dual license - either `Apache 2.0 License <https://www.apache.org/licenses/LICENSE-2.0>`_ or the `BSD 3-Clause License <https://opensource.org/licenses/BSD-3-Clause>`_. Contributions before December 1, 2017 - except those explicitly relicensed - are released only under the BSD 3-Clause License.

.. _6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB:
   https://pgp.mit.edu/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB

.. |pgp_mirror| replace:: mirror
.. _pgp_mirror: https://sks-keyservers.net/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB
@@ -1,8 +0,0 @@
# -*- coding: utf-8 -*-
try:
    from ._version import version as __version__
except ImportError:
    __version__ = 'unknown'

__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
           'utils', 'zoneinfo', 'six']
@@ -1,43 +0,0 @@
"""
Common code used in multiple modules.
"""


class weekday(object):
    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True

    def __hash__(self):
        return hash((
          self.weekday,
          self.n,
        ))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)

# vim:ts=4:sw=4:et
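The __call__ method above is what makes expressions like MO(+2) work: the module-level constants MO, TU, ... are weekday instances, and calling one returns a copy with n set. A short usage sketch against dateutil's public relativedelta API:

from datetime import date
from dateutil.relativedelta import relativedelta, MO

print(repr(MO(+2)))  # 'MO(+2)', formatted by weekday.__repr__
# weekday=MO(+1) means "the first Monday on or after the date":
print(date(2018, 5, 9) + relativedelta(weekday=MO(+1)))  # 2018-05-14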
@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers a generic easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""

import datetime

__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]

EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3


def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants:

    * ``EASTER_JULIAN = 1``
    * ``EASTER_ORTHODOX = 2``
    * ``EASTER_WESTERN = 3``

    The default method is method 3.

    More about the algorithm may be found at:

    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_

    and

    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_

    """

    if not (1 <= method <= 3):
        raise ValueError("invalid method")

    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)

    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g + 15) % 30
        j = (y + y//4 + i) % 7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e + y//100 - 16 - (y//100 - 16)//4
    else:
        # New method
        c = y//100
        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
        j = (y + y//4 + i + 2 - c + c//4) % 7

    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i - j + e
    d = 1 + (p + 27 + (p + 6)//40) % 31
    m = 3 + (p + 26)//30
    return datetime.date(int(y), int(m), int(d))
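A quick sanity check of the function against known dates, Western Easter and Orthodox Easter of 2018:

from dateutil.easter import easter, EASTER_ORTHODOX

print(easter(2018))                   # 2018-04-01 (Western, the default method)
print(easter(2018, EASTER_ORTHODOX))  # 2018-04-08 (Orthodox)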
@@ -1,60 +0,0 @@
# -*- coding: utf-8 -*-
from ._parser import parse, parser, parserinfo
from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
from ._parser import UnknownTimezoneWarning

from ._parser import __doc__

from .isoparser import isoparser, isoparse

__all__ = ['parse', 'parser', 'parserinfo',
           'isoparse', 'isoparser',
           'UnknownTimezoneWarning']


###
# Deprecate portions of the private interface so that downstream code that
# is improperly relying on it is given *some* notice.


def __deprecated_private_func(f):
    from functools import wraps
    import warnings

    msg = ('{name} is a private function and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=f.__name__)

    @wraps(f)
    def deprecated_func(*args, **kwargs):
        warnings.warn(msg, DeprecationWarning)
        return f(*args, **kwargs)

    return deprecated_func


def __deprecate_private_class(c):
    import warnings

    msg = ('{name} is a private class and may break without warning, '
           'it will be moved and or renamed in future versions.')
    msg = msg.format(name=c.__name__)

    class private_class(c):
        __doc__ = c.__doc__

        def __init__(self, *args, **kwargs):
            warnings.warn(msg, DeprecationWarning)
            super(private_class, self).__init__(*args, **kwargs)

    private_class.__name__ = c.__name__

    return private_class


from ._parser import _timelex, _resultbase
from ._parser import _tzparser, _parsetz

_timelex = __deprecate_private_class(_timelex)
_tzparser = __deprecate_private_class(_tzparser)
_resultbase = __deprecate_private_class(_resultbase)
_parsetz = __deprecated_private_func(_parsetz)
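The wrappers above only emit a warning; the private names keep working. A small sketch showing the warning fire (it assumes this bundled dateutil 2.7 layout, where _timelex is re-exported from dateutil.parser and accepts a plain string):

import warnings
from dateutil.parser import _timelex

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _timelex("2018-05-09")  # instantiating the deprecated class warns
print(caught[0].category.__name__)  # DeprecationWarning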
File diff suppressed because it is too large
@@ -1,406 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings

It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.

.. versionadded:: 2.7.0
"""
from datetime import datetime, timedelta, time, date
import calendar
from .. import tz

from functools import wraps

import re
from .. import six

__all__ = ["isoparse", "isoparser"]


def _takes_ascii(f):
    @wraps(f)
    def func(self, str_in, *args, **kwargs):
        # If it's a stream, read the whole thing
        str_in = getattr(str_in, 'read', lambda: str_in)()

        # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
        if isinstance(str_in, six.text_type):
            # ASCII is the same in UTF-8
            try:
                str_in = str_in.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)

        return f(self, str_in, *args, **kwargs)

    return func


class isoparser(object):
    def __init__(self, sep=None):
        """
        :param sep:
            A single character that separates date and time portions. If
            ``None``, the parser will accept any single character.
            For strict ISO-8601 adherence, pass ``'T'``.
        """
        if sep is not None:
            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
                raise ValueError('Separator must be a single, non-numeric ' +
                                 'ASCII character')

            sep = sep.encode('ascii')

        self._sep = sep

    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.

        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.

        Supported date formats are:

        Common:

        - ``YYYY``
        - ``YYYY-MM`` or ``YYYYMM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``

        Uncommon:

        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 1, Monday)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day

        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.

        Supported time formats are:

        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.sss`` or ``hh:mm:ss.ssssss`` (3-6 sub-second digits)

        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation.

        .. caution::

            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.

        Supported time zone offset formats are:

        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`

        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.

        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string

        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.

        .. warning::

            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.

        .. versionadded:: 2.7.0
        """
        components, pos = self._parse_isodate(dt_str)

        if len(dt_str) > pos:
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')

        return datetime(*components)

    @_takes_ascii
    def parse_isodate(self, datestr):
        """
        Parse the date portion of an ISO string.

        :param datestr:
            The string portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.date` object
        """
        components, pos = self._parse_isodate(datestr)
        if pos < len(datestr):
            raise ValueError('String contains unknown ISO ' +
                             'components: {}'.format(datestr))
        return date(*components)

    @_takes_ascii
    def parse_isotime(self, timestr):
        """
        Parse the time portion of an ISO string.

        :param timestr:
            The time portion of an ISO string, without a separator

        :return:
            Returns a :class:`datetime.time` object
        """
        return time(*self._parse_isotime(timestr))

    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.

        See :func:`isoparser.isoparse` for details on supported formats.

        :param tzstr:
            A string representing an ISO time zone offset

        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones

        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)

    # Constants
    _MICROSECOND_END_REGEX = re.compile(b'[-+Z]+')
    _DATE_SEP = b'-'
    _TIME_SEP = b':'
    _MICRO_SEP = b'.'

    def _parse_isodate(self, dt_str):
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)

    def _parse_isodate_common(self, dt_str):
        len_str = len(dt_str)
        components = [1, 1, 1]

        if len_str < 4:
            raise ValueError('ISO string too short')

        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos

        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1

        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')

        components[1] = int(dt_str[pos:pos + 2])
        pos += 2

        if pos >= len_str:
            if has_sep:
                return components, pos
            else:
                raise ValueError('Invalid ISO format')

        if has_sep:
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1

        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')
        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2

    def _parse_isodate_uncommon(self, dt_str):
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')

        # All ISO formats start with the year
        year = int(dt_str[0:4])

        has_sep = dt_str[4:5] == self._DATE_SEP

        pos = 4 + has_sep  # Skip '-' if it's there
        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2

            dayno = 1
            if len(dt_str) > pos:
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')

                pos += has_sep

                dayno = int(dt_str[pos:pos + 1])
                pos += 1

            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')

            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3

            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))

            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)

        components = [base_date.year, base_date.month, base_date.day]
        return components, pos

    def _calculate_weekdate(self, year, week, day):
        """
        Calculate the day of corresponding to the ISO year-week-day calendar.

        This function is effectively the inverse of
        :func:`datetime.date.isocalendar`.

        :param year:
            The year in the ISO calendar

        :param week:
            The week in the ISO calendar - range is [1, 53]

        :param day:
            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]

        :return:
            Returns a :class:`datetime.date`
        """
        if not 0 < week < 54:
            raise ValueError('Invalid week: {}'.format(week))

        if not 0 < day < 8:     # Range is 1-7
            raise ValueError('Invalid weekday: {}'.format(day))

        # Get week 1 for the specific year:
        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)

        # Now add the specific number of weeks and days to get what we want
        week_offset = (week - 1) * 7 + (day - 1)
        return week_1 + timedelta(days=week_offset)

    def _parse_isotime(self, timestr):
        len_str = len(timestr)
        components = [0, 0, 0, 0, None]
        pos = 0
        comp = -1

        if len(timestr) < 2:
            raise ValueError('ISO time too short')

        has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP

        while pos < len_str and comp < 5:
            comp += 1

            if timestr[pos:pos + 1] in b'-+Z':
                # Detect time zone boundary
                components[-1] = self._parse_tzstr(timestr[pos:])
                pos = len_str
                break

            if comp < 3:
                # Hour, minute, second
                components[comp] = int(timestr[pos:pos + 2])
                pos += 2
                if (has_sep and pos < len_str and
                        timestr[pos:pos + 1] == self._TIME_SEP):
                    pos += 1

            if comp == 3:
                # Microsecond
                if timestr[pos:pos + 1] != self._MICRO_SEP:
                    continue

                pos += 1
                us_str = self._MICROSECOND_END_REGEX.split(timestr[pos:pos + 6],
                                                           1)[0]

                components[comp] = int(us_str) * 10**(6 - len(us_str))
                pos += len(us_str)

        if pos < len_str:
            raise ValueError('Unused components in ISO string')

        if components[0] == 24:
            # Standard supports 00:00 and 24:00 as representations of midnight
            if any(component != 0 for component in components[1:4]):
                raise ValueError('Hour may only be 24 at 24:00:00.000')
            components[0] = 0

        return components

    def _parse_tzstr(self, tzstr, zero_as_utc=True):
        if tzstr == b'Z':
            return tz.tzutc()

        if len(tzstr) not in {3, 5, 6}:
            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')

        if tzstr[0:1] == b'-':
            mult = -1
        elif tzstr[0:1] == b'+':
            mult = 1
        else:
            raise ValueError('Time zone offset requires sign')

        hours = int(tzstr[1:3])
        if len(tzstr) == 3:
            minutes = 0
        else:
            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])

        if zero_as_utc and hours == 0 and minutes == 0:
            return tz.tzutc()
        else:
            if minutes > 59:
                raise ValueError('Invalid minutes in time zone offset')

            if hours > 23:
                raise ValueError('Invalid hours in time zone offset')

            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)


DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
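Reviewer note: the formats documented in isoparse() above behave as follows. A minimal sketch, assuming the replacement python-dateutil package ships the same isoparse entry point:

    from dateutil.parser import isoparse

    isoparse('2018-04-09')                 # datetime.datetime(2018, 4, 9, 0, 0)
    isoparse('2018-W15-1')                 # ISO week date; also 2018-04-09 (a Monday)
    isoparse('2018-04-09T13:37:00Z')       # tzinfo=tzutc()
    isoparse('2018-04-09T13:37:00+02:00')  # tzinfo=tzoffset(None, 7200)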
@@ -1,590 +0,0 @@
# -*- coding: utf-8 -*-
import datetime
import calendar

import operator
from math import copysign

from six import integer_types
from warnings import warn

from ._common import weekday

MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))

__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]


class relativedelta(object):
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his
    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
    However, notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes::

        relativedelta(datetime1, datetime2)

    The second one is passing it any number of the following keyword arguments::

        relativedelta(arg1=x,arg2=y,arg3=z...)

        year, month, day, hour, minute, second, microsecond:
            Absolute information (argument is singular); adding or subtracting a
            relativedelta with absolute information does not perform an arithmetic
            operation, but rather REPLACES the corresponding value in the
            original datetime with the value(s) in relativedelta.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative (argument is plural); adding
            or subtracting a relativedelta with relative information performs
            the corresponding arithmetic operation on the original datetime value
            with the information in the relativedelta.

        weekday:
            One of the weekday instances (MO, TU, etc). These
            instances may receive a parameter N, specifying the Nth
            weekday, which could be positive or negative (like MO(+1)
            or MO(-2)). Not specifying it is the same as specifying
            +1. You can also use an integer, where 0=MO. Notice that
            if the calculated date is already Monday, for example,
            using MO(1) or MO(-1) won't change the day.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of February.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    There are relative and absolute forms of the keyword
    arguments. The plural is relative, and the singular is
    absolute. For each argument in the order below, the absolute form
    is applied first (by setting each attribute to that value) and
    then the relative form (by adding the value to the attribute).

    The order of attributes considered when this relativedelta is
    added to a datetime is:

    1. Year
    2. Month
    3. Day
    4. Hours
    5. Minutes
    6. Seconds
    7. Microseconds

    Finally, weekday is applied, using the rule described above.

    For example

    >>> dt = datetime(2018, 4, 9, 13, 37, 0)
    >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
    >>> dt + delta
    datetime(2018, 4, 2, 14, 37, 0)

    First, the day is set to 1 (the first of the month), then 25 hours
    are added, to get to the 2nd day and 14th hour, finally the
    weekday is applied, but since the 2nd is already a Monday there is
    no effect.

    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):

        if dt1 and dt2:
            # datetime is a subclass of date. So both must be date
            if not (isinstance(dt1, datetime.date) and
                    isinstance(dt2, datetime.date)):
                raise TypeError("relativedelta only diffs datetime/date")

            # We allow two dates, or two datetimes, so we coerce them to be
            # of the same type
            if (isinstance(dt1, datetime.datetime) !=
                    isinstance(dt2, datetime.datetime)):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())

            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Get year / month delta between the two
            months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
            self._set_months(months)

            # Remove the year/month delta so the timedelta is just well-defined
            # time units (seconds, days and microseconds)
            dtm = self.__radd__(dt2)

            # If we've overshot our target, make an adjustment
            if dt1 < dt2:
                compare = operator.gt
                increment = 1
            else:
                compare = operator.lt
                increment = -1

            while compare(dt1, dtm):
                months += increment
                self._set_months(months)
                dtm = self.__radd__(dt2)

            # Get the timedelta between the "months-adjusted" date and dt1
            delta = dt1 - dtm
            self.seconds = delta.seconds + delta.days * 86400
            self.microseconds = delta.microseconds
        else:
            # Check for non-integer values in integer-only quantities
            if any(x is not None and x != int(x) for x in (years, months)):
                raise ValueError("Non-integer years and months are "
                                 "ambiguous and not currently supported.")

            # Relative information
            self.years = int(years)
            self.months = int(months)
            self.days = days + weeks * 7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds

            # Absolute information
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if any(x is not None and int(x) != x
                   for x in (year, month, day, hour,
                             minute, second, microsecond)):
                # For now we'll deprecate floats - later it'll be an error.
                warn("Non-integer value passed as absolute information. " +
                     "This is not a well-defined condition and will raise " +
                     "errors in future versions.", DeprecationWarning)

            if isinstance(weekday, integer_types):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                ydayidx = [31, 59, 90, 120, 151, 181, 212,
                           243, 273, 304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        if abs(self.microseconds) > 999999:
            s = _sign(self.microseconds)
            div, mod = divmod(self.microseconds * s, 1000000)
            self.microseconds = mod * s
            self.seconds += div * s
        if abs(self.seconds) > 59:
            s = _sign(self.seconds)
            div, mod = divmod(self.seconds * s, 60)
            self.seconds = mod * s
            self.minutes += div * s
        if abs(self.minutes) > 59:
            s = _sign(self.minutes)
            div, mod = divmod(self.minutes * s, 60)
            self.minutes = mod * s
            self.hours += div * s
        if abs(self.hours) > 23:
            s = _sign(self.hours)
            div, mod = divmod(self.hours * s, 24)
            self.hours = mod * s
            self.days += div * s
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years += div * s
        if (self.hours or self.minutes or self.seconds or self.microseconds
                or self.hour is not None or self.minute is not None or
                self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    @property
    def weeks(self):
        return int(self.days / 7.0)

    @weeks.setter
    def weeks(self, value):
        self.days = self.days - (self.weeks * 7) + value * 7

    def _set_months(self, months):
        self.months = months
        if abs(self.months) > 11:
            s = _sign(self.months)
            div, mod = divmod(self.months * s, 12)
            self.months = mod * s
            self.years = div * s
        else:
            self.years = 0

    def normalized(self):
        """
        Return a version of this object represented entirely using integer
        values for the relative attributes.

        >>> relativedelta(days=1.5, hours=2).normalized()
        relativedelta(days=1, hours=14)

        :return:
            Returns a :class:`dateutil.relativedelta.relativedelta` object.
        """
        # Cascade remainders down (rounding each to roughly nearest microsecond)
        days = int(self.days)

        hours_f = round(self.hours + 24 * (self.days - days), 11)
        hours = int(hours_f)

        minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
        minutes = int(minutes_f)

        seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
        seconds = int(seconds_f)

        microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))

        # Constructor carries overflow back up with call to _fix()
        return self.__class__(years=self.years, months=self.months,
                              days=days, hours=hours, minutes=minutes,
                              seconds=seconds, microseconds=microseconds,
                              leapdays=self.leapdays, year=self.year,
                              month=self.month, day=self.day,
                              weekday=self.weekday, hour=self.hour,
                              minute=self.minute, second=self.second,
                              microsecond=self.microsecond)

    def __add__(self, other):
        if isinstance(other, relativedelta):
            return self.__class__(years=other.years + self.years,
                                  months=other.months + self.months,
                                  days=other.days + self.days,
                                  hours=other.hours + self.hours,
                                  minutes=other.minutes + self.minutes,
                                  seconds=other.seconds + self.seconds,
                                  microseconds=(other.microseconds +
                                                self.microseconds),
                                  leapdays=other.leapdays or self.leapdays,
                                  year=(other.year if other.year is not None
                                        else self.year),
                                  month=(other.month if other.month is not None
                                         else self.month),
                                  day=(other.day if other.day is not None
                                       else self.day),
                                  weekday=(other.weekday if other.weekday is not None
                                           else self.weekday),
                                  hour=(other.hour if other.hour is not None
                                        else self.hour),
                                  minute=(other.minute if other.minute is not None
                                          else self.minute),
                                  second=(other.second if other.second is not None
                                          else self.second),
                                  microsecond=(other.microsecond if other.microsecond
                                               is not None else
                                               self.microsecond))
        if isinstance(other, datetime.timedelta):
            return self.__class__(years=self.years,
                                  months=self.months,
                                  days=self.days + other.days,
                                  hours=self.hours,
                                  minutes=self.minutes,
                                  seconds=self.seconds + other.seconds,
                                  microseconds=self.microseconds + other.microseconds,
                                  leapdays=self.leapdays,
                                  year=self.year,
                                  month=self.month,
                                  day=self.day,
                                  weekday=self.weekday,
                                  hour=self.hour,
                                  minute=self.minute,
                                  second=self.second,
                                  microsecond=self.microsecond)
        if not isinstance(other, datetime.date):
            return NotImplemented
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth) - 1) * 7
            if nth > 0:
                jumpdays += (7 - ret.weekday() + weekday) % 7
            else:
                jumpdays += (ret.weekday() - weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __radd__(self, other):
        return self.__add__(other)

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __sub__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented   # In case the other object defines __rsub__
        return self.__class__(years=self.years - other.years,
                              months=self.months - other.months,
                              days=self.days - other.days,
                              hours=self.hours - other.hours,
                              minutes=self.minutes - other.minutes,
                              seconds=self.seconds - other.seconds,
                              microseconds=self.microseconds - other.microseconds,
                              leapdays=self.leapdays or other.leapdays,
                              year=(self.year if self.year is not None
                                    else other.year),
                              month=(self.month if self.month is not None else
                                     other.month),
                              day=(self.day if self.day is not None else
                                   other.day),
                              weekday=(self.weekday if self.weekday is not None else
                                       other.weekday),
                              hour=(self.hour if self.hour is not None else
                                    other.hour),
                              minute=(self.minute if self.minute is not None else
                                      other.minute),
                              second=(self.second if self.second is not None else
                                      other.second),
                              microsecond=(self.microsecond if self.microsecond
                                           is not None else
                                           other.microsecond))

    def __abs__(self):
        return self.__class__(years=abs(self.years),
                              months=abs(self.months),
                              days=abs(self.days),
                              hours=abs(self.hours),
                              minutes=abs(self.minutes),
                              seconds=abs(self.seconds),
                              microseconds=abs(self.microseconds),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    def __neg__(self):
        return self.__class__(years=-self.years,
                              months=-self.months,
                              days=-self.days,
                              hours=-self.hours,
                              minutes=-self.minutes,
                              seconds=-self.seconds,
                              microseconds=-self.microseconds,
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    def __bool__(self):
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Compatibility with Python 2.x
    __nonzero__ = __bool__

    def __mul__(self, other):
        try:
            f = float(other)
        except TypeError:
            return NotImplemented

        return self.__class__(years=int(self.years * f),
                              months=int(self.months * f),
                              days=int(self.days * f),
                              hours=int(self.hours * f),
                              minutes=int(self.minutes * f),
                              seconds=int(self.seconds * f),
                              microseconds=int(self.microseconds * f),
                              leapdays=self.leapdays,
                              year=self.year,
                              month=self.month,
                              day=self.day,
                              weekday=self.weekday,
                              hour=self.hour,
                              minute=self.minute,
                              second=self.second,
                              microsecond=self.microsecond)

    __rmul__ = __mul__

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return NotImplemented
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __hash__(self):
        return hash((
            self.weekday,
            self.years,
            self.months,
            self.days,
            self.hours,
            self.minutes,
            self.seconds,
            self.microseconds,
            self.leapdays,
            self.year,
            self.month,
            self.day,
            self.hour,
            self.minute,
            self.second,
            self.microsecond,
        ))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        try:
            reciprocal = 1 / float(other)
        except TypeError:
            return NotImplemented

        return self.__mul__(reciprocal)

    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("{attr}={value:+g}".format(attr=attr, value=value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                l.append("{attr}={value}".format(attr=attr, value=repr(value)))
        return "{classname}({attrs})".format(classname=self.__class__.__name__,
                                             attrs=", ".join(l))


def _sign(x):
    return int(copysign(1, x))

# vim:ts=4:sw=4:et
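Reviewer note: the docstring's ordering rule (absolute values first, then relative deltas, then weekday) and the month-length clamping in __add__ can be checked directly. A minimal sketch, assuming the system python-dateutil provides the same relativedelta:

    import datetime
    from dateutil.relativedelta import relativedelta, MO

    dt = datetime.datetime(2018, 4, 9, 13, 37, 0)
    # day=1 is applied first, then +25 hours, then MO(1); April 2nd is already a Monday.
    print(dt + relativedelta(hours=25, day=1, weekday=MO(1)))    # 2018-04-02 14:37:00

    # __add__ clamps the day to the target month's length (calendar.monthrange),
    # so month arithmetic never overflows into the next month.
    print(datetime.date(2018, 1, 31) + relativedelta(months=1))  # 2018-02-28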
File diff suppressed because it is too large
@ -1,891 +0,0 @@
|
|||
# Copyright (c) 2010-2017 Benjamin Peterson
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
"""Utilities for writing code that runs on Python 2 and 3"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import functools
|
||||
import itertools
|
||||
import operator
|
||||
import sys
|
||||
import types
|
||||
|
||||
__author__ = "Benjamin Peterson <benjamin@python.org>"
|
||||
__version__ = "1.11.0"
|
||||
|
||||
|
||||
# Useful for very coarse version differentiation.
|
||||
PY2 = sys.version_info[0] == 2
|
||||
PY3 = sys.version_info[0] == 3
|
||||
PY34 = sys.version_info[0:2] >= (3, 4)
|
||||
|
||||
if PY3:
|
||||
string_types = str,
|
||||
integer_types = int,
|
||||
class_types = type,
|
||||
text_type = str
|
||||
binary_type = bytes
|
||||
|
||||
MAXSIZE = sys.maxsize
|
||||
else:
|
||||
string_types = basestring,
|
||||
integer_types = (int, long)
|
||||
class_types = (type, types.ClassType)
|
||||
text_type = unicode
|
||||
binary_type = str
|
||||
|
||||
if sys.platform.startswith("java"):
|
||||
# Jython always uses 32 bits.
|
||||
MAXSIZE = int((1 << 31) - 1)
|
||||
else:
|
||||
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
|
||||
class X(object):
|
||||
|
||||
def __len__(self):
|
||||
return 1 << 31
|
||||
try:
|
||||
len(X())
|
||||
except OverflowError:
|
||||
# 32-bit
|
||||
MAXSIZE = int((1 << 31) - 1)
|
||||
else:
|
||||
# 64-bit
|
||||
MAXSIZE = int((1 << 63) - 1)
|
||||
del X
|
||||
|
||||
|
||||
def _add_doc(func, doc):
|
||||
"""Add documentation to a function."""
|
||||
func.__doc__ = doc
|
||||
|
||||
|
||||
def _import_module(name):
|
||||
"""Import module, returning the module after the last dot."""
|
||||
__import__(name)
|
||||
return sys.modules[name]
|
||||
|
||||
|
||||
class _LazyDescr(object):
|
||||
|
||||
def __init__(self, name):
|
||||
self.name = name
|
||||
|
||||
def __get__(self, obj, tp):
|
||||
result = self._resolve()
|
||||
setattr(obj, self.name, result) # Invokes __set__.
|
||||
try:
|
||||
# This is a bit ugly, but it avoids running this again by
|
||||
# removing this descriptor.
|
||||
delattr(obj.__class__, self.name)
|
||||
except AttributeError:
|
||||
pass
|
||||
return result
|
||||
|
||||
|
||||
class MovedModule(_LazyDescr):
|
||||
|
||||
def __init__(self, name, old, new=None):
|
||||
super(MovedModule, self).__init__(name)
|
||||
if PY3:
|
||||
if new is None:
|
||||
new = name
|
||||
self.mod = new
|
||||
else:
|
||||
self.mod = old
|
||||
|
||||
def _resolve(self):
|
||||
return _import_module(self.mod)
|
||||
|
||||
def __getattr__(self, attr):
|
||||
_module = self._resolve()
|
||||
value = getattr(_module, attr)
|
||||
setattr(self, attr, value)
|
||||
return value
|
||||
|
||||
|
||||
class _LazyModule(types.ModuleType):
|
||||
|
||||
def __init__(self, name):
|
||||
super(_LazyModule, self).__init__(name)
|
||||
self.__doc__ = self.__class__.__doc__
|
||||
|
||||
def __dir__(self):
|
||||
attrs = ["__doc__", "__name__"]
|
||||
attrs += [attr.name for attr in self._moved_attributes]
|
||||
return attrs
|
||||
|
||||
# Subclasses should override this
|
||||
_moved_attributes = []
|
||||
|
||||
|
||||
class MovedAttribute(_LazyDescr):
|
||||
|
||||
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
|
||||
super(MovedAttribute, self).__init__(name)
|
||||
if PY3:
|
||||
if new_mod is None:
|
||||
new_mod = name
|
||||
self.mod = new_mod
|
||||
if new_attr is None:
|
||||
if old_attr is None:
|
||||
new_attr = name
|
||||
else:
|
||||
new_attr = old_attr
|
||||
self.attr = new_attr
|
||||
else:
|
||||
self.mod = old_mod
|
||||
if old_attr is None:
|
||||
old_attr = name
|
||||
self.attr = old_attr
|
||||
|
||||
def _resolve(self):
|
||||
module = _import_module(self.mod)
|
||||
return getattr(module, self.attr)
|
||||
|
||||
|
||||
class _SixMetaPathImporter(object):
|
||||
|
||||
"""
|
||||
A meta path importer to import six.moves and its submodules.
|
||||
|
||||
This class implements a PEP302 finder and loader. It should be compatible
|
||||
with Python 2.5 and all existing versions of Python3
|
||||
"""
|
||||
|
||||
def __init__(self, six_module_name):
|
||||
self.name = six_module_name
|
||||
self.known_modules = {}
|
||||
|
||||
def _add_module(self, mod, *fullnames):
|
||||
for fullname in fullnames:
|
||||
self.known_modules[self.name + "." + fullname] = mod
|
||||
|
||||
def _get_module(self, fullname):
|
||||
return self.known_modules[self.name + "." + fullname]
|
||||
|
||||
def find_module(self, fullname, path=None):
|
||||
if fullname in self.known_modules:
|
||||
return self
|
||||
return None
|
||||
|
||||
def __get_module(self, fullname):
|
||||
try:
|
||||
return self.known_modules[fullname]
|
||||
except KeyError:
|
||||
raise ImportError("This loader does not know module " + fullname)
|
||||
|
||||
def load_module(self, fullname):
|
||||
try:
|
||||
# in case of a reload
|
||||
return sys.modules[fullname]
|
||||
except KeyError:
|
||||
pass
|
||||
mod = self.__get_module(fullname)
|
||||
if isinstance(mod, MovedModule):
|
||||
mod = mod._resolve()
|
||||
else:
|
||||
mod.__loader__ = self
|
||||
sys.modules[fullname] = mod
|
||||
return mod
|
||||
|
||||
def is_package(self, fullname):
|
||||
"""
|
||||
Return true, if the named module is a package.
|
||||
|
||||
We need this method to get correct spec objects with
|
||||
Python 3.4 (see PEP451)
|
||||
"""
|
||||
return hasattr(self.__get_module(fullname), "__path__")
|
||||
|
||||
def get_code(self, fullname):
|
||||
"""Return None
|
||||
|
||||
Required, if is_package is implemented"""
|
||||
self.__get_module(fullname) # eventually raises ImportError
|
||||
return None
|
||||
get_source = get_code # same as get_code
|
||||
|
||||
_importer = _SixMetaPathImporter(__name__)
|
||||
|
||||
|
||||
class _MovedItems(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects"""
|
||||
__path__ = [] # mark as package
|
||||
|
||||
|
||||
_moved_attributes = [
|
||||
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
|
||||
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
|
||||
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
|
||||
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
|
||||
MovedAttribute("intern", "__builtin__", "sys"),
|
||||
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
|
||||
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
|
||||
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
|
||||
MovedAttribute("getoutput", "commands", "subprocess"),
|
||||
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
|
||||
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
|
||||
MovedAttribute("reduce", "__builtin__", "functools"),
|
||||
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
|
||||
MovedAttribute("StringIO", "StringIO", "io"),
|
||||
MovedAttribute("UserDict", "UserDict", "collections"),
|
||||
MovedAttribute("UserList", "UserList", "collections"),
|
||||
MovedAttribute("UserString", "UserString", "collections"),
|
||||
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
|
||||
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
|
||||
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
|
||||
MovedModule("builtins", "__builtin__"),
|
||||
MovedModule("configparser", "ConfigParser"),
|
||||
MovedModule("copyreg", "copy_reg"),
|
||||
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
|
||||
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
|
||||
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
|
||||
MovedModule("http_cookies", "Cookie", "http.cookies"),
|
||||
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
|
||||
MovedModule("html_parser", "HTMLParser", "html.parser"),
|
||||
MovedModule("http_client", "httplib", "http.client"),
|
||||
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
|
||||
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
|
||||
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
|
||||
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
|
||||
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
|
||||
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
|
||||
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
|
||||
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
|
||||
MovedModule("cPickle", "cPickle", "pickle"),
|
||||
MovedModule("queue", "Queue"),
|
||||
MovedModule("reprlib", "repr"),
|
||||
MovedModule("socketserver", "SocketServer"),
|
||||
MovedModule("_thread", "thread", "_thread"),
|
||||
MovedModule("tkinter", "Tkinter"),
|
||||
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
|
||||
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
|
||||
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
|
||||
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
|
||||
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
|
||||
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
|
||||
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
|
||||
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
|
||||
MovedModule("tkinter_colorchooser", "tkColorChooser",
|
||||
"tkinter.colorchooser"),
|
||||
MovedModule("tkinter_commondialog", "tkCommonDialog",
|
||||
"tkinter.commondialog"),
|
||||
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
|
||||
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
|
||||
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
|
||||
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
|
||||
"tkinter.simpledialog"),
|
||||
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
|
||||
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
|
||||
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
|
||||
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
|
||||
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
|
||||
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
|
||||
]
|
||||
# Add windows specific modules.
|
||||
if sys.platform == "win32":
|
||||
_moved_attributes += [
|
||||
MovedModule("winreg", "_winreg"),
|
||||
]
|
||||
|
||||
for attr in _moved_attributes:
|
||||
setattr(_MovedItems, attr.name, attr)
|
||||
if isinstance(attr, MovedModule):
|
||||
_importer._add_module(attr, "moves." + attr.name)
|
||||
del attr
|
||||
|
||||
_MovedItems._moved_attributes = _moved_attributes
|
||||
|
||||
moves = _MovedItems(__name__ + ".moves")
|
||||
_importer._add_module(moves, "moves")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_parse(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_parse"""
|
||||
|
||||
|
||||
_urllib_parse_moved_attributes = [
|
||||
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("quote", "urllib", "urllib.parse"),
|
||||
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
|
||||
MovedAttribute("unquote", "urllib", "urllib.parse"),
|
||||
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
|
||||
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
|
||||
MovedAttribute("urlencode", "urllib", "urllib.parse"),
|
||||
MovedAttribute("splitquery", "urllib", "urllib.parse"),
|
||||
MovedAttribute("splittag", "urllib", "urllib.parse"),
|
||||
MovedAttribute("splituser", "urllib", "urllib.parse"),
|
||||
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
|
||||
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
|
||||
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
|
||||
]
|
||||
for attr in _urllib_parse_moved_attributes:
|
||||
setattr(Module_six_moves_urllib_parse, attr.name, attr)
|
||||
del attr
|
||||
|
||||
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
|
||||
"moves.urllib_parse", "moves.urllib.parse")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_error(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_error"""
|
||||
|
||||
|
||||
_urllib_error_moved_attributes = [
|
||||
MovedAttribute("URLError", "urllib2", "urllib.error"),
|
||||
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
|
||||
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
|
||||
]
|
||||
for attr in _urllib_error_moved_attributes:
|
||||
setattr(Module_six_moves_urllib_error, attr.name, attr)
|
||||
del attr
|
||||
|
||||
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
|
||||
"moves.urllib_error", "moves.urllib.error")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_request(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_request"""
|
||||
|
||||
|
||||
_urllib_request_moved_attributes = [
|
||||
MovedAttribute("urlopen", "urllib2", "urllib.request"),
|
||||
MovedAttribute("install_opener", "urllib2", "urllib.request"),
|
||||
MovedAttribute("build_opener", "urllib2", "urllib.request"),
|
||||
MovedAttribute("pathname2url", "urllib", "urllib.request"),
|
||||
MovedAttribute("url2pathname", "urllib", "urllib.request"),
|
||||
MovedAttribute("getproxies", "urllib", "urllib.request"),
|
||||
MovedAttribute("Request", "urllib2", "urllib.request"),
|
||||
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
|
||||
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
|
||||
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
|
||||
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
|
||||
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
|
||||
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
|
||||
MovedAttribute("URLopener", "urllib", "urllib.request"),
|
||||
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
|
||||
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
|
||||
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
|
||||
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
|
||||
]
|
||||
for attr in _urllib_request_moved_attributes:
|
||||
setattr(Module_six_moves_urllib_request, attr.name, attr)
|
||||
del attr
|
||||
|
||||
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
|
||||
"moves.urllib_request", "moves.urllib.request")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_response(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_response"""
|
||||
|
||||
|
||||
_urllib_response_moved_attributes = [
|
||||
MovedAttribute("addbase", "urllib", "urllib.response"),
|
||||
MovedAttribute("addclosehook", "urllib", "urllib.response"),
|
||||
MovedAttribute("addinfo", "urllib", "urllib.response"),
|
||||
MovedAttribute("addinfourl", "urllib", "urllib.response"),
|
||||
]
|
||||
for attr in _urllib_response_moved_attributes:
|
||||
setattr(Module_six_moves_urllib_response, attr.name, attr)
|
||||
del attr
|
||||
|
||||
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
|
||||
"moves.urllib_response", "moves.urllib.response")
|
||||
|
||||
|
||||
class Module_six_moves_urllib_robotparser(_LazyModule):
|
||||
|
||||
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
|
||||
|
||||
|
||||
_urllib_robotparser_moved_attributes = [
|
||||
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
|
||||
]
|
||||
for attr in _urllib_robotparser_moved_attributes:
|
||||
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
|
||||
del attr
|
||||
|
||||
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
|
||||
"moves.urllib_robotparser", "moves.urllib.robotparser")
|
||||
|
||||
|
||||
class Module_six_moves_urllib(types.ModuleType):
|
||||
|
||||
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
|
||||
__path__ = [] # mark as package
|
||||
parse = _importer._get_module("moves.urllib_parse")
|
||||
error = _importer._get_module("moves.urllib_error")
|
||||
request = _importer._get_module("moves.urllib_request")
|
||||
response = _importer._get_module("moves.urllib_response")
|
||||
robotparser = _importer._get_module("moves.urllib_robotparser")
|
||||
|
||||
def __dir__(self):
|
||||
return ['parse', 'error', 'request', 'response', 'robotparser']
|
||||
|
||||
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
|
||||
"moves.urllib")
|
||||
|
||||
|
||||
def add_move(move):
|
||||
"""Add an item to six.moves."""
|
||||
setattr(_MovedItems, move.name, move)
|
||||
|
||||
|
||||
def remove_move(name):
|
||||
"""Remove item from six.moves."""
|
||||
try:
|
||||
delattr(_MovedItems, name)
|
||||
except AttributeError:
|
||||
try:
|
||||
del moves.__dict__[name]
|
||||
except KeyError:
|
||||
raise AttributeError("no such move, %r" % (name,))
|
||||
|
||||
|
||||
if PY3:
|
||||
_meth_func = "__func__"
|
||||
_meth_self = "__self__"
|
||||
|
||||
_func_closure = "__closure__"
|
||||
_func_code = "__code__"
|
||||
_func_defaults = "__defaults__"
|
||||
_func_globals = "__globals__"
|
||||
else:
|
||||
_meth_func = "im_func"
|
||||
_meth_self = "im_self"
|
||||
|
||||
_func_closure = "func_closure"
|
||||
_func_code = "func_code"
|
||||
_func_defaults = "func_defaults"
|
||||
_func_globals = "func_globals"
|
||||
|
||||
|
||||
try:
|
||||
advance_iterator = next
|
||||
except NameError:
|
||||
def advance_iterator(it):
|
||||
return it.next()
|
||||
next = advance_iterator
|
try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    def create_unbound_method(func, cls):
        return func

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)

    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)


if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))

    viewkeys = operator.methodcaller("keys")

    viewvalues = operator.methodcaller("values")

    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)

    def itervalues(d, **kw):
        return d.itervalues(**kw)

    def iteritems(d, **kw):
        return d.iteritems(**kw)

    def iterlists(d, **kw):
        return d.iterlists(**kw)

    viewkeys = operator.methodcaller("viewkeys")

    viewvalues = operator.methodcaller("viewvalues")

    viewitems = operator.methodcaller("viewitems")

_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")


if PY3:
    def b(s):
        return s.encode("latin-1")

    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash

    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr

    def byte2int(bs):
        return ord(bs[0])

    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")


def assertCountEqual(self, *args, **kwargs):
    return getattr(self, _assertCountEqual)(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    return getattr(self, _assertRegex)(*args, **kwargs)


if PY3:
    exec_ = getattr(moves.builtins, "exec")

    def reraise(tp, value, tb=None):
        try:
            if value is None:
                value = tp()
            if value.__traceback__ is not tb:
                raise value.with_traceback(tb)
            raise value
        finally:
            value = None
            tb = None

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec_("""def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")


if sys.version_info[:2] == (3, 2):
    exec_("""def raise_from(value, from_value):
    try:
        if from_value is None:
            raise value
        raise value from from_value
    finally:
        value = None
""")
elif sys.version_info[:2] > (3, 2):
    exec_("""def raise_from(value, from_value):
    try:
        raise value from from_value
    finally:
        value = None
""")
else:
    def raise_from(value, from_value):
        raise value

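
# [Editor's note] A minimal usage sketch for the helpers above (not part of
# the original module). reraise() re-raises an in-flight exception with its
# original traceback on both Python 2 and 3, and raise_from() is the portable
# spelling of "raise X from Y" exception chaining:
#
#     import sys
#
#     try:
#         {}['missing']
#     except KeyError:
#         reraise(*sys.exc_info())          # traceback is preserved
#
#     try:
#         int('x')
#     except ValueError as e:
#         raise_from(RuntimeError('bad input'), e)
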
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return

        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    _print = print_

    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()

_add_doc(reraise, """Reraise an exception.""")

if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        def wrapper(f):
            f = functools.wraps(wrapped, assigned, updated)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps


def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):

        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)

        @classmethod
        def __prepare__(cls, name, this_bases):
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
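
# [Editor's note] A usage sketch for with_metaclass() (not part of the
# original module). The temporary_class returned above is replaced by a class
# built with the real metaclass at class-creation time:
#
#     class Meta(type):
#         pass
#
#     class MyClass(with_metaclass(Meta, object)):
#         pass
#
#     assert type(MyClass) is Meta          # works on Python 2 and 3
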

def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper


def python_2_unicode_compatible(klass):
    """
    A decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if PY2:
        if '__str__' not in klass.__dict__:
            raise ValueError("@python_2_unicode_compatible cannot be applied "
                             "to %s because it doesn't define __str__()." %
                             klass.__name__)
        klass.__unicode__ = klass.__str__
        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
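
# [Editor's note] A usage sketch for python_2_unicode_compatible() (not part
# of the original module). Define __str__ returning text and decorate; under
# Python 2 the decorator wires up __unicode__ and a UTF-8-encoding __str__:
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'h\xe9llo'
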

# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
@@ -1,275 +0,0 @@
from __future__ import unicode_literals
import os
import time
import subprocess
import warnings
import tempfile
import pickle


class WarningTestMixin(object):
    # Based on https://stackoverflow.com/a/12935176/467366
    class _AssertWarnsContext(warnings.catch_warnings):
        def __init__(self, expected_warnings, parent, **kwargs):
            super(WarningTestMixin._AssertWarnsContext, self).__init__(**kwargs)

            self.parent = parent
            try:
                self.expected_warnings = list(expected_warnings)
            except TypeError:
                self.expected_warnings = [expected_warnings]

            self._warning_log = []

        def __enter__(self, *args, **kwargs):
            rv = super(WarningTestMixin._AssertWarnsContext, self).__enter__(*args, **kwargs)

            if self._showwarning is not self._module.showwarning:
                super_showwarning = self._module.showwarning
            else:
                super_showwarning = None

            def showwarning(*args, **kwargs):
                if super_showwarning is not None:
                    super_showwarning(*args, **kwargs)

                self._warning_log.append(warnings.WarningMessage(*args, **kwargs))

            self._module.showwarning = showwarning
            return rv

        def __exit__(self, *args, **kwargs):
            super(WarningTestMixin._AssertWarnsContext, self).__exit__(*args, **kwargs)

            self.parent.assertTrue(any(issubclass(item.category, warning)
                                       for warning in self.expected_warnings
                                       for item in self._warning_log))

    def assertWarns(self, warning, callable=None, *args, **kwargs):
        warnings.simplefilter('always')
        context = self.__class__._AssertWarnsContext(warning, self)
        if callable is None:
            return context
        else:
            with context:
                callable(*args, **kwargs)


class PicklableMixin(object):
    def _get_nobj_bytes(self, obj, dump_kwargs, load_kwargs):
        """
        Pickle and unpickle an object using ``pickle.dumps`` / ``pickle.loads``
        """
        pkl = pickle.dumps(obj, **dump_kwargs)
        return pickle.loads(pkl, **load_kwargs)

    def _get_nobj_file(self, obj, dump_kwargs, load_kwargs):
        """
        Pickle and unpickle an object using ``pickle.dump`` / ``pickle.load`` on
        a temporary file.
        """
        with tempfile.TemporaryFile('w+b') as pkl:
            pickle.dump(obj, pkl, **dump_kwargs)
            pkl.seek(0)  # Reset the file to the beginning to read it
            nobj = pickle.load(pkl, **load_kwargs)

        return nobj

    def assertPicklable(self, obj, singleton=False, asfile=False,
                        dump_kwargs=None, load_kwargs=None):
        """
        Assert that an object can be pickled and unpickled. This assertion
        assumes that the desired behavior is that the unpickled object compares
        equal to the original object, but is not the same object.
        """
        get_nobj = self._get_nobj_file if asfile else self._get_nobj_bytes
        dump_kwargs = dump_kwargs or {}
        load_kwargs = load_kwargs or {}

        nobj = get_nobj(obj, dump_kwargs, load_kwargs)
        if not singleton:
            self.assertIsNot(obj, nobj)
        self.assertEqual(obj, nobj)
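
# [Editor's note] A hypothetical test using the mixin (not part of the
# original file, and assumes unittest is imported). assertPicklable()
# round-trips the object through pickle and checks the copy compares equal
# without being the same object:
#
#     class DatetimePickleTest(PicklableMixin, unittest.TestCase):
#         def test_datetime(self):
#             from datetime import datetime
#             self.assertPicklable(datetime(2003, 9, 17, 20, 54, 47))
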

class TZContextBase(object):
    """
    Base class for a context manager which allows changing of time zones.

    Subclasses may define a guard variable to either block or allow time
    zone changes by redefining ``_guard_var_name`` and ``_guard_allows_change``.
    The default is that the guard variable must be affirmatively set.

    Subclasses must define ``get_current_tz`` and ``set_current_tz``.
    """
    _guard_var_name = "DATEUTIL_MAY_CHANGE_TZ"
    _guard_allows_change = True

    def __init__(self, tzval):
        self.tzval = tzval
        self._old_tz = None

    @classmethod
    def tz_change_allowed(cls):
        """
        Class method used to query whether or not this class allows time zone
        changes.
        """
        guard = bool(os.environ.get(cls._guard_var_name, False))

        # _guard_allows_change gives the "default" behavior - if True, the
        # guard is overcoming a block. If false, the guard is causing a block.
        # Whether tz_change is allowed is therefore the XNOR of the two.
        return guard == cls._guard_allows_change

    @classmethod
    def tz_change_disallowed_message(cls):
        """ Generate instructions on how to allow tz changes """
        msg = ('Changing time zone not allowed. Set {envar} to {gval} '
               'if you would like to allow this behavior')

        return msg.format(envar=cls._guard_var_name,
                          gval=cls._guard_allows_change)

    def __enter__(self):
        if not self.tz_change_allowed():
            raise ValueError(self.tz_change_disallowed_message())

        self._old_tz = self.get_current_tz()
        self.set_current_tz(self.tzval)

    def __exit__(self, type, value, traceback):
        if self._old_tz is not None:
            self.set_current_tz(self._old_tz)

        self._old_tz = None

    def get_current_tz(self):
        raise NotImplementedError

    def set_current_tz(self, tzval):
        raise NotImplementedError


class TZEnvContext(TZContextBase):
    """
    Context manager that temporarily sets the `TZ` variable (for use on
    *nix-like systems). Because the effect is local to the shell anyway, this
    will apply *unless* a guard is set.

    If you do not want the TZ environment variable set, you may set the
    ``DATEUTIL_MAY_NOT_CHANGE_TZ_VAR`` variable to a truthy value.
    """
    _guard_var_name = "DATEUTIL_MAY_NOT_CHANGE_TZ_VAR"
    _guard_allows_change = False

    def get_current_tz(self):
        return os.environ.get('TZ', UnsetTz)

    def set_current_tz(self, tzval):
        if tzval is UnsetTz and 'TZ' in os.environ:
            del os.environ['TZ']
        else:
            os.environ['TZ'] = tzval

        time.tzset()
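
# [Editor's note] A usage sketch (not part of the original file): the TZ
# variable is swapped for the duration of the block and restored afterwards,
# unless the DATEUTIL_MAY_NOT_CHANGE_TZ_VAR guard blocks the change:
#
#     with TZEnvContext('UTC'):
#         ...  # time.localtime() now reflects UTC
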

class TZWinContext(TZContextBase):
    """
    Context manager for changing local time zone on Windows.

    Because the effect of this is system-wide and global, it may have
    unintended side effects. Set the ``DATEUTIL_MAY_CHANGE_TZ`` environment
    variable to a truthy value before using this context manager.
    """
    def get_current_tz(self):
        p = subprocess.Popen(['tzutil', '/g'], stdout=subprocess.PIPE)

        ctzname, err = p.communicate()
        ctzname = ctzname.decode()  # Popen returns bytes

        if p.returncode:
            raise OSError('Failed to get current time zone: ' + err)

        return ctzname

    def set_current_tz(self, tzname):
        p = subprocess.Popen('tzutil /s "' + tzname + '"')

        out, err = p.communicate()

        if p.returncode:
            raise OSError('Failed to set current time zone: ' +
                          (err or 'Unknown error.'))


###
# Utility classes
class NotAValueClass(object):
    """
    A class analogous to NaN that has operations defined for any type.
    """
    def _op(self, other):
        return self  # Operation with NotAValue returns NotAValue

    def _cmp(self, other):
        return False

    __add__ = __radd__ = _op
    __sub__ = __rsub__ = _op
    __mul__ = __rmul__ = _op
    __div__ = __rdiv__ = _op
    __truediv__ = __rtruediv__ = _op
    __floordiv__ = __rfloordiv__ = _op

    __lt__ = __rlt__ = _op
    __gt__ = __rgt__ = _op
    __eq__ = __req__ = _op
    __le__ = __rle__ = _op
    __ge__ = __rge__ = _op


NotAValue = NotAValueClass()


class ComparesEqualClass(object):
    """
    A class that is always equal to whatever you compare it to.
    """

    def __eq__(self, other):
        return True

    def __ne__(self, other):
        return False

    def __le__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __lt__(self, other):
        return False

    def __gt__(self, other):
        return False

    __req__ = __eq__
    __rne__ = __ne__
    __rle__ = __le__
    __rge__ = __ge__
    __rlt__ = __lt__
    __rgt__ = __gt__


ComparesEqual = ComparesEqualClass()


class UnsetTzClass(object):
    """ Sentinel class for unset time zone variable """
    pass


UnsetTz = UnsetTzClass()
@@ -1,27 +0,0 @@
from hypothesis import given, assume
from hypothesis import strategies as st

from dateutil import tz
from dateutil.parser import isoparse

import pytest

# Strategies
TIME_ZONE_STRATEGY = st.sampled_from([None, tz.tzutc()] +
    [tz.gettz(zname) for zname in ('US/Eastern', 'US/Pacific',
                                   'Australia/Sydney', 'Europe/London')])
ASCII_STRATEGY = st.characters(max_codepoint=127)


@pytest.mark.isoparser
@given(dt=st.datetimes(timezones=TIME_ZONE_STRATEGY), sep=ASCII_STRATEGY)
def test_timespec_auto(dt, sep):
    if dt.tzinfo is not None:
        # Assume offset has no sub-second components
        assume(dt.utcoffset().total_seconds() % 60 == 0)

    sep = str(sep)  # Python 2.7 requires bytes
    dtstr = dt.isoformat(sep=sep)
    dt_rt = isoparse(dtstr)

    assert dt_rt == dt
@@ -1,22 +0,0 @@
from hypothesis.strategies import integers
from hypothesis import given

import pytest

from dateutil.parser import parserinfo


@pytest.mark.parserinfo
@given(integers(min_value=100, max_value=9999))
def test_convertyear(n):
    assert n == parserinfo().convertyear(n)


@pytest.mark.parserinfo
@given(integers(min_value=-50,
                max_value=49))
def test_convertyear_no_specified_century(n):
    p = parserinfo()
    new_year = p._year + n
    result = p.convertyear(new_year % 100, century_specified=False)
    assert result == new_year
@@ -1,95 +0,0 @@
from dateutil.easter import easter
from dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN

from datetime import date
import unittest

# List of easters between 1990 and 2050
western_easter_dates = [
    date(1990, 4, 15), date(1991, 3, 31), date(1992, 4, 19), date(1993, 4, 11),
    date(1994, 4, 3), date(1995, 4, 16), date(1996, 4, 7), date(1997, 3, 30),
    date(1998, 4, 12), date(1999, 4, 4),

    date(2000, 4, 23), date(2001, 4, 15), date(2002, 3, 31), date(2003, 4, 20),
    date(2004, 4, 11), date(2005, 3, 27), date(2006, 4, 16), date(2007, 4, 8),
    date(2008, 3, 23), date(2009, 4, 12),

    date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 8), date(2013, 3, 31),
    date(2014, 4, 20), date(2015, 4, 5), date(2016, 3, 27), date(2017, 4, 16),
    date(2018, 4, 1), date(2019, 4, 21),

    date(2020, 4, 12), date(2021, 4, 4), date(2022, 4, 17), date(2023, 4, 9),
    date(2024, 3, 31), date(2025, 4, 20), date(2026, 4, 5), date(2027, 3, 28),
    date(2028, 4, 16), date(2029, 4, 1),

    date(2030, 4, 21), date(2031, 4, 13), date(2032, 3, 28), date(2033, 4, 17),
    date(2034, 4, 9), date(2035, 3, 25), date(2036, 4, 13), date(2037, 4, 5),
    date(2038, 4, 25), date(2039, 4, 10),

    date(2040, 4, 1), date(2041, 4, 21), date(2042, 4, 6), date(2043, 3, 29),
    date(2044, 4, 17), date(2045, 4, 9), date(2046, 3, 25), date(2047, 4, 14),
    date(2048, 4, 5), date(2049, 4, 18), date(2050, 4, 10)
]

orthodox_easter_dates = [
    date(1990, 4, 15), date(1991, 4, 7), date(1992, 4, 26), date(1993, 4, 18),
    date(1994, 5, 1), date(1995, 4, 23), date(1996, 4, 14), date(1997, 4, 27),
    date(1998, 4, 19), date(1999, 4, 11),

    date(2000, 4, 30), date(2001, 4, 15), date(2002, 5, 5), date(2003, 4, 27),
    date(2004, 4, 11), date(2005, 5, 1), date(2006, 4, 23), date(2007, 4, 8),
    date(2008, 4, 27), date(2009, 4, 19),

    date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 15), date(2013, 5, 5),
    date(2014, 4, 20), date(2015, 4, 12), date(2016, 5, 1), date(2017, 4, 16),
    date(2018, 4, 8), date(2019, 4, 28),

    date(2020, 4, 19), date(2021, 5, 2), date(2022, 4, 24), date(2023, 4, 16),
    date(2024, 5, 5), date(2025, 4, 20), date(2026, 4, 12), date(2027, 5, 2),
    date(2028, 4, 16), date(2029, 4, 8),

    date(2030, 4, 28), date(2031, 4, 13), date(2032, 5, 2), date(2033, 4, 24),
    date(2034, 4, 9), date(2035, 4, 29), date(2036, 4, 20), date(2037, 4, 5),
    date(2038, 4, 25), date(2039, 4, 17),

    date(2040, 5, 6), date(2041, 4, 21), date(2042, 4, 13), date(2043, 5, 3),
    date(2044, 4, 24), date(2045, 4, 9), date(2046, 4, 29), date(2047, 4, 21),
    date(2048, 4, 5), date(2049, 4, 25), date(2050, 4, 17)
]

# A random smattering of Julian dates.
# Pulled values from http://www.kevinlaughery.com/east4099.html
julian_easter_dates = [
    date( 326, 4, 3), date( 375, 4, 5), date( 492, 4, 5), date( 552, 3, 31),
    date( 562, 4, 9), date( 569, 4, 21), date( 597, 4, 14), date( 621, 4, 19),
    date( 636, 3, 31), date( 655, 3, 29), date( 700, 4, 11), date( 725, 4, 8),
    date( 750, 3, 29), date( 782, 4, 7), date( 835, 4, 18), date( 849, 4, 14),
    date( 867, 3, 30), date( 890, 4, 12), date( 922, 4, 21), date( 934, 4, 6),
    date(1049, 3, 26), date(1058, 4, 19), date(1113, 4, 6), date(1119, 3, 30),
    date(1242, 4, 20), date(1255, 3, 28), date(1257, 4, 8), date(1258, 3, 24),
    date(1261, 4, 24), date(1278, 4, 17), date(1333, 4, 4), date(1351, 4, 17),
    date(1371, 4, 6), date(1391, 3, 26), date(1402, 3, 26), date(1412, 4, 3),
    date(1439, 4, 5), date(1445, 3, 28), date(1531, 4, 9), date(1555, 4, 14)
]


class EasterTest(unittest.TestCase):
    def testEasterWestern(self):
        for easter_date in western_easter_dates:
            self.assertEqual(easter_date,
                             easter(easter_date.year, EASTER_WESTERN))

    def testEasterOrthodox(self):
        for easter_date in orthodox_easter_dates:
            self.assertEqual(easter_date,
                             easter(easter_date.year, EASTER_ORTHODOX))

    def testEasterJulian(self):
        for easter_date in julian_easter_dates:
            self.assertEqual(easter_date,
                             easter(easter_date.year, EASTER_JULIAN))

    def testEasterBadMethod(self):
        # Invalid methods raise ValueError
        with self.assertRaises(ValueError):
            easter(1975, 4)
@@ -1,33 +0,0 @@
"""Test for the "import *" functionality.
|
||||
|
||||
As imort * can be only done at module level, it has been added in a separate file
|
||||
"""
|
||||
import unittest
|
||||
|
||||
prev_locals = list(locals())
|
||||
from dateutil import *
|
||||
new_locals = {name:value for name,value in locals().items()
|
||||
if name not in prev_locals}
|
||||
new_locals.pop('prev_locals')
|
||||
|
||||
class ImportStarTest(unittest.TestCase):
|
||||
""" Test that `from dateutil import *` adds the modules in __all__ locally"""
|
||||
|
||||
def testImportedModules(self):
|
||||
import dateutil.easter
|
||||
import dateutil.parser
|
||||
import dateutil.relativedelta
|
||||
import dateutil.rrule
|
||||
import dateutil.tz
|
||||
import dateutil.utils
|
||||
import dateutil.zoneinfo
|
||||
|
||||
self.assertEquals(dateutil.easter, new_locals.pop("easter"))
|
||||
self.assertEquals(dateutil.parser, new_locals.pop("parser"))
|
||||
self.assertEquals(dateutil.relativedelta, new_locals.pop("relativedelta"))
|
||||
self.assertEquals(dateutil.rrule, new_locals.pop("rrule"))
|
||||
self.assertEquals(dateutil.tz, new_locals.pop("tz"))
|
||||
self.assertEquals(dateutil.utils, new_locals.pop("utils"))
|
||||
self.assertEquals(dateutil.zoneinfo, new_locals.pop("zoneinfo"))
|
||||
|
||||
self.assertFalse(new_locals)
|
|
@@ -1,166 +0,0 @@
import sys
import unittest


class ImportVersionTest(unittest.TestCase):
    """ Test that dateutil.__version__ can be imported"""

    def testImportVersionStr(self):
        from dateutil import __version__

    def testImportRoot(self):
        import dateutil

        self.assertTrue(hasattr(dateutil, '__version__'))


class ImportEasterTest(unittest.TestCase):
    """ Test that dateutil.easter-related imports work properly """

    def testEasterDirect(self):
        import dateutil.easter

    def testEasterFrom(self):
        from dateutil import easter

    def testEasterStar(self):
        from dateutil.easter import easter


class ImportParserTest(unittest.TestCase):
    """ Test that dateutil.parser-related imports work properly """
    def testParserDirect(self):
        import dateutil.parser

    def testParserFrom(self):
        from dateutil import parser

    def testParserAll(self):
        # All interface
        from dateutil.parser import parse
        from dateutil.parser import parserinfo

        # Other public classes
        from dateutil.parser import parser

        for var in (parse, parserinfo, parser):
            self.assertIsNot(var, None)


class ImportRelativeDeltaTest(unittest.TestCase):
    """ Test that dateutil.relativedelta-related imports work properly """
    def testRelativeDeltaDirect(self):
        import dateutil.relativedelta

    def testRelativeDeltaFrom(self):
        from dateutil import relativedelta

    def testRelativeDeltaAll(self):
        from dateutil.relativedelta import relativedelta
        from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU

        for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
            self.assertIsNot(var, None)

        # In the public interface but not in all
        from dateutil.relativedelta import weekday
        self.assertIsNot(weekday, None)


class ImportRRuleTest(unittest.TestCase):
    """ Test that dateutil.rrule related imports work properly """
    def testRRuleDirect(self):
        import dateutil.rrule

    def testRRuleFrom(self):
        from dateutil import rrule

    def testRRuleAll(self):
        from dateutil.rrule import rrule
        from dateutil.rrule import rruleset
        from dateutil.rrule import rrulestr
        from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
        from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
        from dateutil.rrule import MO, TU, WE, TH, FR, SA, SU

        rr_all = (rrule, rruleset, rrulestr,
                  YEARLY, MONTHLY, WEEKLY, DAILY,
                  HOURLY, MINUTELY, SECONDLY,
                  MO, TU, WE, TH, FR, SA, SU)

        for var in rr_all:
            self.assertIsNot(var, None)

        # In the public interface but not in all
        from dateutil.rrule import weekday
        self.assertIsNot(weekday, None)


class ImportTZTest(unittest.TestCase):
    """ Test that dateutil.tz related imports work properly """
    def testTzDirect(self):
        import dateutil.tz

    def testTzFrom(self):
        from dateutil import tz

    def testTzAll(self):
        from dateutil.tz import tzutc
        from dateutil.tz import tzoffset
        from dateutil.tz import tzlocal
        from dateutil.tz import tzfile
        from dateutil.tz import tzrange
        from dateutil.tz import tzstr
        from dateutil.tz import tzical
        from dateutil.tz import gettz
        from dateutil.tz import tzwin
        from dateutil.tz import tzwinlocal
        from dateutil.tz import UTC
        from dateutil.tz import datetime_ambiguous
        from dateutil.tz import datetime_exists
        from dateutil.tz import resolve_imaginary

        tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
                  "tzstr", "tzical", "gettz", "datetime_ambiguous",
                  "datetime_exists", "resolve_imaginary", "UTC"]

        tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else []
        lvars = locals()

        for var in tz_all:
            self.assertIsNot(lvars[var], None)


@unittest.skipUnless(sys.platform.startswith('win'), "Requires Windows")
class ImportTZWinTest(unittest.TestCase):
    """ Test that dateutil.tzwin related imports work properly """
    def testTzwinDirect(self):
        import dateutil.tzwin

    def testTzwinFrom(self):
        from dateutil import tzwin

    def testTzwinStar(self):
        from dateutil.tzwin import tzwin
        from dateutil.tzwin import tzwinlocal

        tzwin_all = [tzwin, tzwinlocal]

        for var in tzwin_all:
            self.assertIsNot(var, None)


class ImportZoneInfoTest(unittest.TestCase):
    def testZoneinfoDirect(self):
        import dateutil.zoneinfo

    def testZoneinfoFrom(self):
        from dateutil import zoneinfo

    def testZoneinfoStar(self):
        from dateutil.zoneinfo import gettz
        from dateutil.zoneinfo import gettz_db_metadata
        from dateutil.zoneinfo import rebuild

        zi_all = (gettz, gettz_db_metadata, rebuild)

        for var in zi_all:
            self.assertIsNot(var, None)
@@ -1,95 +0,0 @@
# -*- coding: utf-8 -*-
"""
Tests for implementation details, not necessarily part of the user-facing
API.

The motivating case for these tests is #483, where we want to smoke-test
code that may be difficult to reach through the standard API calls.
"""

import unittest
import sys

import pytest

from dateutil.parser._parser import _ymd
from dateutil import tz

IS_PY32 = sys.version_info[0:2] == (3, 2)


class TestYMD(unittest.TestCase):

    # @pytest.mark.smoke
    def test_could_be_day(self):
        ymd = _ymd('foo bar 124 baz')

        ymd.append(2, 'M')
        assert ymd.has_month
        assert not ymd.has_year
        assert ymd.could_be_day(4)
        assert not ymd.could_be_day(-6)
        assert not ymd.could_be_day(32)

        # Assumes leap year
        assert ymd.could_be_day(29)

        ymd.append(1999)
        assert ymd.has_year
        assert not ymd.could_be_day(29)

        ymd.append(16, 'D')
        assert ymd.has_day
        assert not ymd.could_be_day(1)

        ymd = _ymd('foo bar 124 baz')
        ymd.append(1999)
        assert ymd.could_be_day(31)


###
# Test that private interfaces in _parser are deprecated properly
@pytest.mark.skipif(IS_PY32, reason='pytest.warns not supported on Python 3.2')
def test_parser_private_warns():
    from dateutil.parser import _timelex, _tzparser
    from dateutil.parser import _parsetz

    with pytest.warns(DeprecationWarning):
        _tzparser()

    with pytest.warns(DeprecationWarning):
        _timelex('2014-03-03')

    with pytest.warns(DeprecationWarning):
        _parsetz('+05:00')


@pytest.mark.skipif(IS_PY32, reason='pytest.warns not supported on Python 3.2')
def test_parser_parser_private_not_warns():
    from dateutil.parser._parser import _timelex, _tzparser
    from dateutil.parser._parser import _parsetz

    with pytest.warns(None) as recorder:
        _tzparser()
    assert len(recorder) == 0

    with pytest.warns(None) as recorder:
        _timelex('2014-03-03')
    assert len(recorder) == 0

    with pytest.warns(None) as recorder:
        _parsetz('+05:00')
    assert len(recorder) == 0


@pytest.mark.tzstr
def test_tzstr_internal_timedeltas():
    with pytest.warns(tz.DeprecatedTzFormatWarning):
        tz1 = tz.tzstr("EST5EDT,5,4,0,7200,11,-3,0,7200")

    with pytest.warns(tz.DeprecatedTzFormatWarning):
        tz2 = tz.tzstr("EST5EDT,4,1,0,7200,10,-1,0,7200")

    assert tz1._start_delta != tz2._start_delta
    assert tz1._end_delta != tz2._end_delta
@@ -1,482 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from datetime import datetime, timedelta, date, time
import itertools as it

from dateutil.tz import tz
from dateutil.parser import isoparser, isoparse

import pytest
import six

UTC = tz.tzutc()


def _generate_tzoffsets(limited):
    def _mkoffset(hmtuple, fmt):
        h, m = hmtuple
        m_td = (-1 if h < 0 else 1) * m

        tzo = tz.tzoffset(None, timedelta(hours=h, minutes=m_td))
        return tzo, fmt.format(h, m)

    out = []
    if not limited:
        # The subset that's just hours
        hm_out_h = [(h, 0) for h in (-23, -5, 0, 5, 23)]
        out.extend([_mkoffset(hm, '{:+03d}') for hm in hm_out_h])

        # Ones that have hours and minutes
        hm_out = [] + hm_out_h
        hm_out += [(-12, 15), (11, 30), (10, 2), (5, 15), (-5, 30)]
    else:
        hm_out = [(-5, -0)]

    fmts = ['{:+03d}:{:02d}', '{:+03d}{:02d}']
    out += [_mkoffset(hm, fmt) for hm in hm_out for fmt in fmts]

    # Also add in UTC and naive
    out.append((tz.tzutc(), 'Z'))
    out.append((None, ''))

    return out


FULL_TZOFFSETS = _generate_tzoffsets(False)
FULL_TZOFFSETS_AWARE = [x for x in FULL_TZOFFSETS if x[1]]
TZOFFSETS = _generate_tzoffsets(True)
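
# [Editor's note] Each entry generated above pairs a tzinfo with the offset
# string the parser should accept; a round-trip looks like this (illustrative,
# not part of the original file):
#
#     isoparse('2014-02-04T12:30:15+05:00')
#     # -> datetime(2014, 2, 4, 12, 30, 15,
#     #             tzinfo=tz.tzoffset(None, timedelta(hours=5)))
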

DATES = [datetime(1996, 1, 1), datetime(2017, 1, 1)]
@pytest.mark.parametrize('dt', tuple(DATES))
def test_year_only(dt):
    dtstr = dt.strftime('%Y')

    assert isoparse(dtstr) == dt


DATES += [datetime(2000, 2, 1), datetime(2017, 4, 1)]
@pytest.mark.parametrize('dt', tuple(DATES))
def test_year_month(dt):
    fmt = '%Y-%m'
    dtstr = dt.strftime(fmt)

    assert isoparse(dtstr) == dt


DATES += [datetime(2016, 2, 29), datetime(2018, 3, 15)]
YMD_FMTS = ('%Y%m%d', '%Y-%m-%d')
@pytest.mark.parametrize('dt', tuple(DATES))
@pytest.mark.parametrize('fmt', YMD_FMTS)
def test_year_month_day(dt, fmt):
    dtstr = dt.strftime(fmt)

    assert isoparse(dtstr) == dt


def _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset,
                            microsecond_precision=None):
    tzi, offset_str = tzoffset
    fmt = date_fmt + 'T' + time_fmt
    dt = dt.replace(tzinfo=tzi)
    dtstr = dt.strftime(fmt)

    if microsecond_precision is not None:
        if not fmt.endswith('%f'):
            raise ValueError('Time format has no microseconds!')

        if microsecond_precision != 6:
            dtstr = dtstr[:-(6 - microsecond_precision)]
        elif microsecond_precision > 6:
            raise ValueError('Precision must be 1-6')

    dtstr += offset_str

    assert isoparse(dtstr) == dt


DATETIMES = [datetime(1998, 4, 16, 12),
             datetime(2019, 11, 18, 23),
             datetime(2014, 12, 16, 4)]
@pytest.mark.parametrize('dt', tuple(DATETIMES))
@pytest.mark.parametrize('date_fmt', YMD_FMTS)
@pytest.mark.parametrize('tzoffset', TZOFFSETS)
def test_ymd_h(dt, date_fmt, tzoffset):
    _isoparse_date_and_time(dt, date_fmt, '%H', tzoffset)


DATETIMES = [datetime(2012, 1, 6, 9, 37)]
@pytest.mark.parametrize('dt', tuple(DATETIMES))
@pytest.mark.parametrize('date_fmt', YMD_FMTS)
@pytest.mark.parametrize('time_fmt', ('%H%M', '%H:%M'))
@pytest.mark.parametrize('tzoffset', TZOFFSETS)
def test_ymd_hm(dt, date_fmt, time_fmt, tzoffset):
    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)


DATETIMES = [datetime(2003, 9, 2, 22, 14, 2),
             datetime(2003, 8, 8, 14, 9, 14),
             datetime(2003, 4, 7, 6, 14, 59)]
HMS_FMTS = ('%H%M%S', '%H:%M:%S')
@pytest.mark.parametrize('dt', tuple(DATETIMES))
@pytest.mark.parametrize('date_fmt', YMD_FMTS)
@pytest.mark.parametrize('time_fmt', HMS_FMTS)
@pytest.mark.parametrize('tzoffset', TZOFFSETS)
def test_ymd_hms(dt, date_fmt, time_fmt, tzoffset):
    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)


DATETIMES = [datetime(2017, 11, 27, 6, 14, 30, 123456)]
@pytest.mark.parametrize('dt', tuple(DATETIMES))
@pytest.mark.parametrize('date_fmt', YMD_FMTS)
@pytest.mark.parametrize('time_fmt', (x + '.%f' for x in HMS_FMTS))
@pytest.mark.parametrize('tzoffset', TZOFFSETS)
@pytest.mark.parametrize('precision', list(range(3, 7)))
def test_ymd_hms_micro(dt, date_fmt, time_fmt, tzoffset, precision):
    # Truncate the microseconds to the desired precision for the representation
    dt = dt.replace(microsecond=int(round(dt.microsecond, precision - 6)))

    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset, precision)


@pytest.mark.parametrize('tzoffset', FULL_TZOFFSETS)
def test_full_tzoffsets(tzoffset):
    dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
    date_fmt = '%Y-%m-%d'
    time_fmt = '%H:%M:%S.%f'

    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)


@pytest.mark.parametrize('dt_str', [
    '2014-04-11T00',
    '2014-04-11T24',
    '2014-04-11T00:00',
    '2014-04-11T24:00',
    '2014-04-11T00:00:00',
    '2014-04-11T24:00:00',
    '2014-04-11T00:00:00.000',
    '2014-04-11T24:00:00.000',
    '2014-04-11T00:00:00.000000',
    '2014-04-11T24:00:00.000000']
)
def test_datetime_midnight(dt_str):
    assert isoparse(dt_str) == datetime(2014, 4, 11, 0, 0, 0, 0)


@pytest.mark.parametrize('datestr', [
    '2014-01-01',
    '20140101',
])
@pytest.mark.parametrize('sep', [' ', 'a', 'T', '_', '-'])
def test_isoparse_sep_none(datestr, sep):
    isostr = datestr + sep + '14:33:09'
    assert isoparse(isostr) == datetime(2014, 1, 1, 14, 33, 9)


##
# Uncommon date formats
TIME_ARGS = ('time_args',
             ((None, time(0), None), ) + tuple(('%H:%M:%S.%f', _t, _tz)
                 for _t, _tz in it.product([time(0), time(9, 30), time(14, 47)],
                                           TZOFFSETS)))


@pytest.mark.parametrize('isocal,dt_expected', [
    ((2017, 10), datetime(2017, 3, 6)),
    ((2020, 1), datetime(2019, 12, 30)),    # ISO year != Cal year
    ((2004, 53), datetime(2004, 12, 27)),   # Only half the week is in 2004
])
def test_isoweek(isocal, dt_expected):
    # TODO: Figure out how to parametrize this on formats, too
    for fmt in ('{:04d}-W{:02d}', '{:04d}W{:02d}'):
        dtstr = fmt.format(*isocal)
        assert isoparse(dtstr) == dt_expected


@pytest.mark.parametrize('isocal,dt_expected', [
    ((2016, 13, 7), datetime(2016, 4, 3)),
    ((2004, 53, 7), datetime(2005, 1, 2)),      # ISO year != Cal year
    ((2009, 1, 2), datetime(2008, 12, 30)),     # ISO year < Cal year
    ((2009, 53, 6), datetime(2010, 1, 2))       # ISO year > Cal year
])
def test_isoweek_day(isocal, dt_expected):
    # TODO: Figure out how to parametrize this on formats, too
    for fmt in ('{:04d}-W{:02d}-{:d}', '{:04d}W{:02d}{:d}'):
        dtstr = fmt.format(*isocal)
        assert isoparse(dtstr) == dt_expected


@pytest.mark.parametrize('isoord,dt_expected', [
    ((2004, 1), datetime(2004, 1, 1)),
    ((2016, 60), datetime(2016, 2, 29)),
    ((2017, 60), datetime(2017, 3, 1)),
    ((2016, 366), datetime(2016, 12, 31)),
    ((2017, 365), datetime(2017, 12, 31))
])
def test_iso_ordinal(isoord, dt_expected):
    for fmt in ('{:04d}-{:03d}', '{:04d}{:03d}'):
        dtstr = fmt.format(*isoord)

        assert isoparse(dtstr) == dt_expected


###
# Acceptance of bytes
@pytest.mark.parametrize('isostr,dt', [
    (b'2014', datetime(2014, 1, 1)),
    (b'20140204', datetime(2014, 2, 4)),
    (b'2014-02-04', datetime(2014, 2, 4)),
    (b'2014-02-04T12', datetime(2014, 2, 4, 12)),
    (b'2014-02-04T12:30', datetime(2014, 2, 4, 12, 30)),
    (b'2014-02-04T12:30:15', datetime(2014, 2, 4, 12, 30, 15)),
    (b'2014-02-04T12:30:15.224', datetime(2014, 2, 4, 12, 30, 15, 224000)),
    (b'20140204T123015.224', datetime(2014, 2, 4, 12, 30, 15, 224000)),
    (b'2014-02-04T12:30:15.224Z', datetime(2014, 2, 4, 12, 30, 15, 224000,
                                           tz.tzutc())),
    (b'2014-02-04T12:30:15.224+05:00',
        datetime(2014, 2, 4, 12, 30, 15, 224000,
                 tzinfo=tz.tzoffset(None, timedelta(hours=5))))])
def test_bytes(isostr, dt):
    assert isoparse(isostr) == dt


###
# Invalid ISO strings
@pytest.mark.parametrize('isostr,exception', [
    ('201', ValueError),                        # ISO string too short
    ('2012-0425', ValueError),                  # Inconsistent date separators
    ('201204-25', ValueError),                  # Inconsistent date separators
    ('20120425T0120:00', ValueError),           # Inconsistent time separators
    ('20120425T012500-334', ValueError),        # Wrong microsecond separator
    ('2001-1', ValueError),                     # YYYY-M not valid
    ('2012-04-9', ValueError),                  # YYYY-MM-D not valid
    ('201204', ValueError),                     # YYYYMM not valid
    ('20120411T03:30+', ValueError),            # Time zone too short
    ('20120411T03:30+1234567', ValueError),     # Time zone too long
    ('20120411T03:30-25:40', ValueError),       # Time zone invalid
    ('2012-1a', ValueError),                    # Invalid month
    ('20120411T03:30+00:60', ValueError),       # Time zone invalid minutes
    ('20120411T03:30+00:61', ValueError),       # Time zone invalid minutes
    ('20120411T033030.123456012:00',            # No sign in time zone
     ValueError),
    ('2012-W00', ValueError),                   # Invalid ISO week
    ('2012-W55', ValueError),                   # Invalid ISO week
    ('2012-W01-0', ValueError),                 # Invalid ISO week day
    ('2012-W01-8', ValueError),                 # Invalid ISO week day
    ('2013-000', ValueError),                   # Invalid ordinal day
    ('2013-366', ValueError),                   # Invalid ordinal day
    ('2013366', ValueError),                    # Invalid ordinal day
    ('2014-03-12Т12:30:14', ValueError),        # Cyrillic T
    ('2014-04-21T24:00:01', ValueError),        # Invalid use of 24 for midnight
    ('2014_W01-1', ValueError),                 # Invalid separator
    ('2014W01-1', ValueError),                  # Inconsistent use of dashes
    ('2014-W011', ValueError),                  # Inconsistent use of dashes
])
def test_iso_raises(isostr, exception):
    with pytest.raises(exception):
        isoparse(isostr)


@pytest.mark.parametrize('sep_act,valid_sep', [
    ('C', 'T'),
    ('T', 'C')
])
def test_iso_raises_sep(sep_act, valid_sep):
    isostr = '2012-04-25' + sep_act + '01:25:00'
    with pytest.raises(ValueError):
        isoparser(sep=valid_sep).isoparse(isostr)


@pytest.mark.xfail()
@pytest.mark.parametrize('isostr,exception', [
    ('20120425T01:2000', ValueError),   # Inconsistent time separators
])
def test_iso_raises_failing(isostr, exception):
    # These are test cases where the current implementation is too lenient
    # and need to be fixed
    with pytest.raises(exception):
        isoparse(isostr)


###
# Test ISOParser constructor
@pytest.mark.parametrize('sep', [' ', '9', '🍛'])
def test_isoparser_invalid_sep(sep):
    with pytest.raises(ValueError):
        isoparser(sep=sep)


# This only fails on Python 3
@pytest.mark.xfail(six.PY3, reason="Fails on Python 3 only")
def test_isoparser_byte_sep():
    dt = datetime(2017, 12, 6, 12, 30, 45)
    dt_str = dt.isoformat(sep=str('T'))

    dt_rt = isoparser(sep=b'T').isoparse(dt_str)

    assert dt == dt_rt


###
# Test parse_tzstr
@pytest.mark.parametrize('tzoffset', FULL_TZOFFSETS)
def test_parse_tzstr(tzoffset):
    dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
    date_fmt = '%Y-%m-%d'
    time_fmt = '%H:%M:%S.%f'

    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)


@pytest.mark.parametrize('tzstr', [
    '-00:00', '+00:00', '+00', '-00', '+0000', '-0000'
])
@pytest.mark.parametrize('zero_as_utc', [True, False])
def test_parse_tzstr_zero_as_utc(tzstr, zero_as_utc):
    tzi = isoparser().parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
    assert tzi == tz.tzutc()
    assert (type(tzi) == tz.tzutc) == zero_as_utc


@pytest.mark.parametrize('tzstr,exception', [
    ('00:00', ValueError),      # No sign
    ('05:00', ValueError),      # No sign
    ('_00:00', ValueError),     # Invalid sign
    ('+25:00', ValueError),     # Offset too large
    ('00:0000', ValueError),    # String too long
])
def test_parse_tzstr_fails(tzstr, exception):
    with pytest.raises(exception):
        isoparser().parse_tzstr(tzstr)


###
# Test parse_isodate
def __make_date_examples():
    dates_no_day = [
        date(1999, 12, 1),
        date(2016, 2, 1)
    ]

    if six.PY3:
        # strftime does not support dates before 1900 in Python 2
        dates_no_day.append(date(1000, 11, 1))

    # Only one supported format for dates with no day
    o = zip(dates_no_day, it.repeat('%Y-%m'))

    dates_w_day = [
        date(1969, 12, 31),
        date(1900, 1, 1),
        date(2016, 2, 29),
        date(2017, 11, 14)
    ]

    dates_w_day_fmts = ('%Y%m%d', '%Y-%m-%d')
    o = it.chain(o, it.product(dates_w_day, dates_w_day_fmts))

    return list(o)


@pytest.mark.parametrize('d,dt_fmt', __make_date_examples())
@pytest.mark.parametrize('as_bytes', [True, False])
def test_parse_isodate(d, dt_fmt, as_bytes):
    d_str = d.strftime(dt_fmt)
    if isinstance(d_str, six.text_type) and as_bytes:
        d_str = d_str.encode('ascii')
    elif isinstance(d_str, six.binary_type) and not as_bytes:
        d_str = d_str.decode('ascii')

    iparser = isoparser()
    assert iparser.parse_isodate(d_str) == d


@pytest.mark.parametrize('isostr,exception', [
    ('243', ValueError),            # ISO string too short
    ('2014-0423', ValueError),      # Inconsistent date separators
    ('201404-23', ValueError),      # Inconsistent date separators
    ('2014日03月14', ValueError),   # Not ASCII
    ('2013-02-29', ValueError),     # Not a leap year
    ('2014/12/03', ValueError),     # Wrong separators
    ('2014-04-19T', ValueError),    # Unknown components
])
def test_isodate_raises(isostr, exception):
    with pytest.raises(exception):
        isoparser().parse_isodate(isostr)


###
# Test parse_isotime
def __make_time_examples():
    outputs = []

    # HH
    time_h = [time(0), time(8), time(22)]
    time_h_fmts = ['%H']

    outputs.append(it.product(time_h, time_h_fmts))

    # HHMM / HH:MM
    time_hm = [time(0, 0), time(0, 30), time(8, 47), time(16, 1)]
    time_hm_fmts = ['%H%M', '%H:%M']

    outputs.append(it.product(time_hm, time_hm_fmts))

    # HHMMSS / HH:MM:SS
    time_hms = [time(0, 0, 0), time(0, 15, 30),
                time(8, 2, 16), time(12, 0), time(16, 2), time(20, 45)]

    time_hms_fmts = ['%H%M%S', '%H:%M:%S']

    outputs.append(it.product(time_hms, time_hms_fmts))

    # HHMMSS.ffffff / HH:MM:SS.ffffff
    time_hmsu = [time(0, 0, 0, 0), time(4, 15, 3, 247993),
                 time(14, 21, 59, 948730),
                 time(23, 59, 59, 999999)]

    time_hmsu_fmts = ['%H%M%S.%f', '%H:%M:%S.%f']

    outputs.append(it.product(time_hmsu, time_hmsu_fmts))

    outputs = list(map(list, outputs))

    # Time zones
    ex_naive = list(it.chain.from_iterable(x[0:2] for x in outputs))
    o = it.product(ex_naive, TZOFFSETS)  # ((time, fmt), (tzinfo, offsetstr))
    o = ((t.replace(tzinfo=tzi), fmt + off_str)
         for (t, fmt), (tzi, off_str) in o)

    outputs.append(o)

    return list(it.chain.from_iterable(outputs))


@pytest.mark.parametrize('time_val,time_fmt', __make_time_examples())
@pytest.mark.parametrize('as_bytes', [True, False])
def test_isotime(time_val, time_fmt, as_bytes):
    tstr = time_val.strftime(time_fmt)
    if isinstance(tstr, six.text_type) and as_bytes:
        tstr = tstr.encode('ascii')
    elif isinstance(tstr, six.binary_type) and not as_bytes:
        tstr = tstr.decode('ascii')

    iparser = isoparser()

    assert iparser.parse_isotime(tstr) == time_val


@pytest.mark.parametrize('isostr,exception', [
    ('3', ValueError),                      # ISO string too short
    ('14時30分15秒', ValueError),           # Not ASCII
    ('14_30_15', ValueError),               # Invalid separators
    ('1430:15', ValueError),                # Inconsistent separator use
    ('14:30:15.3684000309', ValueError),    # Too much us precision
    ('25', ValueError),                     # Invalid hours
    ('25:15', ValueError),                  # Invalid hours
    ('14:60', ValueError),                  # Invalid minutes
    ('14:59:61', ValueError),               # Invalid seconds
    ('14:30:15.3446830500', ValueError),    # No sign in time zone
    ('14:30:15+', ValueError),              # Time zone too short
    ('14:30:15+1234567', ValueError),       # Time zone invalid
    ('14:59:59+25:00', ValueError),         # Invalid tz hours
    ('14:59:59+12:62', ValueError),         # Invalid tz minutes
    ('14:59:30_344583', ValueError),        # Invalid microsecond separator
])
def test_isotime_raises(isostr, exception):
    iparser = isoparser()
    with pytest.raises(exception):
        iparser.parse_isotime(isostr)


@pytest.mark.xfail()
@pytest.mark.parametrize('isostr,exception', [
    ('14:3015', ValueError),    # Inconsistent separator use
    ('201202', ValueError)      # Invalid ISO format
])
def test_isotime_raises_xfail(isostr, exception):
    iparser = isoparser()
    with pytest.raises(exception):
        iparser.parse_isotime(isostr)
File diff suppressed because it is too large
@@ -1,678 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from ._common import WarningTestMixin, NotAValue

import calendar
from datetime import datetime, date, timedelta
import unittest

from dateutil.relativedelta import relativedelta, MO, TU, WE, FR, SU


class RelativeDeltaTest(WarningTestMixin, unittest.TestCase):
    now = datetime(2003, 9, 17, 20, 54, 47, 282310)
    today = date(2003, 9, 17)

    def testInheritance(self):
        # Ensure that relativedelta is inheritance-friendly.
        class rdChildClass(relativedelta):
            pass

        ccRD = rdChildClass(years=1, months=1, days=1, leapdays=1, weeks=1,
                            hours=1, minutes=1, seconds=1, microseconds=1)

        rd = relativedelta(years=1, months=1, days=1, leapdays=1, weeks=1,
                           hours=1, minutes=1, seconds=1, microseconds=1)

        self.assertEqual(type(ccRD + rd), type(ccRD),
                         msg='Addition does not inherit type.')

        self.assertEqual(type(ccRD - rd), type(ccRD),
                         msg='Subtraction does not inherit type.')

        self.assertEqual(type(-ccRD), type(ccRD),
                         msg='Negation does not inherit type.')

        self.assertEqual(type(ccRD * 5.0), type(ccRD),
                         msg='Multiplication does not inherit type.')

        self.assertEqual(type(ccRD / 5.0), type(ccRD),
                         msg='Division does not inherit type.')

    def testMonthEndMonthBeginning(self):
        self.assertEqual(relativedelta(datetime(2003, 1, 31, 23, 59, 59),
                                       datetime(2003, 3, 1, 0, 0, 0)),
                         relativedelta(months=-1, seconds=-1))

        self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
                                       datetime(2003, 1, 31, 23, 59, 59)),
                         relativedelta(months=1, seconds=1))

    def testMonthEndMonthBeginningLeapYear(self):
        self.assertEqual(relativedelta(datetime(2012, 1, 31, 23, 59, 59),
                                       datetime(2012, 3, 1, 0, 0, 0)),
                         relativedelta(months=-1, seconds=-1))

        self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0),
                                       datetime(2003, 1, 31, 23, 59, 59)),
                         relativedelta(months=1, seconds=1))

    def testNextMonth(self):
        self.assertEqual(self.now + relativedelta(months=+1),
                         datetime(2003, 10, 17, 20, 54, 47, 282310))

    def testNextMonthPlusOneWeek(self):
        self.assertEqual(self.now + relativedelta(months=+1, weeks=+1),
                         datetime(2003, 10, 24, 20, 54, 47, 282310))

    def testNextMonthPlusOneWeek10am(self):
        self.assertEqual(self.today +
                         relativedelta(months=+1, weeks=+1, hour=10),
                         datetime(2003, 10, 24, 10, 0))

    def testNextMonthPlusOneWeek10amDiff(self):
        self.assertEqual(relativedelta(datetime(2003, 10, 24, 10, 0),
                                       self.today),
                         relativedelta(months=+1, days=+7, hours=+10))

    def testOneMonthBeforeOneYear(self):
        self.assertEqual(self.now + relativedelta(years=+1, months=-1),
                         datetime(2004, 8, 17, 20, 54, 47, 282310))

    def testMonthsOfDiffNumOfDays(self):
        self.assertEqual(date(2003, 1, 27) + relativedelta(months=+1),
                         date(2003, 2, 27))
        self.assertEqual(date(2003, 1, 31) + relativedelta(months=+1),
                         date(2003, 2, 28))
        self.assertEqual(date(2003, 1, 31) + relativedelta(months=+2),
                         date(2003, 3, 31))

    def testMonthsOfDiffNumOfDaysWithYears(self):
        self.assertEqual(date(2000, 2, 28) + relativedelta(years=+1),
                         date(2001, 2, 28))
        self.assertEqual(date(2000, 2, 29) + relativedelta(years=+1),
                         date(2001, 2, 28))

        self.assertEqual(date(1999, 2, 28) + relativedelta(years=+1),
                         date(2000, 2, 28))
        self.assertEqual(date(1999, 3, 1) + relativedelta(years=+1),
                         date(2000, 3, 1))
        self.assertEqual(date(1999, 3, 1) + relativedelta(years=+1),
                         date(2000, 3, 1))

        self.assertEqual(date(2001, 2, 28) + relativedelta(years=-1),
                         date(2000, 2, 28))
        self.assertEqual(date(2001, 3, 1) + relativedelta(years=-1),
                         date(2000, 3, 1))

    def testNextFriday(self):
        self.assertEqual(self.today + relativedelta(weekday=FR),
                         date(2003, 9, 19))

    def testNextFridayInt(self):
        self.assertEqual(self.today + relativedelta(weekday=calendar.FRIDAY),
                         date(2003, 9, 19))

    def testLastFridayInThisMonth(self):
        self.assertEqual(self.today + relativedelta(day=31, weekday=FR(-1)),
                         date(2003, 9, 26))

    def testNextWednesdayIsToday(self):
        self.assertEqual(self.today + relativedelta(weekday=WE),
                         date(2003, 9, 17))

    def testNextWednesdayNotToday(self):
        self.assertEqual(self.today + relativedelta(days=+1, weekday=WE),
                         date(2003, 9, 24))

    def test15thISOYearWeek(self):
        self.assertEqual(date(2003, 1, 1) +
                         relativedelta(day=4, weeks=+14, weekday=MO(-1)),
                         date(2003, 4, 7))

    def testMillenniumAge(self):
        self.assertEqual(relativedelta(self.now, date(2001, 1, 1)),
                         relativedelta(years=+2, months=+8, days=+16,
                                       hours=+20, minutes=+54, seconds=+47,
                                       microseconds=+282310))

    def testJohnAge(self):
        self.assertEqual(relativedelta(self.now,
                                       datetime(1978, 4, 5, 12, 0)),
                         relativedelta(years=+25, months=+5, days=+12,
                                       hours=+8, minutes=+54, seconds=+47,
                                       microseconds=+282310))

    def testJohnAgeWithDate(self):
        self.assertEqual(relativedelta(self.today,
                                       datetime(1978, 4, 5, 12, 0)),
                         relativedelta(years=+25, months=+5, days=+11,
                                       hours=+12))

    def testYearDay(self):
        self.assertEqual(date(2003, 1, 1) + relativedelta(yearday=260),
                         date(2003, 9, 17))
        self.assertEqual(date(2002, 1, 1) + relativedelta(yearday=260),
                         date(2002, 9, 17))
        self.assertEqual(date(2000, 1, 1) + relativedelta(yearday=260),
                         date(2000, 9, 16))
        self.assertEqual(self.today + relativedelta(yearday=261),
                         date(2003, 9, 18))

    def testYearDayBug(self):
        # Tests a problem reported by Adam Ryan.
        self.assertEqual(date(2010, 1, 1) + relativedelta(yearday=15),
                         date(2010, 1, 15))

    def testNonLeapYearDay(self):
        self.assertEqual(date(2003, 1, 1) + relativedelta(nlyearday=260),
                         date(2003, 9, 17))
        self.assertEqual(date(2002, 1, 1) + relativedelta(nlyearday=260),
                         date(2002, 9, 17))
        self.assertEqual(date(2000, 1, 1) + relativedelta(nlyearday=260),
                         date(2000, 9, 17))
        self.assertEqual(self.today + relativedelta(yearday=261),
                         date(2003, 9, 18))

    def testAddition(self):
        self.assertEqual(relativedelta(days=10) +
                         relativedelta(years=1, months=2, days=3, hours=4,
                                       minutes=5, microseconds=6),
                         relativedelta(years=1, months=2, days=13, hours=4,
                                       minutes=5, microseconds=6))

    def testAbsoluteAddition(self):
        self.assertEqual(relativedelta() + relativedelta(day=0, hour=0),
                         relativedelta(day=0, hour=0))
        self.assertEqual(relativedelta(day=0, hour=0) + relativedelta(),
                         relativedelta(day=0, hour=0))

    def testAdditionToDatetime(self):
        self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1),
                         datetime(2000, 1, 2))

    def testRightAdditionToDatetime(self):
        self.assertEqual(relativedelta(days=1) + datetime(2000, 1, 1),
                         datetime(2000, 1, 2))

    def testAdditionInvalidType(self):
        with self.assertRaises(TypeError):
            relativedelta(days=3) + 9

    def testAdditionUnsupportedType(self):
        # For unsupported types that define their own comparators, etc.
        self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)

    def testAdditionFloatValue(self):
        self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=float(1)),
                         datetime(2000, 1, 2))
        self.assertEqual(datetime(2000, 1, 1) + relativedelta(months=float(1)),
                         datetime(2000, 2, 1))
        self.assertEqual(datetime(2000, 1, 1) + relativedelta(years=float(1)),
                         datetime(2001, 1, 1))

    def testAdditionFloatFractionals(self):
        self.assertEqual(datetime(2000, 1, 1, 0) +
                         relativedelta(days=float(0.5)),
                         datetime(2000, 1, 1, 12))
        self.assertEqual(datetime(2000, 1, 1, 0, 0) +
                         relativedelta(hours=float(0.5)),
                         datetime(2000, 1, 1, 0, 30))
        self.assertEqual(datetime(2000, 1, 1, 0, 0, 0) +
                         relativedelta(minutes=float(0.5)),
                         datetime(2000, 1, 1, 0, 0, 30))
        self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) +
                         relativedelta(seconds=float(0.5)),
                         datetime(2000, 1, 1, 0, 0, 0, 500000))
        self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) +
                         relativedelta(microseconds=float(500000.25)),
                         datetime(2000, 1, 1, 0, 0, 0, 500000))

    def testSubtraction(self):
        self.assertEqual(relativedelta(days=10) -
                         relativedelta(years=1, months=2, days=3, hours=4,
                                       minutes=5, microseconds=6),
                         relativedelta(years=-1, months=-2, days=7, hours=-4,
                                       minutes=-5, microseconds=-6))

    def testRightSubtractionFromDatetime(self):
        self.assertEqual(datetime(2000, 1, 2) - relativedelta(days=1),
                         datetime(2000, 1, 1))

    def testSubtractionWithDatetime(self):
        self.assertRaises(TypeError, lambda x, y: x - y,
                          relativedelta(days=1), datetime(2000, 1, 1))

    def testSubtractionInvalidType(self):
        with self.assertRaises(TypeError):
            relativedelta(hours=12) - 14

    def testSubtractionUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) + NotAValue, NotAValue)
|
||||
|
||||
def testMultiplication(self):
|
||||
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1) * 28,
|
||||
datetime(2000, 1, 29))
|
||||
self.assertEqual(datetime(2000, 1, 1) + 28 * relativedelta(days=1),
|
||||
datetime(2000, 1, 29))
|
||||
|
||||
def testMultiplicationUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) * NotAValue, NotAValue)
|
||||
|
||||
def testDivision(self):
|
||||
self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=28) / 28,
|
||||
datetime(2000, 1, 2))
|
||||
|
||||
def testDivisionUnsupportedType(self):
|
||||
self.assertIs(relativedelta(days=1) / NotAValue, NotAValue)
|
||||
|
||||
def testBoolean(self):
|
||||
self.assertFalse(relativedelta(days=0))
|
||||
self.assertTrue(relativedelta(days=1))
|
||||
|
||||
def testAbsoluteValueNegative(self):
|
||||
rd_base = relativedelta(years=-1, months=-5, days=-2, hours=-3,
|
||||
minutes=-5, seconds=-2, microseconds=-12)
|
||||
rd_expected = relativedelta(years=1, months=5, days=2, hours=3,
|
||||
minutes=5, seconds=2, microseconds=12)
|
||||
self.assertEqual(abs(rd_base), rd_expected)
|
||||
|
||||
def testAbsoluteValuePositive(self):
|
||||
rd_base = relativedelta(years=1, months=5, days=2, hours=3,
|
||||
minutes=5, seconds=2, microseconds=12)
|
||||
rd_expected = rd_base
|
||||
|
||||
self.assertEqual(abs(rd_base), rd_expected)
|
||||
|
||||
def testComparison(self):
|
||||
d1 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=1)
|
||||
d2 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=1)
|
||||
d3 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1,
|
||||
minutes=1, seconds=1, microseconds=2)
|
||||
|
||||
self.assertEqual(d1, d2)
|
||||
self.assertNotEqual(d1, d3)
|
||||
|
||||
def testInequalityTypeMismatch(self):
|
||||
# Different type
|
||||
self.assertFalse(relativedelta(year=1) == 19)
|
||||
|
||||
def testInequalityUnsupportedType(self):
|
||||
self.assertIs(relativedelta(hours=3) == NotAValue, NotAValue)
|
||||
|
||||
def testInequalityWeekdays(self):
|
||||
# Different weekdays
|
||||
no_wday = relativedelta(year=1997, month=4)
|
||||
wday_mo_1 = relativedelta(year=1997, month=4, weekday=MO(+1))
|
||||
wday_mo_2 = relativedelta(year=1997, month=4, weekday=MO(+2))
|
||||
wday_tu = relativedelta(year=1997, month=4, weekday=TU)
|
||||
|
||||
self.assertTrue(wday_mo_1 == wday_mo_1)
|
||||
|
||||
self.assertFalse(no_wday == wday_mo_1)
|
||||
self.assertFalse(wday_mo_1 == no_wday)
|
||||
|
||||
self.assertFalse(wday_mo_1 == wday_mo_2)
|
||||
self.assertFalse(wday_mo_2 == wday_mo_1)
|
||||
|
||||
self.assertFalse(wday_mo_1 == wday_tu)
|
||||
self.assertFalse(wday_tu == wday_mo_1)
|
||||
|
||||
def testMonthOverflow(self):
|
||||
self.assertEqual(relativedelta(months=273),
|
||||
relativedelta(years=22, months=9))
|
||||
|
||||
def testWeeks(self):
|
||||
# Test that the weeks property is working properly.
|
||||
rd = relativedelta(years=4, months=2, weeks=8, days=6)
|
||||
self.assertEqual((rd.weeks, rd.days), (8, 8 * 7 + 6))
|
||||
|
||||
rd.weeks = 3
|
||||
self.assertEqual((rd.weeks, rd.days), (3, 3 * 7 + 6))
|
||||
|
||||
def testRelativeDeltaRepr(self):
|
||||
self.assertEqual(repr(relativedelta(years=1, months=-1, days=15)),
|
||||
'relativedelta(years=+1, months=-1, days=+15)')
|
||||
|
||||
self.assertEqual(repr(relativedelta(months=14, seconds=-25)),
|
||||
'relativedelta(years=+1, months=+2, seconds=-25)')
|
||||
|
||||
self.assertEqual(repr(relativedelta(month=3, hour=3, weekday=SU(3))),
|
||||
'relativedelta(month=3, weekday=SU(+3), hour=3)')
|
||||
|
||||
def testRelativeDeltaFractionalYear(self):
|
||||
with self.assertRaises(ValueError):
|
||||
relativedelta(years=1.5)
|
||||
|
||||
def testRelativeDeltaFractionalMonth(self):
|
||||
with self.assertRaises(ValueError):
|
||||
relativedelta(months=1.5)
|
||||
|
||||
def testRelativeDeltaFractionalAbsolutes(self):
|
||||
# Fractional absolute values will soon be unsupported,
|
||||
# check for the deprecation warning.
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(year=2.86)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(month=1.29)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(day=0.44)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(hour=23.98)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(minute=45.21)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(second=13.2)
|
||||
|
||||
with self.assertWarns(DeprecationWarning):
|
||||
relativedelta(microsecond=157221.93)
|
||||
|
||||
def testRelativeDeltaFractionalRepr(self):
|
||||
rd = relativedelta(years=3, months=-2, days=1.25)
|
||||
|
||||
self.assertEqual(repr(rd),
|
||||
'relativedelta(years=+3, months=-2, days=+1.25)')
|
||||
|
||||
rd = relativedelta(hours=0.5, seconds=9.22)
|
||||
self.assertEqual(repr(rd),
|
||||
'relativedelta(hours=+0.5, seconds=+9.22)')
|
||||
|
||||
def testRelativeDeltaFractionalWeeks(self):
|
||||
# Equivalent to days=8, hours=18
|
||||
rd = relativedelta(weeks=1.25)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 11, 18))
|
||||
|
||||
def testRelativeDeltaFractionalDays(self):
|
||||
rd1 = relativedelta(days=1.48)
|
||||
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 4, 11, 31, 12))
|
||||
|
||||
rd2 = relativedelta(days=1.5)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 4, 12, 0, 0))
|
||||
|
||||
def testRelativeDeltaFractionalHours(self):
|
||||
rd = relativedelta(days=1, hours=12.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 4, 12, 30, 0))
|
||||
|
||||
def testRelativeDeltaFractionalMinutes(self):
|
||||
rd = relativedelta(hours=1, minutes=30.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 3, 1, 30, 30))
|
||||
|
||||
def testRelativeDeltaFractionalSeconds(self):
|
||||
rd = relativedelta(hours=5, minutes=30, seconds=30.5)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd,
|
||||
datetime(2009, 9, 3, 5, 30, 30, 500000))
|
||||
|
||||
def testRelativeDeltaFractionalPositiveOverflow(self):
|
||||
# Equivalent to (days=1, hours=14)
|
||||
rd1 = relativedelta(days=1.5, hours=2)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 4, 14, 0, 0))
|
||||
|
||||
# Equivalent to (days=1, hours=14, minutes=45)
|
||||
rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
|
||||
d1 = datetime(2009, 9, 3, 0, 0)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 4, 14, 45))
|
||||
|
||||
# Carry back up - equivalent to (days=2, hours=2, minutes=0, seconds=1)
|
||||
rd3 = relativedelta(days=1.5, hours=13, minutes=59.5, seconds=31)
|
||||
self.assertEqual(d1 + rd3,
|
||||
datetime(2009, 9, 5, 2, 0, 1))
|
||||
|
||||
def testRelativeDeltaFractionalNegativeDays(self):
|
||||
# Equivalent to (days=-1, hours=-1)
|
||||
rd1 = relativedelta(days=-1.5, hours=11)
|
||||
d1 = datetime(2009, 9, 3, 12, 0)
|
||||
self.assertEqual(d1 + rd1,
|
||||
datetime(2009, 9, 2, 11, 0, 0))
|
||||
|
||||
# Equivalent to (days=-1, hours=-9)
|
||||
rd2 = relativedelta(days=-1.25, hours=-3)
|
||||
self.assertEqual(d1 + rd2,
|
||||
datetime(2009, 9, 2, 3))
|
||||
|
||||
def testRelativeDeltaNormalizeFractionalDays(self):
|
||||
# Equivalent to (days=2, hours=18)
|
||||
rd1 = relativedelta(days=2.75)
|
||||
|
||||
self.assertEqual(rd1.normalized(), relativedelta(days=2, hours=18))
|
||||
|
||||
        # Equivalent to (days=1, hours=11, minutes=31, seconds=12)
        rd2 = relativedelta(days=1.48)

        self.assertEqual(rd2.normalized(),
                         relativedelta(days=1, hours=11, minutes=31, seconds=12))

    def testRelativeDeltaNormalizeFractionalDays2(self):
        # Equivalent to (hours=1, minutes=30)
        rd1 = relativedelta(hours=1.5)

        self.assertEqual(rd1.normalized(), relativedelta(hours=1, minutes=30))

        # Equivalent to (hours=3, minutes=17, seconds=5, microseconds=100)
        rd2 = relativedelta(hours=3.28472225)

        self.assertEqual(rd2.normalized(),
                         relativedelta(hours=3, minutes=17, seconds=5, microseconds=100))

    def testRelativeDeltaNormalizeFractionalMinutes(self):
        # Equivalent to (minutes=15, seconds=36)
        rd1 = relativedelta(minutes=15.6)

        self.assertEqual(rd1.normalized(),
                         relativedelta(minutes=15, seconds=36))

        # Equivalent to (minutes=25, seconds=20, microseconds=25000)
        rd2 = relativedelta(minutes=25.33375)

        self.assertEqual(rd2.normalized(),
                         relativedelta(minutes=25, seconds=20, microseconds=25000))

    def testRelativeDeltaNormalizeFractionalSeconds(self):
        # Equivalent to (seconds=45, microseconds=25000)
        rd1 = relativedelta(seconds=45.025)
        self.assertEqual(rd1.normalized(),
                         relativedelta(seconds=45, microseconds=25000))

    def testRelativeDeltaFractionalPositiveOverflow2(self):
        # Equivalent to (days=1, hours=14)
        rd1 = relativedelta(days=1.5, hours=2)
        self.assertEqual(rd1.normalized(),
                         relativedelta(days=1, hours=14))

        # Equivalent to (days=1, hours=14, minutes=45)
        rd2 = relativedelta(days=1.5, hours=2.5, minutes=15)
        self.assertEqual(rd2.normalized(),
                         relativedelta(days=1, hours=14, minutes=45))

        # Carry back up - equivalent to:
        # (days=2, hours=2, minutes=0, seconds=2, microseconds=3)
        rd3 = relativedelta(days=1.5, hours=13, minutes=59.50045,
                            seconds=31.473, microseconds=500003)
        self.assertEqual(rd3.normalized(),
                         relativedelta(days=2, hours=2, minutes=0,
                                       seconds=2, microseconds=3))

    def testRelativeDeltaFractionalNegativeOverflow(self):
        # Equivalent to (days=-1)
        rd1 = relativedelta(days=-0.5, hours=-12)
        self.assertEqual(rd1.normalized(),
                         relativedelta(days=-1))

        # Equivalent to (days=-1)
        rd2 = relativedelta(days=-1.5, hours=12)
        self.assertEqual(rd2.normalized(),
                         relativedelta(days=-1))

        # Equivalent to (days=-1, hours=-14, minutes=-45)
        rd3 = relativedelta(days=-1.5, hours=-2.5, minutes=-15)
        self.assertEqual(rd3.normalized(),
                         relativedelta(days=-1, hours=-14, minutes=-45))

        # Equivalent to (days=-1, hours=-14, minutes=+15)
        rd4 = relativedelta(days=-1.5, hours=-2.5, minutes=45)
        self.assertEqual(rd4.normalized(),
                         relativedelta(days=-1, hours=-14, minutes=+15))

        # Carry back up - equivalent to:
        # (days=-2, hours=-2, minutes=0, seconds=-2, microseconds=-3)
        rd3 = relativedelta(days=-1.5, hours=-13, minutes=-59.50045,
                            seconds=-31.473, microseconds=-500003)
        self.assertEqual(rd3.normalized(),
                         relativedelta(days=-2, hours=-2, minutes=0,
                                       seconds=-2, microseconds=-3))

    def testInvalidYearDay(self):
        with self.assertRaises(ValueError):
            relativedelta(yearday=367)

    def testAddTimedeltaToUnpopulatedRelativedelta(self):
        td = timedelta(
            days=1,
            seconds=1,
            microseconds=1,
            milliseconds=1,
            minutes=1,
            hours=1,
            weeks=1
        )

        expected = relativedelta(
            weeks=1,
            days=1,
            hours=1,
            minutes=1,
            seconds=1,
            microseconds=1001
        )

        self.assertEqual(expected, relativedelta() + td)

    def testAddTimedeltaToPopulatedRelativeDelta(self):
        td = timedelta(
            days=1,
            seconds=1,
            microseconds=1,
            milliseconds=1,
            minutes=1,
            hours=1,
            weeks=1
        )

        rd = relativedelta(
            year=1,
            month=1,
            day=1,
            hour=1,
            minute=1,
            second=1,
            microsecond=1,
            years=1,
            months=1,
            days=1,
            weeks=1,
            hours=1,
            minutes=1,
            seconds=1,
            microseconds=1
        )

        expected = relativedelta(
            year=1,
            month=1,
            day=1,
            hour=1,
            minute=1,
            second=1,
            microsecond=1,
            years=1,
            months=1,
            weeks=2,
            days=2,
            hours=2,
            minutes=2,
            seconds=2,
            microseconds=1002,
        )

        self.assertEqual(expected, rd + td)

    def testHashable(self):
        try:
            {relativedelta(minute=1): 'test'}
        except:
            self.fail("relativedelta() failed to hash!")


class RelativeDeltaWeeksPropertyGetterTest(unittest.TestCase):
    """Test the weeks property getter"""

    def test_one_day(self):
        rd = relativedelta(days=1)
        self.assertEqual(rd.days, 1)
        self.assertEqual(rd.weeks, 0)

    def test_minus_one_day(self):
        rd = relativedelta(days=-1)
        self.assertEqual(rd.days, -1)
        self.assertEqual(rd.weeks, 0)

    def test_height_days(self):
        rd = relativedelta(days=8)
        self.assertEqual(rd.days, 8)
        self.assertEqual(rd.weeks, 1)

    def test_minus_height_days(self):
        rd = relativedelta(days=-8)
        self.assertEqual(rd.days, -8)
        self.assertEqual(rd.weeks, -1)


class RelativeDeltaWeeksPropertySetterTest(unittest.TestCase):
    """Test the weeks setter which makes a "smart" update of the days attribute"""

    def test_one_day_set_one_week(self):
        rd = relativedelta(days=1)
        rd.weeks = 1  # add 7 days
        self.assertEqual(rd.days, 8)
        self.assertEqual(rd.weeks, 1)

    def test_minus_one_day_set_one_week(self):
        rd = relativedelta(days=-1)
        rd.weeks = 1  # add 7 days
        self.assertEqual(rd.days, 6)
        self.assertEqual(rd.weeks, 0)

    def test_height_days_set_minus_one_week(self):
        rd = relativedelta(days=8)
        rd.weeks = -1  # change from 1 week, 1 day to -1 week, 1 day
        self.assertEqual(rd.days, -6)
        self.assertEqual(rd.weeks, 0)

    def test_minus_height_days_set_minus_one_week(self):
        rd = relativedelta(days=-8)
        rd.weeks = -1  # does not change anything
        self.assertEqual(rd.days, -8)
        self.assertEqual(rd.weeks, -1)


# vim:ts=4:sw=4:et
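
The deleted tests above pin down two relativedelta behaviors worth remembering: month arithmetic clamps to the end of shorter months, and fractional units carry down into smaller ones. A minimal sketch of both, assuming python-dateutil is importable on the add-on's Python path (standard dateutil API, nothing from this diff):

from datetime import date
from dateutil.relativedelta import relativedelta

# Month arithmetic clamps to the end of shorter months.
print(date(2003, 1, 31) + relativedelta(months=+1))   # 2003-02-28

# Fractional units carry down: 1.5 days normalizes to 1 day, 12 hours.
print(relativedelta(days=1.5).normalized())           # relativedelta(days=+1, hours=+12)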
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,53 +0,0 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import timedelta, datetime

import unittest

from dateutil import tz
from dateutil import utils
from dateutil.utils import within_delta

from freezegun import freeze_time

UTC = tz.tzutc()
NYC = tz.gettz("America/New_York")


class UtilsTest(unittest.TestCase):
    @freeze_time(datetime(2014, 12, 15, 1, 21, 33, 4003))
    def testToday(self):
        self.assertEqual(utils.today(), datetime(2014, 12, 15, 0, 0, 0))

    @freeze_time(datetime(2014, 12, 15, 12), tz_offset=5)
    def testTodayTzInfo(self):
        self.assertEqual(utils.today(NYC),
                         datetime(2014, 12, 15, 0, 0, 0, tzinfo=NYC))

    @freeze_time(datetime(2014, 12, 15, 23), tz_offset=5)
    def testTodayTzInfoDifferentDay(self):
        self.assertEqual(utils.today(UTC),
                         datetime(2014, 12, 16, 0, 0, 0, tzinfo=UTC))

    def testDefaultTZInfoNaive(self):
        dt = datetime(2014, 9, 14, 9, 30)
        self.assertIs(utils.default_tzinfo(dt, NYC).tzinfo,
                      NYC)

    def testDefaultTZInfoAware(self):
        dt = datetime(2014, 9, 14, 9, 30, tzinfo=UTC)
        self.assertIs(utils.default_tzinfo(dt, NYC).tzinfo,
                      UTC)

    def testWithinDelta(self):
        d1 = datetime(2016, 1, 1, 12, 14, 1, 9)
        d2 = d1.replace(microsecond=15)

        self.assertTrue(within_delta(d1, d2, timedelta(seconds=1)))
        self.assertFalse(within_delta(d1, d2, timedelta(microseconds=1)))

    def testWithinDeltaWithNegativeDelta(self):
        d1 = datetime(2016, 1, 1)
        d2 = datetime(2015, 12, 31)

        self.assertTrue(within_delta(d2, d1, timedelta(days=-1)))
@@ -1,17 +0,0 @@
# -*- coding: utf-8 -*-
from .tz import *
from .tz import __doc__

#: Convenience constant providing a :class:`tzutc()` instance
#:
#: .. versionadded:: 2.7.0
UTC = tzutc()

__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
           "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
           "enfold", "datetime_ambiguous", "datetime_exists",
           "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]


class DeprecatedTzFormatWarning(Warning):
    """Warning raised when time zones are parsed from deprecated formats."""
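
For reference, the UTC constant above is just the tzutc() singleton, so it can be attached directly to datetimes. A minimal sketch, assuming dateutil >= 2.7 on the import path:

from datetime import datetime
from dateutil.tz import UTC

now = datetime.now(UTC)  # timezone-aware current time, no tzutc() call needed
print(now.tzname())      # 'UTC'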
@@ -1,415 +0,0 @@
from ..six import PY3

from functools import wraps

from datetime import datetime, timedelta, tzinfo


ZERO = timedelta(0)

__all__ = ['tzname_in_python2', 'enfold']


def tzname_in_python2(namefunc):
    """Change unicode output into bytestrings in Python 2

    tzname() API changed in Python 3. It used to return bytes, but was changed
    to unicode strings
    """
    def adjust_encoding(*args, **kwargs):
        name = namefunc(*args, **kwargs)
        if name is not None and not PY3:
            name = name.encode()

        return name

    return adjust_encoding


# The following is adapted from Alexander Belopolsky's tz library
# https://github.com/abalkin/tz
if hasattr(datetime, 'fold'):
    # This is the Python 3.6+ case, where datetime supports ``fold`` natively
    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        return dt.replace(fold=fold)

else:
    class _DatetimeWithFold(datetime):
        """
        This is a class designed to provide a PEP 495-compliant interface for
        Python versions before 3.6. It is used only for dates in a fold, so
        the ``fold`` attribute is fixed at ``1``.

        .. versionadded:: 2.6.0
        """
        __slots__ = ()

        def replace(self, *args, **kwargs):
            """
            Return a datetime with the same attributes, except for those
            attributes given new values by whichever keyword arguments are
            specified. Note that tzinfo=None can be specified to create a naive
            datetime from an aware datetime with no conversion of date and time
            data.

            This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
            return a ``datetime.datetime`` even if ``fold`` is unchanged.
            """
            argnames = (
                'year', 'month', 'day', 'hour', 'minute', 'second',
                'microsecond', 'tzinfo'
            )

            for arg, argname in zip(args, argnames):
                if argname in kwargs:
                    raise TypeError('Duplicate argument: {}'.format(argname))

                kwargs[argname] = arg

            for argname in argnames:
                if argname not in kwargs:
                    kwargs[argname] = getattr(self, argname)

            dt_class = self.__class__ if kwargs.get('fold', 1) else datetime

            return dt_class(**kwargs)

        @property
        def fold(self):
            return 1

    def enfold(dt, fold=1):
        """
        Provides a unified interface for assigning the ``fold`` attribute to
        datetimes both before and after the implementation of PEP-495.

        :param fold:
            The value for the ``fold`` attribute in the returned datetime. This
            should be either 0 or 1.

        :return:
            Returns an object for which ``getattr(dt, 'fold', 0)`` returns
            ``fold`` for all versions of Python. In versions prior to
            Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
            subclass of :py:class:`datetime.datetime` with the ``fold``
            attribute added, if ``fold`` is 1.

        .. versionadded:: 2.6.0
        """
        if getattr(dt, 'fold', 0) == fold:
            return dt

        args = dt.timetuple()[:6]
        args += (dt.microsecond, dt.tzinfo)

        if fold:
            return _DatetimeWithFold(*args)
        else:
            return datetime(*args)


def _validate_fromutc_inputs(f):
    """
    The CPython version of ``fromutc`` checks that the input is a ``datetime``
    object and that ``self`` is attached as its ``tzinfo``.
    """
    @wraps(f)
    def fromutc(self, dt):
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        return f(self, dt)

    return fromutc


class _tzinfo(tzinfo):
    """
    Base class for all ``dateutil`` ``tzinfo`` objects.
    """

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.


        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """

        dt = dt.replace(tzinfo=self)

        wall_0 = enfold(dt, fold=0)
        wall_1 = enfold(dt, fold=1)

        same_offset = wall_0.utcoffset() == wall_1.utcoffset()
        same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)

        return same_dt and not same_offset

    def _fold_status(self, dt_utc, dt_wall):
        """
        Determine the fold status of a "wall" datetime, given a representation
        of the same datetime as a (naive) UTC datetime. This is calculated based
        on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
        datetimes, and that this offset is the actual number of hours separating
        ``dt_utc`` and ``dt_wall``.

        :param dt_utc:
            Representation of the datetime as UTC

        :param dt_wall:
            Representation of the datetime as "wall time". This parameter must
            either have a `fold` attribute or have a fold-naive
            :class:`datetime.tzinfo` attached, otherwise the calculation may
            fail.
        """
        if self.is_ambiguous(dt_wall):
            delta_wall = dt_wall - dt_utc
            _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
        else:
            _fold = 0

        return _fold

    def _fold(self, dt):
        return getattr(dt, 'fold', 0)

    def _fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """

        # Re-implement the algorithm from Python's datetime.py
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")

        # The original datetime.py code assumes that `dst()` defaults to
        # zero during ambiguous times. PEP 495 inverts this presumption, so
        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        delta = dtoff - dtdst

        dt += delta
        # Set fold=1 so we can default to being in the fold for
        # ambiguous dates.
        dtdst = enfold(dt, fold=1).dst()
        if dtdst is None:
            raise ValueError("fromutc(): dt.dst gave inconsistent "
                             "results; cannot convert")
        return dt + dtdst

    @_validate_fromutc_inputs
    def fromutc(self, dt):
        """
        Given a timezone-aware datetime in a given timezone, calculates a
        timezone-aware datetime in a new timezone.

        Since this is the one time that we *know* we have an unambiguous
        datetime object, we take this opportunity to determine whether the
        datetime is ambiguous and in a "fold" state (e.g. if it's the first
        occurrence, chronologically, of the ambiguous datetime).

        :param dt:
            A timezone-aware :class:`datetime.datetime` object.
        """
        dt_wall = self._fromutc(dt)

        # Calculate the fold status given the two datetimes.
        _fold = self._fold_status(dt, dt_wall)

        # Set the default fold value for ambiguous dates
        return enfold(dt_wall, fold=_fold)


class tzrangebase(_tzinfo):
    """
    This is an abstract base class for time zones represented by an annual
    transition into and out of DST. Child classes should implement the following
    methods:

        * ``__init__(self, *args, **kwargs)``
        * ``transitions(self, year)`` - this is expected to return a tuple of
          datetimes representing the DST on and off transitions in standard
          time.

    A fully initialized ``tzrangebase`` subclass should also provide the
    following attributes:
        * ``hasdst``: Boolean whether or not the zone uses DST.
        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
          representing the respective UTC offsets.
        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
          abbreviations in DST and STD, respectively.
        * ``_hasdst``: Whether or not the zone has DST.

    .. versionadded:: 2.6.0
    """
    def __init__(self):
        raise NotImplementedError('tzrangebase is an abstract base class')

    def utcoffset(self, dt):
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        isdst = self._isdst(dt)

        if isdst is None:
            return None
        elif isdst:
            return self._dst_base_offset
        else:
            return ZERO

    @tzname_in_python2
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr

    def fromutc(self, dt):
        """ Given a datetime in UTC, return local time """
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")

        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")

        # Get transitions - if there are none, fixed offset
        transitions = self.transitions(dt.year)
        if transitions is None:
            return dt + self.utcoffset(dt)

        # Get the transition times in UTC
        dston, dstoff = transitions

        dston -= self._std_offset
        dstoff -= self._std_offset

        utc_transitions = (dston, dstoff)
        dt_utc = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt_utc, utc_transitions)

        if isdst:
            dt_wall = dt + self._dst_offset
        else:
            dt_wall = dt + self._std_offset

        _fold = int(not isdst and self.is_ambiguous(dt_wall))

        return enfold(dt_wall, fold=_fold)

    def is_ambiguous(self, dt):
        """
        Whether or not the "wall time" of a given datetime is ambiguous in this
        zone.

        :param dt:
            A :py:class:`datetime.datetime`, naive or time zone aware.


        :return:
            Returns ``True`` if ambiguous, ``False`` otherwise.

        .. versionadded:: 2.6.0
        """
        if not self.hasdst:
            return False

        start, end = self.transitions(dt.year)

        dt = dt.replace(tzinfo=None)
        return (end <= dt < end + self._dst_base_offset)

    def _isdst(self, dt):
        if not self.hasdst:
            return False
        elif dt is None:
            return None

        transitions = self.transitions(dt.year)

        if transitions is None:
            return False

        dt = dt.replace(tzinfo=None)

        isdst = self._naive_isdst(dt, transitions)

        # Handle ambiguous dates
        if not isdst and self.is_ambiguous(dt):
            return not self._fold(dt)
        else:
            return isdst

    def _naive_isdst(self, dt, transitions):
        dston, dstoff = transitions

        dt = dt.replace(tzinfo=None)

        if dston < dstoff:
            isdst = dston <= dt < dstoff
        else:
            isdst = not dstoff <= dt < dston

        return isdst

    @property
    def _dst_base_offset(self):
        return self._dst_offset - self._std_offset

    __hash__ = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s(...)" % self.__class__.__name__

    __reduce__ = object.__reduce__
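
enfold() above is the PEP 495 shim: it marks which side of an ambiguous wall time a datetime refers to. A minimal sketch of the observable effect, assuming a system-installed python-dateutil with zoneinfo data for America/New_York:

from datetime import datetime
from dateutil import tz

eastern = tz.gettz('America/New_York')
# 01:30 on 2017-11-05 occurs twice; fold selects which occurrence is meant.
dt = datetime(2017, 11, 5, 1, 30, tzinfo=eastern)
print(dt.tzname(), tz.enfold(dt, fold=1).tzname())  # EDT EST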
@@ -1,49 +0,0 @@
from datetime import timedelta


class _TzSingleton(type):
    def __init__(cls, *args, **kwargs):
        cls.__instance = None
        super(_TzSingleton, cls).__init__(*args, **kwargs)

    def __call__(cls):
        if cls.__instance is None:
            cls.__instance = super(_TzSingleton, cls).__call__()
        return cls.__instance

class _TzFactory(type):
    def instance(cls, *args, **kwargs):
        """Alternate constructor that returns a fresh instance"""
        return type.__call__(cls, *args, **kwargs)


class _TzOffsetFactory(_TzFactory):
    def __init__(cls, *args, **kwargs):
        cls.__instances = {}

    def __call__(cls, name, offset):
        if isinstance(offset, timedelta):
            key = (name, offset.total_seconds())
        else:
            key = (name, offset)

        instance = cls.__instances.get(key, None)
        if instance is None:
            instance = cls.__instances.setdefault(key,
                                                  cls.instance(name, offset))
        return instance


class _TzStrFactory(_TzFactory):
    def __init__(cls, *args, **kwargs):
        cls.__instances = {}

    def __call__(cls, s, posix_offset=False):
        key = (s, posix_offset)
        instance = cls.__instances.get(key, None)

        if instance is None:
            instance = cls.__instances.setdefault(key,
                                                  cls.instance(s, posix_offset))
        return instance
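
These metaclasses give tz classes a per-argument instance cache. A sketch of the observable effect, assuming a dateutil version where the factories are wired into tzoffset (2.7+):

from dateutil import tz

# Equal constructor arguments return the same cached object, so
# identity comparisons and tzinfo-heavy parsing stay cheap.
assert tz.tzoffset('EST', -18000) is tz.tzoffset('EST', -18000)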
File diff suppressed because it is too large
@@ -1,331 +0,0 @@
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct

from six.moves import winreg
from six import text_type

try:
    import ctypes
    from ctypes import wintypes
except ValueError:
    # ValueError is raised on non-Windows systems for some horrible reason.
    raise ImportError("Running tzwin on non-Windows system")

from ._common import tzrangebase

__all__ = ["tzwin", "tzwinlocal", "tzres"]

ONEWEEK = datetime.timedelta(7)

TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"


def _settzkeyname():
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
    try:
        winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
    return TZKEYNAME


TZKEYNAME = _settzkeyname()


class tzres(object):
    """
    Class for accessing `tzres.dll`, which contains timezone name related
    resources.

    .. versionadded:: 2.5.0
    """
    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char

    def __init__(self, tzres_loc='tzres.dll'):
        # Load the user32 DLL so we can load strings from tzres
        user32 = ctypes.WinDLL('user32')

        # Specify the LoadStringW function
        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
                                       wintypes.UINT,
                                       wintypes.LPWSTR,
                                       ctypes.c_int)

        self.LoadStringW = user32.LoadStringW
        self._tzres = ctypes.WinDLL(tzres_loc)
        self.tzres_loc = tzres_loc

    def load_name(self, offset):
        """
        Load a timezone name from a DLL offset (integer).

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.load_name(112))
        'Eastern Standard Time'

        :param offset:
            A positive integer value referring to a string from the tzres dll.

        .. note::
            Offsets found in the registry are generally of the form
            `@tzres.dll,-114`. The offset in this case is 114, not -114.

        """
        resource = self.p_wchar()
        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
        return resource[:nchar]

    def name_from_string(self, tzname_str):
        """
        Parse strings as returned from the Windows registry into the time zone
        name as defined in the registry.

        >>> from dateutil.tzwin import tzres
        >>> tzr = tzres()
        >>> print(tzr.name_from_string('@tzres.dll,-251'))
        'Dateline Daylight Time'
        >>> print(tzr.name_from_string('Eastern Standard Time'))
        'Eastern Standard Time'

        :param tzname_str:
            A timezone name string as returned from a Windows registry key.

        :return:
            Returns the localized timezone string from tzres.dll if the string
            is of the form `@tzres.dll,-offset`, else returns the input string.
        """
        if not tzname_str.startswith('@'):
            return tzname_str

        name_splt = tzname_str.split(',-')
        try:
            offset = int(name_splt[1])
        except:
            raise ValueError("Malformed timezone string.")

        return self.load_name(offset)


class tzwinbase(tzrangebase):
    """tzinfo class based on win32's timezones available in the registry."""
    def __init__(self):
        raise NotImplementedError('tzwinbase is an abstract base class')

    def __eq__(self, other):
        # Compare on all relevant dimensions, including name.
        if not isinstance(other, tzwinbase):
            return NotImplemented

        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._stddayofweek == other._stddayofweek and
                self._dstdayofweek == other._dstdayofweek and
                self._stdweeknumber == other._stdweeknumber and
                self._dstweeknumber == other._dstweeknumber and
                self._stdhour == other._stdhour and
                self._dsthour == other._dsthour and
                self._stdminute == other._stdminute and
                self._dstminute == other._dstminute and
                self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr)

    @staticmethod
    def list():
        """Return a list of all time zones known to the system."""
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
                result = [winreg.EnumKey(tzkey, i)
                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
        return result

    def display(self):
        return self._display

    def transitions(self, year):
        """
        For a given year, get the DST on and off transition times, expressed
        always on the standard time side. For zones with no transitions, this
        function returns ``None``.

        :param year:
            The year whose transitions you would like to query.

        :return:
            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
            ``(dston, dstoff)`` for zones with an annual DST transition, or
            ``None`` for fixed offset zones.
        """

        if not self.hasdst:
            return None

        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)

        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)

        # Ambiguous dates default to the STD side
        dstoff -= self._dst_base_offset

        return dston, dstoff

    def _get_hasdst(self):
        return self._dstmonth != 0

    @property
    def _dst_base_offset(self):
        return self._dst_base_offset_


class tzwin(tzwinbase):

    def __init__(self, name):
        self._name = name

        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
            with winreg.OpenKey(handle, tzkeyname) as tzkey:
                keydict = valuestodict(tzkey)

        self._std_abbr = keydict["Std"]
        self._dst_abbr = keydict["Dlt"]

        self._display = keydict["Display"]

        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        tup = struct.unpack("=3l16h", keydict["TZI"])
        stdoffset = -tup[0]-tup[1]    # Bias + StandardBias * -1
        dstoffset = stdoffset-tup[2]  # + DaylightBias * -1
        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
        (self._stdmonth,
         self._stddayofweek,   # Sunday = 0
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]

        (self._dstmonth,
         self._dstdayofweek,   # Sunday = 0
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)

    def __reduce__(self):
        return (self.__class__, (self._name,))


class tzwinlocal(tzwinbase):
    def __init__(self):
        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
            with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
                keydict = valuestodict(tzlocalkey)

            self._std_abbr = keydict["StandardName"]
            self._dst_abbr = keydict["DaylightName"]

            try:
                tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
                                                           sn=self._std_abbr)
                with winreg.OpenKey(handle, tzkeyname) as tzkey:
                    _keydict = valuestodict(tzkey)
                    self._display = _keydict["Display"]
            except OSError:
                self._display = None

        stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        dstoffset = stdoffset-keydict["DaylightBias"]

        self._std_offset = datetime.timedelta(minutes=stdoffset)
        self._dst_offset = datetime.timedelta(minutes=dstoffset)

        # For reasons unclear, in this particular key, the day of week has been
        # moved to the END of the SYSTEMTIME structure.
        tup = struct.unpack("=8h", keydict["StandardStart"])

        (self._stdmonth,
         self._stdweeknumber,  # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:5]

        self._stddayofweek = tup[7]

        tup = struct.unpack("=8h", keydict["DaylightStart"])

        (self._dstmonth,
         self._dstweeknumber,  # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:5]

        self._dstdayofweek = tup[7]

        self._dst_base_offset_ = self._dst_offset - self._std_offset
        self.hasdst = self._get_hasdst()

    def __repr__(self):
        return "tzwinlocal()"

    def __str__(self):
        # str will return the standard name, not the daylight name.
        return "tzwinlocal(%s)" % repr(self._std_abbr)

    def __reduce__(self):
        return (self.__class__, ())


def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
    first = datetime.datetime(year, month, 1, hour, minute)

    # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6),
    # Because 7 % 7 = 0
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
    wd = weekdayone + ((whichweek - 1) * ONEWEEK)
    if (wd.month != month):
        wd -= ONEWEEK

    return wd


def valuestodict(key):
    """Convert a registry key's values to a dictionary."""
    dout = {}
    size = winreg.QueryInfoKey(key)[1]
    tz_res = None

    for i in range(size):
        key_name, value, dtype = winreg.EnumValue(key, i)
        if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
            # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
            # that to a proper signed integer
            if value & (1 << 31):
                value = value - (1 << 32)
        elif dtype == winreg.REG_SZ:
            # If it's a reference to the tzres DLL, load the actual string
            if value.startswith('@tzres'):
                tz_res = tz_res or tzres()
                value = tz_res.name_from_string(value)

            value = value.rstrip('\x00')  # Remove trailing nulls

        dout[key_name] = value

    return dout
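
picknthweekday() is the piece that turns the registry's "nth weekday of a month" rule into a concrete date; whichweek=5 means "last". A small sketch of the convention, assuming the function as defined above:

import datetime

# dayofweek=0 is Sunday; the second Sunday of March 2017 at 02:00
# (the US DST start) comes out as expected:
print(picknthweekday(2017, 3, 0, 2, 0, 2))  # 2017-03-12 02:00:00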
@@ -1,2 +0,0 @@
# tzwin has moved to dateutil.tz.win
from .tz.win import *
@@ -1,71 +0,0 @@
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.

.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals

from datetime import datetime, time


def today(tzinfo=None):
    """
    Returns a :py:class:`datetime` representing the current day at midnight

    :param tzinfo:
        The time zone to attach (also used to determine the current day).

    :return:
        A :py:class:`datetime.datetime` object representing the current day
        at midnight.
    """

    dt = datetime.now(tzinfo)
    return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))


def default_tzinfo(dt, tzinfo):
    """
    Sets the ``tzinfo`` parameter on naive datetimes only

    This is useful for example when you are provided a datetime that may have
    either an implicit or explicit time zone, such as when parsing a time zone
    string.

    .. doctest::

        >>> from dateutil.tz import tzoffset
        >>> from dateutil.parser import parse
        >>> from dateutil.utils import default_tzinfo
        >>> dflt_tz = tzoffset("EST", -18000)
        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
        2014-01-01 12:30:00+00:00
        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
        2014-01-01 12:30:00-05:00

    :param dt:
        The datetime on which to replace the time zone

    :param tzinfo:
        The :py:class:`datetime.tzinfo` subclass instance to assign to
        ``dt`` if (and only if) it is naive.

    :return:
        Returns an aware :py:class:`datetime.datetime`.
    """
    if dt.tzinfo is not None:
        return dt
    else:
        return dt.replace(tzinfo=tzinfo)


def within_delta(dt1, dt2, delta):
    """
    Useful for comparing two datetimes that may have a negligible difference
    to be considered equal.
    """
    delta = abs(delta)
    difference = dt1 - dt2
    return -delta <= difference <= delta
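
within_delta() lacks a doctest of its own, so here is a minimal sketch mirroring the deleted test above, assuming python-dateutil on the import path:

from datetime import datetime, timedelta
from dateutil.utils import within_delta

d1 = datetime(2016, 1, 1, 12, 14, 1, 9)
d2 = d1.replace(microsecond=15)
print(within_delta(d1, d2, timedelta(seconds=1)))       # True
print(within_delta(d1, d2, timedelta(microseconds=1)))  # False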
@@ -1,167 +0,0 @@
# -*- coding: utf-8 -*-
import warnings
import json

from tarfile import TarFile
from pkgutil import get_data
from io import BytesIO

from dateutil.tz import tzfile as _tzfile

__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]

ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
METADATA_FN = 'METADATA'


class tzfile(_tzfile):
    def __reduce__(self):
        return (gettz, (self._filename,))


def getzoneinfofile_stream():
    try:
        return BytesIO(get_data(__name__, ZONEFILENAME))
    except IOError as e:  # TODO switch to FileNotFoundError?
        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
        return None


class ZoneInfoFile(object):
    def __init__(self, zonefile_stream=None):
        if zonefile_stream is not None:
            with TarFile.open(fileobj=zonefile_stream) as tf:
                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
                              for zf in tf.getmembers()
                              if zf.isfile() and zf.name != METADATA_FN}
                # deal with links: They'll point to their parent object. Less
                # waste of memory
                links = {zl.name: self.zones[zl.linkname]
                         for zl in tf.getmembers() if
                         zl.islnk() or zl.issym()}
                self.zones.update(links)
                try:
                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
                    metadata_str = metadata_json.read().decode('UTF-8')
                    self.metadata = json.loads(metadata_str)
                except KeyError:
                    # no metadata in tar file
                    self.metadata = None
        else:
            self.zones = {}
            self.metadata = None

    def get(self, name, default=None):
        """
        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
        for retrieving zones from the zone dictionary.

        :param name:
            The name of the zone to retrieve. (Generally IANA zone names)

        :param default:
            The value to return in the event of a missing key.

        .. versionadded:: 2.6.0

        """
        return self.zones.get(name, default)


# The current API has gettz as a module function, although in fact it taps into
# a stateful class. So as a workaround for now, without changing the API, we
# will create a new "global" class instance the first time a user requests a
# timezone. Ugly, but adheres to the api.
#
# TODO: Remove after deprecation period.
_CLASS_ZONE_INSTANCE = []


def get_zonefile_instance(new_instance=False):
    """
    This is a convenience function which provides a :class:`ZoneInfoFile`
    instance using the data provided by the ``dateutil`` package. By default, it
    caches a single instance of the ZoneInfoFile object and returns that.

    :param new_instance:
        If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
        used as the cached instance for the next call. Otherwise, new instances
        are created only as necessary.

    :return:
        Returns a :class:`ZoneInfoFile` object.

    .. versionadded:: 2.6
    """
    if new_instance:
        zif = None
    else:
        zif = getattr(get_zonefile_instance, '_cached_instance', None)

    if zif is None:
        zif = ZoneInfoFile(getzoneinfofile_stream())

        get_zonefile_instance._cached_instance = zif

    return zif


def gettz(name):
    """
    This retrieves a time zone from the local zoneinfo tarball that is packaged
    with dateutil.

    :param name:
        An IANA-style time zone name, as found in the zoneinfo file.

    :return:
        Returns a :class:`dateutil.tz.tzfile` time zone object.

    .. warning::
        It is generally inadvisable to use this function, and it is only
        provided for API compatibility with earlier versions. This is *not*
        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
        time zone based on the inputs, favoring system zoneinfo. This is ONLY
        for accessing the dateutil-specific zoneinfo (which may be out of
        date compared to the system zoneinfo).

    .. deprecated:: 2.6
        If you need to use a specific zoneinfofile over the system zoneinfo,
        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.

        Use :func:`get_zonefile_instance` to retrieve an instance of the
        dateutil-provided zoneinfo.
    """
    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
                  "to use the dateutil-provided zoneinfo files, instantiate a "
                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].zones.get(name)


def gettz_db_metadata():
    """ Get the zonefile metadata

    See `zonefile_metadata`_

    :returns:
        A dictionary with the database metadata

    .. deprecated:: 2.6
        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
    """
    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
                  "versions, to use the dateutil-provided zoneinfo files, "
                  "ZoneInfoFile object and query the 'metadata' attribute "
                  "instead. See the documentation for details.",
                  DeprecationWarning)

    if len(_CLASS_ZONE_INSTANCE) == 0:
        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
    return _CLASS_ZONE_INSTANCE[0].metadata
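
Per the deprecation notes above, get_zonefile_instance() is the supported entry point to this bundled database. A minimal sketch, assuming a python-dateutil install that ships the zoneinfo tarball:

from dateutil.zoneinfo import get_zonefile_instance

zif = get_zonefile_instance()      # cached ZoneInfoFile
nyc = zif.get('America/New_York')  # tzfile instance, or None if absent
print(len(zif.zones))              # number of bundled zone entries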
Binary file not shown.
@@ -1,53 +0,0 @@
import logging
import os
import tempfile
import shutil
import json
from subprocess import check_call
from tarfile import TarFile

from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME


def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
    """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*

    filename is the timezone tarball from ``ftp.iana.org/tz``.

    """
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    try:
        with TarFile.open(filename) as tf:
            for name in zonegroups:
                tf.extract(name, tmpdir)
            filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
            try:
                check_call(["zic", "-d", zonedir] + filepaths)
            except OSError as e:
                _print_on_nosuchfile(e)
                raise
        # write metadata file
        with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
            json.dump(metadata, f, indent=4, sort_keys=True)
        target = os.path.join(moduledir, ZONEFILENAME)
        with TarFile.open(target, "w:%s" % format) as tf:
            for entry in os.listdir(zonedir):
                entrypath = os.path.join(zonedir, entry)
                tf.add(entrypath, entry)
    finally:
        shutil.rmtree(tmpdir)


def _print_on_nosuchfile(e):
    """Print helpful troubleshooting message

    e is an exception raised by subprocess.check_call()

    """
    if e.errno == 2:
        logging.error(
            "Could not find zic. Perhaps you need to install "
            "libc-bin or some other package that provides it, "
            "or it's not in your PATH?")
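This helper only matters when regenerating the bundled tarball from IANA data; a hypothetical invocation (tarball name, tag, and zone groups are illustrative, the module path assumes the file's upstream location dateutil/zoneinfo/rebuild.py, and `zic` must be on PATH as the error message notes) would look like:

    from dateutil.zoneinfo.rebuild import rebuild

    # metadata is dumped verbatim as JSON next to the compiled zones
    rebuild("tzdata-2018e.tar.gz", tag="2018e",
            zonegroups=["europe", "northamerica"],
            metadata={"tzversion": "2018e"})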
@@ -1,43 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.


"""Mutagen aims to be an all purpose multimedia tagging library.

::

    import mutagen.[format]
    metadata = mutagen.[format].Open(filename)

`metadata` acts like a dictionary of tags in the file. Tags are generally a
list of string-like values, but may have additional methods available
depending on tag or format. They may also be entirely different objects
for certain keys, again depending on format.
"""

from mutagen._util import MutagenError
from mutagen._file import FileType, StreamInfo, File
from mutagen._tags import Metadata, PaddingInfo

version = (1, 31)
"""Version tuple."""

version_string = ".".join(map(str, version))
"""Version string."""

MutagenError

FileType

StreamInfo

File

Metadata

PaddingInfo
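The `mutagen.[format].Open(filename)` pattern in that docstring maps onto concrete classes; a minimal sketch (the file name is hypothetical):

    from mutagen.flac import FLAC

    audio = FLAC("example.flac")   # behaves like a dict of tag lists
    audio["title"] = u"An Example"
    audio.save()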
Binary file not shown. (×28)
@@ -1,86 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

import sys


PY2 = sys.version_info[0] == 2
PY3 = not PY2

if PY2:
    from StringIO import StringIO
    BytesIO = StringIO
    from cStringIO import StringIO as cBytesIO
    from itertools import izip

    long_ = long
    integer_types = (int, long)
    string_types = (str, unicode)
    text_type = unicode

    xrange = xrange
    cmp = cmp
    chr_ = chr

    def endswith(text, end):
        return text.endswith(end)

    iteritems = lambda d: d.iteritems()
    itervalues = lambda d: d.itervalues()
    iterkeys = lambda d: d.iterkeys()

    iterbytes = lambda b: iter(b)

    exec("def reraise(tp, value, tb):\n raise tp, value, tb")

    def swap_to_string(cls):
        if "__str__" in cls.__dict__:
            cls.__unicode__ = cls.__str__

        if "__bytes__" in cls.__dict__:
            cls.__str__ = cls.__bytes__

        return cls

elif PY3:
    from io import StringIO
    StringIO = StringIO
    from io import BytesIO
    cBytesIO = BytesIO

    long_ = int
    integer_types = (int,)
    string_types = (str,)
    text_type = str

    izip = zip
    xrange = range
    cmp = lambda a, b: (a > b) - (a < b)
    chr_ = lambda x: bytes([x])

    def endswith(text, end):
        # useful for paths which can be both, str and bytes
        if isinstance(text, str):
            if not isinstance(end, str):
                end = end.decode("ascii")
        else:
            if not isinstance(end, bytes):
                end = end.encode("ascii")
        return text.endswith(end)

    iteritems = lambda d: iter(d.items())
    itervalues = lambda d: iter(d.values())
    iterkeys = lambda d: iter(d.keys())

    iterbytes = lambda b: (bytes([v]) for v in b)

    def reraise(tp, value, tb):
        raise tp(value).with_traceback(tb)

    def swap_to_string(cls):
        return cls
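One difference this shim papers over is byte iteration, which yields ints on Python 3 but length-1 bytes through iterbytes; a tiny check matching the PY3 branch above:

    from mutagen._compat import iterbytes, cmp

    assert list(iterbytes(b"ab")) == [b"a", b"b"]  # not [97, 98]
    assert cmp(1, 2) == -1                         # three-way compare shim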
@@ -1,199 +0,0 @@
# -*- coding: utf-8 -*-

"""Constants used by Mutagen."""

GENRES = [
    u"Blues",
    u"Classic Rock",
    u"Country",
    u"Dance",
    u"Disco",
    u"Funk",
    u"Grunge",
    u"Hip-Hop",
    u"Jazz",
    u"Metal",
    u"New Age",
    u"Oldies",
    u"Other",
    u"Pop",
    u"R&B",
    u"Rap",
    u"Reggae",
    u"Rock",
    u"Techno",
    u"Industrial",
    u"Alternative",
    u"Ska",
    u"Death Metal",
    u"Pranks",
    u"Soundtrack",
    u"Euro-Techno",
    u"Ambient",
    u"Trip-Hop",
    u"Vocal",
    u"Jazz+Funk",
    u"Fusion",
    u"Trance",
    u"Classical",
    u"Instrumental",
    u"Acid",
    u"House",
    u"Game",
    u"Sound Clip",
    u"Gospel",
    u"Noise",
    u"Alt. Rock",
    u"Bass",
    u"Soul",
    u"Punk",
    u"Space",
    u"Meditative",
    u"Instrumental Pop",
    u"Instrumental Rock",
    u"Ethnic",
    u"Gothic",
    u"Darkwave",
    u"Techno-Industrial",
    u"Electronic",
    u"Pop-Folk",
    u"Eurodance",
    u"Dream",
    u"Southern Rock",
    u"Comedy",
    u"Cult",
    u"Gangsta Rap",
    u"Top 40",
    u"Christian Rap",
    u"Pop/Funk",
    u"Jungle",
    u"Native American",
    u"Cabaret",
    u"New Wave",
    u"Psychedelic",
    u"Rave",
    u"Showtunes",
    u"Trailer",
    u"Lo-Fi",
    u"Tribal",
    u"Acid Punk",
    u"Acid Jazz",
    u"Polka",
    u"Retro",
    u"Musical",
    u"Rock & Roll",
    u"Hard Rock",
    u"Folk",
    u"Folk-Rock",
    u"National Folk",
    u"Swing",
    u"Fast-Fusion",
    u"Bebop",
    u"Latin",
    u"Revival",
    u"Celtic",
    u"Bluegrass",
    u"Avantgarde",
    u"Gothic Rock",
    u"Progressive Rock",
    u"Psychedelic Rock",
    u"Symphonic Rock",
    u"Slow Rock",
    u"Big Band",
    u"Chorus",
    u"Easy Listening",
    u"Acoustic",
    u"Humour",
    u"Speech",
    u"Chanson",
    u"Opera",
    u"Chamber Music",
    u"Sonata",
    u"Symphony",
    u"Booty Bass",
    u"Primus",
    u"Porn Groove",
    u"Satire",
    u"Slow Jam",
    u"Club",
    u"Tango",
    u"Samba",
    u"Folklore",
    u"Ballad",
    u"Power Ballad",
    u"Rhythmic Soul",
    u"Freestyle",
    u"Duet",
    u"Punk Rock",
    u"Drum Solo",
    u"A Cappella",
    u"Euro-House",
    u"Dance Hall",
    u"Goa",
    u"Drum & Bass",
    u"Club-House",
    u"Hardcore",
    u"Terror",
    u"Indie",
    u"BritPop",
    u"Afro-Punk",
    u"Polsk Punk",
    u"Beat",
    u"Christian Gangsta Rap",
    u"Heavy Metal",
    u"Black Metal",
    u"Crossover",
    u"Contemporary Christian",
    u"Christian Rock",
    u"Merengue",
    u"Salsa",
    u"Thrash Metal",
    u"Anime",
    u"JPop",
    u"Synthpop",
    u"Abstract",
    u"Art Rock",
    u"Baroque",
    u"Bhangra",
    u"Big Beat",
    u"Breakbeat",
    u"Chillout",
    u"Downtempo",
    u"Dub",
    u"EBM",
    u"Eclectic",
    u"Electro",
    u"Electroclash",
    u"Emo",
    u"Experimental",
    u"Garage",
    u"Global",
    u"IDM",
    u"Illbient",
    u"Industro-Goth",
    u"Jam Band",
    u"Krautrock",
    u"Leftfield",
    u"Lounge",
    u"Math Rock",
    u"New Romantic",
    u"Nu-Breakz",
    u"Post-Punk",
    u"Post-Rock",
    u"Psytrance",
    u"Shoegaze",
    u"Space Rock",
    u"Trop Rock",
    u"World Music",
    u"Neoclassical",
    u"Audiobook",
    u"Audio Theatre",
    u"Neue Deutsche Welle",
    u"Podcast",
    u"Indie Rock",
    u"G-Funk",
    u"Dubstep",
    u"Garage Rock",
    u"Psybient",
]
"""The ID3v1 genre list."""
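ID3v1 stores the genre as a single index byte, so consumers simply index into this list; for example:

    from mutagen._constants import GENRES

    assert GENRES[0] == u"Blues"
    assert GENRES[17] == u"Rock"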
@@ -1,253 +0,0 @@
# Copyright (C) 2005 Michael Urman
# -*- coding: utf-8 -*-
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

import warnings

from mutagen._util import DictMixin
from mutagen._compat import izip


class FileType(DictMixin):
    """An abstract object wrapping tags and audio stream information.

    Attributes:

    * info -- stream information (length, bitrate, sample rate)
    * tags -- metadata tags, if any

    Each file format has different potential tags and stream
    information.

    FileTypes implement an interface very similar to Metadata; the
    dict interface, save, load, and delete calls on a FileType call
    the appropriate methods on its tag data.
    """

    __module__ = "mutagen"

    info = None
    tags = None
    filename = None
    _mimes = ["application/octet-stream"]

    def __init__(self, filename=None, *args, **kwargs):
        if filename is None:
            warnings.warn("FileType constructor requires a filename",
                          DeprecationWarning)
        else:
            self.load(filename, *args, **kwargs)

    def load(self, filename, *args, **kwargs):
        raise NotImplementedError

    def __getitem__(self, key):
        """Look up a metadata tag key.

        If the file has no tags at all, a KeyError is raised.
        """

        if self.tags is None:
            raise KeyError(key)
        else:
            return self.tags[key]

    def __setitem__(self, key, value):
        """Set a metadata tag.

        If the file has no tags, an appropriate format is added (but
        not written until save is called).
        """

        if self.tags is None:
            self.add_tags()
        self.tags[key] = value

    def __delitem__(self, key):
        """Delete a metadata tag key.

        If the file has no tags at all, a KeyError is raised.
        """

        if self.tags is None:
            raise KeyError(key)
        else:
            del(self.tags[key])

    def keys(self):
        """Return a list of keys in the metadata tag.

        If the file has no tags at all, an empty list is returned.
        """

        if self.tags is None:
            return []
        else:
            return self.tags.keys()

    def delete(self, filename=None):
        """Remove tags from a file.

        In cases where the tagging format is independent of the file type
        (for example `mutagen.ID3`) all traces of the tagging format will
        be removed.
        In cases where the tag is part of the file type, all tags and
        padding will be removed.

        The tags attribute will be cleared as well if there is one.

        Does nothing if the file has no tags.

        :raises mutagen.MutagenError: if deleting wasn't possible
        """

        if self.tags is not None:
            if filename is None:
                filename = self.filename
            else:
                warnings.warn(
                    "delete(filename=...) is deprecated, reload the file",
                    DeprecationWarning)
            return self.tags.delete(filename)

    def save(self, filename=None, **kwargs):
        """Save metadata tags.

        :raises mutagen.MutagenError: if saving wasn't possible
        """

        if filename is None:
            filename = self.filename
        else:
            warnings.warn(
                "save(filename=...) is deprecated, reload the file",
                DeprecationWarning)

        if self.tags is not None:
            return self.tags.save(filename, **kwargs)

    def pprint(self):
        """Print stream information and comment key=value pairs."""

        stream = "%s (%s)" % (self.info.pprint(), self.mime[0])
        try:
            tags = self.tags.pprint()
        except AttributeError:
            return stream
        else:
            return stream + ((tags and "\n" + tags) or "")

    def add_tags(self):
        """Adds new tags to the file.

        :raises mutagen.MutagenError: if tags already exist or adding is not
            possible.
        """

        raise NotImplementedError

    @property
    def mime(self):
        """A list of mime types"""

        mimes = []
        for Kind in type(self).__mro__:
            for mime in getattr(Kind, '_mimes', []):
                if mime not in mimes:
                    mimes.append(mime)
        return mimes

    @staticmethod
    def score(filename, fileobj, header):
        raise NotImplementedError


class StreamInfo(object):
    """Abstract stream information object.

    Provides attributes for length, bitrate, sample rate etc.

    See the implementations for details.
    """

    __module__ = "mutagen"

    def pprint(self):
        """Print stream information"""

        raise NotImplementedError


def File(filename, options=None, easy=False):
    """Guess the type of the file and try to open it.

    The file type is decided by several things, such as the first 128
    bytes (which usually contains a file type identifier), the
    filename extension, and the presence of existing tags.

    If no appropriate type could be found, None is returned.

    :param options: Sequence of :class:`FileType` implementations, defaults to
        all included ones.

    :param easy: If the easy wrappers should be returned if available.
        For example :class:`EasyMP3 <mp3.EasyMP3>` instead
        of :class:`MP3 <mp3.MP3>`.
    """

    if options is None:
        from mutagen.asf import ASF
        from mutagen.apev2 import APEv2File
        from mutagen.flac import FLAC
        if easy:
            from mutagen.easyid3 import EasyID3FileType as ID3FileType
        else:
            from mutagen.id3 import ID3FileType
        if easy:
            from mutagen.mp3 import EasyMP3 as MP3
        else:
            from mutagen.mp3 import MP3
        from mutagen.oggflac import OggFLAC
        from mutagen.oggspeex import OggSpeex
        from mutagen.oggtheora import OggTheora
        from mutagen.oggvorbis import OggVorbis
        from mutagen.oggopus import OggOpus
        if easy:
            from mutagen.trueaudio import EasyTrueAudio as TrueAudio
        else:
            from mutagen.trueaudio import TrueAudio
        from mutagen.wavpack import WavPack
        if easy:
            from mutagen.easymp4 import EasyMP4 as MP4
        else:
            from mutagen.mp4 import MP4
        from mutagen.musepack import Musepack
        from mutagen.monkeysaudio import MonkeysAudio
        from mutagen.optimfrog import OptimFROG
        from mutagen.aiff import AIFF
        from mutagen.aac import AAC
        options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC,
                   FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack,
                   Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC]

    if not options:
        return None

    with open(filename, "rb") as fileobj:
        header = fileobj.read(128)
        # Sort by name after score. Otherwise import order affects
        # Kind sort order, which affects treatment of things with
        # equal scores.
        results = [(Kind.score(filename, fileobj, header), Kind.__name__)
                   for Kind in options]

    results = list(izip(results, options))
    results.sort()
    (score, name), Kind = results[-1]
    if score > 0:
        return Kind(filename)
    else:
        return None
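A quick illustration of the score-based dispatch above (the path is hypothetical):

    import mutagen

    audio = mutagen.File("example.mp3", easy=True)  # EasyMP3, or None if undetected
    if audio is not None:
        audio["title"] = [u"Example Title"]
        audio.save()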
@@ -1,420 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

"""
http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header
http://wiki.hydrogenaud.io/index.php?title=MP3
"""

from functools import partial

from ._util import cdata, BitReader
from ._compat import xrange, iterbytes, cBytesIO


class LAMEError(Exception):
    pass


class LAMEHeader(object):
    """http://gabriel.mp3-tech.org/mp3infotag.html"""

    vbr_method = 0
    """0: unknown, 1: CBR, 2: ABR, 3/4/5: VBR, others: see the docs"""

    lowpass_filter = 0
    """lowpass filter value in Hz. 0 means unknown"""

    quality = -1
    """Encoding quality: 0..9"""

    vbr_quality = -1
    """VBR quality: 0..9"""

    track_peak = None
    """Peak signal amplitude as float. None if unknown."""

    track_gain_origin = 0
    """see the docs"""

    track_gain_adjustment = None
    """Track gain adjustment as float (for 89db replay gain) or None"""

    album_gain_origin = 0
    """see the docs"""

    album_gain_adjustment = None
    """Album gain adjustment as float (for 89db replay gain) or None"""

    encoding_flags = 0
    """see docs"""

    ath_type = -1
    """see docs"""

    bitrate = -1
    """Bitrate in kbps. For VBR the minimum bitrate, for anything else
    (CBR, ABR, ..) the target bitrate.
    """

    encoder_delay_start = 0
    """Encoder delay in samples"""

    encoder_padding_end = 0
    """Padding in samples added at the end"""

    source_sample_frequency_enum = -1
    """see docs"""

    unwise_setting_used = False
    """see docs"""

    stereo_mode = 0
    """see docs"""

    noise_shaping = 0
    """see docs"""

    mp3_gain = 0
    """Applied MP3 gain -127..127. Factor is 2 ** (mp3_gain / 4)"""

    surround_info = 0
    """see docs"""

    preset_used = 0
    """lame preset"""

    music_length = 0
    """Length in bytes excluding any ID3 tags"""

    music_crc = -1
    """CRC16 of the data specified by music_length"""

    header_crc = -1
    """CRC16 of this header and everything before (not checked)"""

    def __init__(self, xing, fileobj):
        """Raises LAMEError if parsing fails"""

        payload = fileobj.read(27)
        if len(payload) != 27:
            raise LAMEError("Not enough data")

        # extended lame header
        r = BitReader(cBytesIO(payload))
        revision = r.bits(4)
        if revision != 0:
            raise LAMEError("unsupported header revision %d" % revision)

        self.vbr_method = r.bits(4)
        self.lowpass_filter = r.bits(8) * 100

        # these have a different meaning for lame; expose them again here
        self.quality = (100 - xing.vbr_scale) % 10
        self.vbr_quality = (100 - xing.vbr_scale) // 10

        track_peak_data = r.bytes(4)
        if track_peak_data == b"\x00\x00\x00\x00":
            self.track_peak = None
        else:
            # see PutLameVBR() in LAME's VbrTag.c
            self.track_peak = (
                cdata.uint32_be(track_peak_data) - 0.5) / 2 ** 23
        track_gain_type = r.bits(3)
        self.track_gain_origin = r.bits(3)
        sign = r.bits(1)
        gain_adj = r.bits(9) / 10.0
        if sign:
            gain_adj *= -1
        if track_gain_type == 1:
            self.track_gain_adjustment = gain_adj
        else:
            self.track_gain_adjustment = None
        assert r.is_aligned()

        album_gain_type = r.bits(3)
        self.album_gain_origin = r.bits(3)
        sign = r.bits(1)
        album_gain_adj = r.bits(9) / 10.0
        if album_gain_type == 2:
            self.album_gain_adjustment = album_gain_adj
        else:
            self.album_gain_adjustment = None

        self.encoding_flags = r.bits(4)
        self.ath_type = r.bits(4)

        self.bitrate = r.bits(8)

        self.encoder_delay_start = r.bits(12)
        self.encoder_padding_end = r.bits(12)

        self.source_sample_frequency_enum = r.bits(2)
        self.unwise_setting_used = r.bits(1)
        self.stereo_mode = r.bits(3)
        self.noise_shaping = r.bits(2)

        sign = r.bits(1)
        mp3_gain = r.bits(7)
        if sign:
            mp3_gain *= -1
        self.mp3_gain = mp3_gain

        r.skip(2)
        self.surround_info = r.bits(3)
        self.preset_used = r.bits(11)
        self.music_length = r.bits(32)
        self.music_crc = r.bits(16)

        self.header_crc = r.bits(16)
        assert r.is_aligned()

    @classmethod
    def parse_version(cls, fileobj):
        """Returns a version string and True if a LAMEHeader follows.
        The passed file object will be positioned right before the
        lame header if True.

        Raises LAMEError if there is no lame version info.
        """

        # http://wiki.hydrogenaud.io/index.php?title=LAME_version_string

        data = fileobj.read(20)
        if len(data) != 20:
            raise LAMEError("Not a lame header")
        if not data.startswith((b"LAME", b"L3.99")):
            raise LAMEError("Not a lame header")

        data = data.lstrip(b"EMAL")
        major, data = data[0:1], data[1:].lstrip(b".")
        minor = b""
        for c in iterbytes(data):
            if not c.isdigit():
                break
            minor += c
        data = data[len(minor):]

        try:
            major = int(major.decode("ascii"))
            minor = int(minor.decode("ascii"))
        except ValueError:
            raise LAMEError

        # the extended header was added sometimes in the 3.90 cycle
        # e.g. "LAME3.90 (alpha)" should still stop here.
        # (I have seen such a file)
        if (major, minor) < (3, 90) or (
                (major, minor) == (3, 90) and data[-11:-10] == b"("):
            flag = data.strip(b"\x00").rstrip().decode("ascii")
            return u"%d.%d%s" % (major, minor, flag), False

        if len(data) <= 11:
            raise LAMEError("Invalid version: too long")

        flag = data[:-11].rstrip(b"\x00")

        flag_string = u""
        patch = u""
        if flag == b"a":
            flag_string = u" (alpha)"
        elif flag == b"b":
            flag_string = u" (beta)"
        elif flag == b"r":
            patch = u".1+"
        elif flag == b" ":
            if (major, minor) > (3, 96):
                patch = u".0"
            else:
                patch = u".0+"
        elif flag == b"" or flag == b".":
            patch = u".0+"
        else:
            flag_string = u" (?)"

        # extended header, seek back to 9 bytes for the caller
        fileobj.seek(-11, 1)

        return u"%d.%d%s%s" % (major, minor, patch, flag_string), True


class XingHeaderError(Exception):
    pass


class XingHeaderFlags(object):
    FRAMES = 0x1
    BYTES = 0x2
    TOC = 0x4
    VBR_SCALE = 0x8


class XingHeader(object):

    frames = -1
    """Number of frames, -1 if unknown"""

    bytes = -1
    """Number of bytes, -1 if unknown"""

    toc = []
    """List of 100 file offsets in percent encoded as 0-255. E.g. entry
    50 contains the file offset in percent at 50% play time.
    Empty if unknown.
    """

    vbr_scale = -1
    """VBR quality indicator 0-100. -1 if unknown"""

    lame_header = None
    """A LAMEHeader instance or None"""

    lame_version = u""
    """The version of the LAME encoder e.g. '3.99.0'. Empty if unknown"""

    is_info = False
    """If the header started with 'Info' and not 'Xing'"""

    def __init__(self, fileobj):
        """Parses the Xing header or raises XingHeaderError.

        The file position after this returns is undefined.
        """

        data = fileobj.read(8)
        if len(data) != 8 or data[:4] not in (b"Xing", b"Info"):
            raise XingHeaderError("Not a Xing header")

        self.is_info = (data[:4] == b"Info")

        flags = cdata.uint32_be_from(data, 4)[0]

        if flags & XingHeaderFlags.FRAMES:
            data = fileobj.read(4)
            if len(data) != 4:
                raise XingHeaderError("Xing header truncated")
            self.frames = cdata.uint32_be(data)

        if flags & XingHeaderFlags.BYTES:
            data = fileobj.read(4)
            if len(data) != 4:
                raise XingHeaderError("Xing header truncated")
            self.bytes = cdata.uint32_be(data)

        if flags & XingHeaderFlags.TOC:
            data = fileobj.read(100)
            if len(data) != 100:
                raise XingHeaderError("Xing header truncated")
            self.toc = list(bytearray(data))

        if flags & XingHeaderFlags.VBR_SCALE:
            data = fileobj.read(4)
            if len(data) != 4:
                raise XingHeaderError("Xing header truncated")
            self.vbr_scale = cdata.uint32_be(data)

        try:
            self.lame_version, has_header = LAMEHeader.parse_version(fileobj)
            if has_header:
                self.lame_header = LAMEHeader(self, fileobj)
        except LAMEError:
            pass

    @classmethod
    def get_offset(cls, info):
        """Calculate the offset to the Xing header from the start of the
        MPEG header including sync based on the MPEG header's content.
        """

        assert info.layer == 3

        if info.version == 1:
            if info.mode != 3:
                return 36
            else:
                return 21
        else:
            if info.mode != 3:
                return 21
            else:
                return 13


class VBRIHeaderError(Exception):
    pass


class VBRIHeader(object):

    version = 0
    """VBRI header version"""

    quality = 0
    """Quality indicator"""

    bytes = 0
    """Number of bytes"""

    frames = 0
    """Number of frames"""

    toc_scale_factor = 0
    """Scale factor of TOC entries"""

    toc_frames = 0
    """Number of frames per table entry"""

    toc = []
    """TOC"""

    def __init__(self, fileobj):
        """Reads the VBRI header or raises VBRIHeaderError.

        The file position is undefined after this returns
        """

        data = fileobj.read(26)
        if len(data) != 26 or not data.startswith(b"VBRI"):
            raise VBRIHeaderError("Not a VBRI header")

        offset = 4
        self.version, offset = cdata.uint16_be_from(data, offset)
        if self.version != 1:
            raise VBRIHeaderError(
                "Unsupported header version: %r" % self.version)

        offset += 2  # float16.. can't do
        self.quality, offset = cdata.uint16_be_from(data, offset)
        self.bytes, offset = cdata.uint32_be_from(data, offset)
        self.frames, offset = cdata.uint32_be_from(data, offset)

        toc_num_entries, offset = cdata.uint16_be_from(data, offset)
        self.toc_scale_factor, offset = cdata.uint16_be_from(data, offset)
        toc_entry_size, offset = cdata.uint16_be_from(data, offset)
        self.toc_frames, offset = cdata.uint16_be_from(data, offset)
        toc_size = toc_entry_size * toc_num_entries
        toc_data = fileobj.read(toc_size)
        if len(toc_data) != toc_size:
            raise VBRIHeaderError("VBRI header truncated")

        self.toc = []
        if toc_entry_size == 2:
            unpack = partial(cdata.uint16_be_from, toc_data)
        elif toc_entry_size == 4:
            unpack = partial(cdata.uint32_be_from, toc_data)
        else:
            raise VBRIHeaderError("Invalid TOC entry size")

        self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)]

    @classmethod
    def get_offset(cls, info):
        """Offset in bytes from the start of the MPEG header including sync"""

        assert info.layer == 3

        return 36
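get_offset() only needs three MPEG header fields, so its branch table is easy to exercise with a stand-in object (the namedtuple here is purely illustrative; real callers pass the parsed MPEG frame info, and this file's header naming the module was lost in extraction):

    # assuming XingHeader from the module above is importable in scope
    from collections import namedtuple

    Info = namedtuple("Info", ["layer", "version", "mode"])
    assert XingHeader.get_offset(Info(layer=3, version=1, mode=0)) == 36  # MPEG1 stereo
    assert XingHeader.get_offset(Info(layer=3, version=1, mode=3)) == 21  # MPEG1 mono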
@@ -1,101 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.


class PaddingInfo(object):
    """Abstract padding information object.

    This will be passed to the callback function that can be used
    for saving tags.

    ::

        def my_callback(info: PaddingInfo):
            return info.get_default_padding()

    The callback should return the amount of padding to use (>= 0) based on
    the content size and the padding of the file after saving. The actual used
    amount of padding might vary depending on the file format (due to
    alignment etc.)

    The default implementation can be accessed using the
    :meth:`get_default_padding` method in the callback.
    """

    padding = 0
    """The amount of padding left after saving in bytes (can be negative if
    more data needs to be added as padding is available)
    """

    size = 0
    """The amount of data following the padding"""

    def __init__(self, padding, size):
        self.padding = padding
        self.size = size

    def get_default_padding(self):
        """The default implementation which tries to select a reasonable
        amount of padding and which might change in future versions.

        :return: Amount of padding after saving
        :rtype: int
        """

        high = 1024 * 10 + self.size // 100  # 10 KiB + 1% of trailing data
        low = 1024 + self.size // 1000  # 1 KiB + 0.1% of trailing data

        if self.padding >= 0:
            # enough padding left
            if self.padding > high:
                # padding too large, reduce
                return low
            # just use existing padding as is
            return self.padding
        else:
            # not enough padding, add some
            return low

    def _get_padding(self, user_func):
        if user_func is None:
            return self.get_default_padding()
        else:
            return user_func(self)

    def __repr__(self):
        return "<%s size=%d padding=%d>" % (
            type(self).__name__, self.size, self.padding)


class Metadata(object):
    """An abstract dict-like object.

    Metadata is the base class for many of the tag objects in Mutagen.
    """

    __module__ = "mutagen"

    def __init__(self, *args, **kwargs):
        if args or kwargs:
            self.load(*args, **kwargs)

    def load(self, *args, **kwargs):
        raise NotImplementedError

    def save(self, filename=None):
        """Save changes to a file."""

        raise NotImplementedError

    def delete(self, filename=None):
        """Remove tags from a file.

        In most cases this means any traces of the tag will be removed
        from the file.
        """

        raise NotImplementedError
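Working the default padding heuristic above through with concrete (arbitrary) numbers:

    from mutagen._tags import PaddingInfo

    info = PaddingInfo(padding=-1, size=2 * 1024 * 1024)
    # padding is negative, so the 'low' branch wins:
    # 1024 + 2097152 // 1000 == 3121 bytes of fresh padding
    assert info.get_default_padding() == 3121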
@@ -1,231 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright 2015 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import os
import sys
import signal
import locale
import contextlib
import optparse
import ctypes

from ._compat import text_type, PY2, PY3, iterbytes


def split_escape(string, sep, maxsplit=None, escape_char="\\"):
    """Like unicode/str/bytes.split but allows for the separator to be escaped

    If passed unicode/str/bytes will only return list of unicode/str/bytes.
    """

    assert len(sep) == 1
    assert len(escape_char) == 1

    if isinstance(string, bytes):
        if isinstance(escape_char, text_type):
            escape_char = escape_char.encode("ascii")
        iter_ = iterbytes
    else:
        iter_ = iter

    if maxsplit is None:
        maxsplit = len(string)

    empty = string[:0]
    result = []
    current = empty
    escaped = False
    for char in iter_(string):
        if escaped:
            if char != escape_char and char != sep:
                current += escape_char
            current += char
            escaped = False
        else:
            if char == escape_char:
                escaped = True
            elif char == sep and len(result) < maxsplit:
                result.append(current)
                current = empty
            else:
                current += char
    result.append(current)
    return result


class SignalHandler(object):

    def __init__(self):
        self._interrupted = False
        self._nosig = False
        self._init = False

    def init(self):
        signal.signal(signal.SIGINT, self._handler)
        signal.signal(signal.SIGTERM, self._handler)
        if os.name != "nt":
            signal.signal(signal.SIGHUP, self._handler)

    def _handler(self, signum, frame):
        self._interrupted = True
        if not self._nosig:
            raise SystemExit("Aborted...")

    @contextlib.contextmanager
    def block(self):
        """While this context manager is active any signals for aborting
        the process will be queued and exit the program once the context
        is left.
        """

        self._nosig = True
        yield
        self._nosig = False
        if self._interrupted:
            raise SystemExit("Aborted...")


def get_win32_unicode_argv():
    """Returns a unicode argv under Windows and standard sys.argv otherwise"""

    if os.name != "nt" or not PY2:
        return sys.argv

    import ctypes
    from ctypes import cdll, windll, wintypes

    GetCommandLineW = cdll.kernel32.GetCommandLineW
    GetCommandLineW.argtypes = []
    GetCommandLineW.restype = wintypes.LPCWSTR

    CommandLineToArgvW = windll.shell32.CommandLineToArgvW
    CommandLineToArgvW.argtypes = [
        wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)]
    CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR)

    LocalFree = windll.kernel32.LocalFree
    LocalFree.argtypes = [wintypes.HLOCAL]
    LocalFree.restype = wintypes.HLOCAL

    argc = ctypes.c_int()
    argv = CommandLineToArgvW(GetCommandLineW(), ctypes.byref(argc))
    if not argv:
        return

    res = argv[max(0, argc.value - len(sys.argv)):argc.value]

    LocalFree(argv)

    return res


def fsencoding():
    """The encoding used for paths, argv, environ, stdout and stdin"""

    if os.name == "nt":
        return ""

    return locale.getpreferredencoding() or "utf-8"


def fsnative(text=u""):
    """Returns the passed text converted to the preferred path type
    for each platform.
    """

    assert isinstance(text, text_type)

    if os.name == "nt" or PY3:
        return text
    else:
        return text.encode(fsencoding(), "replace")
    return text


def is_fsnative(arg):
    """If the passed value is of the preferred path type for each platform.
    Note that on Python3+linux, paths can be bytes or str but this returns
    False for bytes there.
    """

    if PY3 or os.name == "nt":
        return isinstance(arg, text_type)
    else:
        return isinstance(arg, bytes)


def print_(*objects, **kwargs):
    """A print which supports bytes and str+surrogates under python3.

    Needed so we can print anything passed to us through argv and environ.
    Under Windows only text_type is allowed.

    Arguments:
        objects: one or more bytes/text
        linesep (bool): whether a line separator should be appended
        sep (bool): whether objects should be printed separated by spaces
    """

    linesep = kwargs.pop("linesep", True)
    sep = kwargs.pop("sep", True)
    file_ = kwargs.pop("file", None)
    if file_ is None:
        file_ = sys.stdout

    old_cp = None
    if os.name == "nt":
        # Try to force the output to cp65001 aka utf-8.
        # If that fails use the current one (most likely cp850, so
        # most of unicode will be replaced with '?')
        encoding = "utf-8"
        old_cp = ctypes.windll.kernel32.GetConsoleOutputCP()
        if ctypes.windll.kernel32.SetConsoleOutputCP(65001) == 0:
            encoding = getattr(sys.stdout, "encoding", None) or "utf-8"
            old_cp = None
    else:
        encoding = fsencoding()

    try:
        if linesep:
            objects = list(objects) + [os.linesep]

        parts = []
        for text in objects:
            if isinstance(text, text_type):
                if PY3:
                    try:
                        text = text.encode(encoding, 'surrogateescape')
                    except UnicodeEncodeError:
                        text = text.encode(encoding, 'replace')
                else:
                    text = text.encode(encoding, 'replace')
            parts.append(text)

        data = (b" " if sep else b"").join(parts)
        try:
            fileno = file_.fileno()
        except (AttributeError, OSError, ValueError):
            # for tests when stdout is replaced
            try:
                file_.write(data)
            except TypeError:
                file_.write(data.decode(encoding, "replace"))
        else:
            file_.flush()
            os.write(fileno, data)
    finally:
        # reset the code page to what we had before
        if old_cp is not None:
            ctypes.windll.kernel32.SetConsoleOutputCP(old_cp)


class OptionParser(optparse.OptionParser):
    """OptionParser subclass which supports printing Unicode under Windows"""

    def print_help(self, file=None):
        print_(self.format_help(), file=file)
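split_escape() is the piece of this file most likely to be reused elsewhere; its escape handling in one line (the module path _toolsutil is an assumption here, since the diff header naming this file was lost in extraction):

    from mutagen._toolsutil import split_escape  # module path assumed

    # "a\," keeps its escaped comma; the unescaped comma still splits
    assert split_escape("a\\,b,c", ",") == ["a,b", "c"]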
@@ -1,550 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Utility classes for Mutagen.

You should not rely on the interfaces here being stable. They are
intended for internal use in Mutagen only.
"""

import struct
import codecs

from fnmatch import fnmatchcase

from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \
    izip


class MutagenError(Exception):
    """Base class for all custom exceptions in mutagen

    .. versionadded:: 1.25
    """

    __module__ = "mutagen"


def total_ordering(cls):
    assert "__eq__" in cls.__dict__
    assert "__lt__" in cls.__dict__

    cls.__le__ = lambda self, other: self == other or self < other
    cls.__gt__ = lambda self, other: not (self == other or self < other)
    cls.__ge__ = lambda self, other: not self < other
    cls.__ne__ = lambda self, other: not self.__eq__(other)

    return cls


def hashable(cls):
    """Makes sure the class is hashable.

    Needs a working __eq__ and __hash__ and will add a __ne__.
    """

    # py2
    assert "__hash__" in cls.__dict__
    # py3
    assert cls.__dict__["__hash__"] is not None
    assert "__eq__" in cls.__dict__

    cls.__ne__ = lambda self, other: not self.__eq__(other)

    return cls


def enum(cls):
    assert cls.__bases__ == (object,)

    d = dict(cls.__dict__)
    new_type = type(cls.__name__, (int,), d)
    new_type.__module__ = cls.__module__

    map_ = {}
    for key, value in iteritems(d):
        if key.upper() == key and isinstance(value, integer_types):
            value_instance = new_type(value)
            setattr(new_type, key, value_instance)
            map_[value] = key

    def str_(self):
        if self in map_:
            return "%s.%s" % (type(self).__name__, map_[self])
        return "%d" % int(self)

    def repr_(self):
        if self in map_:
            return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self))
        return "%d" % int(self)

    setattr(new_type, "__repr__", repr_)
    setattr(new_type, "__str__", str_)

    return new_type


@total_ordering
class DictMixin(object):
    """Implement the dict API using keys() and __*item__ methods.

    Similar to UserDict.DictMixin, this takes a class that defines
    __getitem__, __setitem__, __delitem__, and keys(), and turns it
    into a full dict-like object.

    UserDict.DictMixin is not suitable for this purpose because it's
    an old-style class.

    This class is not optimized for very large dictionaries; many
    functions have linear memory requirements. I recommend you
    override some of these functions if speed is required.
    """

    def __iter__(self):
        return iter(self.keys())

    def __has_key(self, key):
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True

    if PY2:
        has_key = __has_key

    __contains__ = __has_key

    if PY2:
        iterkeys = lambda self: iter(self.keys())

    def values(self):
        return [self[k] for k in self.keys()]

    if PY2:
        itervalues = lambda self: iter(self.values())

    def items(self):
        return list(izip(self.keys(), self.values()))

    if PY2:
        iteritems = lambda s: iter(s.items())

    def clear(self):
        for key in list(self.keys()):
            self.__delitem__(key)

    def pop(self, key, *args):
        if len(args) > 1:
            raise TypeError("pop takes at most two arguments")
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            else:
                raise
        del(self[key])
        return value

    def popitem(self):
        for key in self.keys():
            break
        else:
            raise KeyError("dictionary is empty")
        return key, self.pop(key)

    def update(self, other=None, **kwargs):
        if other is None:
            self.update(kwargs)
            other = {}

        try:
            for key, value in other.items():
                self.__setitem__(key, value)
        except AttributeError:
            for key, value in other:
                self[key] = value

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
            return default

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __repr__(self):
        return repr(dict(self.items()))

    def __eq__(self, other):
        return dict(self.items()) == other

    def __lt__(self, other):
        return dict(self.items()) < other

    __hash__ = object.__hash__

    def __len__(self):
        return len(self.keys())


class DictProxy(DictMixin):
    def __init__(self, *args, **kwargs):
        self.__dict = {}
        super(DictProxy, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        return self.__dict[key]

    def __setitem__(self, key, value):
        self.__dict[key] = value

    def __delitem__(self, key):
        del(self.__dict[key])

    def keys(self):
        return self.__dict.keys()


def _fill_cdata(cls):
    """Add struct pack/unpack functions"""

    funcs = {}
    for key, name in [("b", "char"), ("h", "short"),
                      ("i", "int"), ("q", "longlong")]:
        for echar, esuffix in [("<", "le"), (">", "be")]:
            esuffix = "_" + esuffix
            for unsigned in [True, False]:
                s = struct.Struct(echar + (key.upper() if unsigned else key))
                get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0]
                unpack = get_wrapper(s.unpack)
                unpack_from = get_wrapper(s.unpack_from)

                def get_unpack_from(s):
                    def unpack_from(data, offset=0):
                        return s.unpack_from(data, offset)[0], offset + s.size
                    return unpack_from

                unpack_from = get_unpack_from(s)
                pack = s.pack

                prefix = "u" if unsigned else ""
                if s.size == 1:
                    esuffix = ""
                bits = str(s.size * 8)
                funcs["%s%s%s" % (prefix, name, esuffix)] = unpack
                funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack
                funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from
                funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from
                funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack
                funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack

    for key, func in iteritems(funcs):
        setattr(cls, key, staticmethod(func))


class cdata(object):
    """C character buffer to Python numeric type conversions.

    For each size/sign/endianness:
    uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0)
    """

    from struct import error
    error = error

    bitswap = b''.join(
        chr_(sum(((val >> i) & 1) << (7 - i) for i in xrange(8)))
        for val in xrange(256))

    test_bit = staticmethod(lambda value, n: bool((value >> n) & 1))


_fill_cdata(cdata)


def get_size(fileobj):
    """Returns the size of the file object. The position when passed in will
    be preserved if no error occurs.

    In case of an error raises IOError.
    """

    old_pos = fileobj.tell()
    try:
        fileobj.seek(0, 2)
        return fileobj.tell()
    finally:
        fileobj.seek(old_pos, 0)


def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Insert size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.
    """

    assert 0 < size
    assert 0 <= offset

    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset
    fobj.write(b'\x00' * size)
    fobj.flush()

    try:
        import mmap
        file_map = mmap.mmap(fobj.fileno(), filesize + size)
        try:
            file_map.move(offset + size, offset, movesize)
        finally:
            file_map.close()
    except (ValueError, EnvironmentError, ImportError, AttributeError):
        # handle broken mmap scenarios, BytesIO()
        fobj.truncate(filesize)

        fobj.seek(0, 2)
        padsize = size
        # Don't generate an enormous string if we need to pad
        # the file out several megs.
        while padsize:
            addsize = min(BUFFER_SIZE, padsize)
            fobj.write(b"\x00" * addsize)
            padsize -= addsize

        fobj.seek(filesize, 0)
        while movesize:
            # At the start of this loop, fobj is pointing at the end
            # of the data we need to move, which is of movesize length.
            thismove = min(BUFFER_SIZE, movesize)
            # Seek back however much we're going to read this frame.
            fobj.seek(-thismove, 1)
            nextpos = fobj.tell()
            # Read it, so we're back at the end.
            data = fobj.read(thismove)
            # Seek back to where we need to write it.
            fobj.seek(-thismove + size, 1)
            # Write it.
            fobj.write(data)
            # And seek back to the end of the unmoved data.
            fobj.seek(nextpos)
            movesize -= thismove

        fobj.flush()


def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16):
    """Delete size bytes of empty space starting at offset.

    fobj must be an open file object, open rb+ or
    equivalent. Mutagen tries to use mmap to resize the file, but
    falls back to a significantly slower method if mmap fails.
    """

    assert 0 < size
    assert 0 <= offset

    fobj.seek(0, 2)
    filesize = fobj.tell()
    movesize = filesize - offset - size
    assert 0 <= movesize

    if movesize > 0:
        fobj.flush()
        try:
            import mmap
            file_map = mmap.mmap(fobj.fileno(), filesize)
            try:
                file_map.move(offset, offset + size, movesize)
            finally:
                file_map.close()
        except (ValueError, EnvironmentError, ImportError, AttributeError):
            # handle broken mmap scenarios, BytesIO()
            fobj.seek(offset + size)
            buf = fobj.read(BUFFER_SIZE)
            while buf:
                fobj.seek(offset)
                fobj.write(buf)
                offset += len(buf)
                fobj.seek(offset + size)
                buf = fobj.read(BUFFER_SIZE)
    fobj.truncate(filesize - size)
    fobj.flush()


def resize_bytes(fobj, old_size, new_size, offset):
    """Resize an area in a file adding and deleting at the end of it.
    Does nothing if no resizing is needed.
    """

    if new_size < old_size:
        delete_size = old_size - new_size
        delete_at = offset + new_size
        delete_bytes(fobj, delete_size, delete_at)
    elif new_size > old_size:
        insert_size = new_size - old_size
        insert_at = offset + old_size
        insert_bytes(fobj, insert_size, insert_at)


def dict_match(d, key, default=None):
    """Like __getitem__ but works as if the keys() are all filename patterns.
    Returns the value of any dict key that matches the passed key.
    """

    if key in d and "[" not in key:
        return d[key]
    else:
        for pattern, value in iteritems(d):
            if fnmatchcase(key, pattern):
                return value
    return default


def decode_terminated(data, encoding, strict=True):
    """Returns the decoded data until the first NULL terminator
    and all data after it.

    In case the data can't be decoded raises UnicodeError.
    In case the encoding is not found raises LookupError.
    In case the data isn't null terminated (even if it is encoded correctly)
    raises ValueError except if strict is False, then the decoded string
    will be returned anyway.
    """

    codec_info = codecs.lookup(encoding)

    # normalize encoding name so we can compare by name
    encoding = codec_info.name

    # fast path
    if encoding in ("utf-8", "iso8859-1"):
        index = data.find(b"\x00")
        if index == -1:
            # make sure we raise UnicodeError first, like in the slow path
            res = data.decode(encoding), b""
            if strict:
                raise ValueError("not null terminated")
            else:
                return res
        return data[:index].decode(encoding), data[index + 1:]

    # slow path
    decoder = codec_info.incrementaldecoder()
    r = []
    for i, b in enumerate(iterbytes(data)):
        c = decoder.decode(b)
        if c == u"\x00":
            return u"".join(r), data[i + 1:]
        r.append(c)
    else:
        # make sure the decoder is finished
        r.append(decoder.decode(b"", True))
        if strict:
            raise ValueError("not null terminated")
    return u"".join(r), b""


class BitReaderError(Exception):
    pass


class BitReader(object):

    def __init__(self, fileobj):
        self._fileobj = fileobj
        self._buffer = 0
        self._bits = 0
        self._pos = fileobj.tell()

    def bits(self, count):
        """Reads `count` bits and returns an uint, MSB read first.

        May raise BitReaderError if not enough data could be read or
        IOError by the underlying file object.
        """

        if count < 0:
            raise ValueError

        if count > self._bits:
            n_bytes = (count - self._bits + 7) // 8
            data = self._fileobj.read(n_bytes)
            if len(data) != n_bytes:
                raise BitReaderError("not enough data")
            for b in bytearray(data):
                self._buffer = (self._buffer << 8) | b
            self._bits += n_bytes * 8

        self._bits -= count
        value = self._buffer >> self._bits
        self._buffer &= (1 << self._bits) - 1
        assert self._bits < 8
        return value

    def bytes(self, count):
        """Returns a bytearray of length `count`. Works unaligned."""

        if count < 0:
            raise ValueError

        # fast path
        if self._bits == 0:
            data = self._fileobj.read(count)
            if len(data) != count:
                raise BitReaderError("not enough data")
            return data

        return bytes(bytearray(self.bits(8) for _ in xrange(count)))

    def skip(self, count):
        """Skip `count` bits.

        Might raise BitReaderError if there wasn't enough data to skip,
        but might also fail on the next bits() instead.
        """

        if count < 0:
            raise ValueError

        if count <= self._bits:
            self.bits(count)
        else:
            count -= self.align()
            n_bytes = count // 8
            self._fileobj.seek(n_bytes, 1)
            count -= n_bytes * 8
            self.bits(count)

    def get_position(self):
        """Returns the amount of bits read or skipped so far"""

        return (self._fileobj.tell() - self._pos) * 8 - self._bits

    def align(self):
        """Align to the next byte, returns the amount of bits skipped"""

        bits = self._bits
        self._buffer = 0
        self._bits = 0
        return bits

    def is_aligned(self):
        """If we are currently aligned to bytes and nothing is buffered"""

        return self._bits == 0
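Since the MP3 header parsing earlier in this diff leans on BitReader, here is a tiny self-contained check of its MSB-first semantics:

    from io import BytesIO
    from mutagen._util import BitReader

    r = BitReader(BytesIO(b"\xf0\x0f"))
    assert r.bits(4) == 0xf    # top nibble of the first byte
    assert r.bits(8) == 0x00   # straddles the byte boundary
    assert r.bits(4) == 0xf
    assert r.is_aligned()      # all 16 bits consumed, nothing buffered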
@ -1,330 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (C) 2005-2006 Joe Wreschnig
|
||||
# 2013 Christoph Reiter
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of version 2 of the GNU General Public License as
|
||||
# published by the Free Software Foundation.
|
||||
|
||||
"""Read and write Vorbis comment data.
|
||||
|
||||
Vorbis comments are freeform key/value pairs; keys are
|
||||
case-insensitive ASCII and values are Unicode strings. A key may have
|
||||
multiple values.
|
||||
|
||||
The specification is at http://www.xiph.org/vorbis/doc/v-comment.html.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
import mutagen
|
||||
from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2
|
||||
from mutagen._util import DictMixin, cdata
|
||||
|
||||
|
||||
def is_valid_key(key):
|
||||
"""Return true if a string is a valid Vorbis comment key.
|
||||
|
||||
Valid Vorbis comment keys are printable ASCII between 0x20 (space)
|
||||
and 0x7D ('}'), excluding '='.
|
||||
|
||||
Takes str/unicode in Python 2, unicode in Python 3
|
||||
"""
|
||||
|
||||
if PY3 and isinstance(key, bytes):
|
||||
raise TypeError("needs to be str not bytes")
|
||||
|
||||
for c in key:
|
||||
if c < " " or c > "}" or c == "=":
|
||||
return False
|
||||
else:
|
||||
return bool(key)
|
||||
|
||||
|
||||
istag = is_valid_key
|
||||
|
||||
|
||||
class error(IOError):
    pass


class VorbisUnsetFrameError(error):
    pass


class VorbisEncodingError(error):
    pass


class VComment(mutagen.Metadata, list):
    """A Vorbis comment parser, accessor, and renderer.

    All comment ordering is preserved. A VComment is a list of
    key/value pairs, and so any Python list method can be used on it.

    Vorbis comments are always wrapped in something like an Ogg Vorbis
    bitstream or a FLAC metadata block, so this loads string data or a
    file-like object, not a filename.

    Attributes:

    * vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen'
    """

    vendor = u"Mutagen " + mutagen.version_string

    def __init__(self, data=None, *args, **kwargs):
        self._size = 0
        # Collect the args to pass to load, this lets child classes
        # override just load and get equivalent magic for the
        # constructor.
        if data is not None:
            if isinstance(data, bytes):
                data = BytesIO(data)
            elif not hasattr(data, 'read'):
                raise TypeError("VComment requires bytes or a file-like")
            start = data.tell()
            self.load(data, *args, **kwargs)
            self._size = data.tell() - start

    def load(self, fileobj, errors='replace', framing=True):
        """Parse a Vorbis comment from a file-like object.

        Keyword arguments:

        * errors:
          'strict', 'replace', or 'ignore'. This affects Unicode decoding
          and how other malformed content is interpreted.
        * framing -- if true, fail if a framing bit is not present

        Framing bits are required by the Vorbis comment specification,
        but are not used in FLAC Vorbis comment blocks.
        """

        try:
            vendor_length = cdata.uint_le(fileobj.read(4))
            self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
            count = cdata.uint_le(fileobj.read(4))
            for i in xrange(count):
                length = cdata.uint_le(fileobj.read(4))
                try:
                    string = fileobj.read(length).decode('utf-8', errors)
                except (OverflowError, MemoryError):
                    raise error("cannot read %d bytes, too large" % length)
                try:
                    tag, value = string.split('=', 1)
                except ValueError as err:
                    if errors == "ignore":
                        continue
                    elif errors == "replace":
                        tag, value = u"unknown%d" % i, string
                    else:
                        reraise(VorbisEncodingError, err, sys.exc_info()[2])
                try:
                    tag = tag.encode('ascii', errors)
                except UnicodeEncodeError:
                    raise VorbisEncodingError("invalid tag name %r" % tag)
                else:
                    # string keys in py3k
                    if PY3:
                        tag = tag.decode("ascii")
                    if is_valid_key(tag):
                        self.append((tag, value))

            if framing and not bytearray(fileobj.read(1))[0] & 0x01:
                raise VorbisUnsetFrameError("framing bit was unset")
        except (cdata.error, TypeError):
            raise error("file is not a valid Vorbis comment")

    def validate(self):
        """Validate keys and values.

        Check to make sure every key used is a valid Vorbis key, and
        that every value used is a valid Unicode or UTF-8 string. If
        any invalid keys or values are found, a ValueError is raised.

        In Python 3 all keys and values have to be a string.
        """

        if not isinstance(self.vendor, text_type):
            if PY3:
                raise ValueError("vendor needs to be str")

            try:
                self.vendor.decode('utf-8')
            except UnicodeDecodeError:
                raise ValueError

        for key, value in self:
            try:
                if not is_valid_key(key):
                    raise ValueError
            except TypeError:
                raise ValueError("%r is not a valid key" % key)

            if not isinstance(value, text_type):
                if PY3:
                    raise ValueError("%r needs to be str" % key)

                try:
                    value.decode("utf-8")
                except:
                    raise ValueError("%r is not a valid value" % value)

        return True

    def clear(self):
        """Clear all keys from the comment."""

        for i in list(self):
            self.remove(i)

    def write(self, framing=True):
        """Return a string representation of the data.

        Validation is always performed, so calling this function on
        invalid data may raise a ValueError.

        Keyword arguments:

        * framing -- if true, append a framing bit (see load)
        """

        self.validate()

        def _encode(value):
            if not isinstance(value, bytes):
                return value.encode('utf-8')
            return value

        f = BytesIO()
        vendor = _encode(self.vendor)
        f.write(cdata.to_uint_le(len(vendor)))
        f.write(vendor)
        f.write(cdata.to_uint_le(len(self)))
        for tag, value in self:
            tag = _encode(tag)
            value = _encode(value)
            comment = tag + b"=" + value
            f.write(cdata.to_uint_le(len(comment)))
            f.write(comment)
        if framing:
            f.write(b"\x01")
        return f.getvalue()

    def pprint(self):

        def _decode(value):
            if not isinstance(value, text_type):
                return value.decode('utf-8', 'replace')
            return value

        tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self]
        return u"\n".join(tags)


class VCommentDict(VComment, DictMixin):
    """A VComment that looks like a dictionary.

    This object differs from a dictionary in two ways. First,
    len(comment) will still return the number of values, not the
    number of keys. Secondly, iterating through the object will
    iterate over (key, value) pairs, not keys. Since a key may have
    multiple values, the same value may appear multiple times while
    iterating.

    Since Vorbis comment keys are case-insensitive, all keys are
    normalized to lowercase ASCII.
    """

    def __getitem__(self, key):
        """A list of values for the key.

        This is a copy, so comment['title'].append('a title') will not
        work.
        """

        # PY3 only
        if isinstance(key, slice):
            return VComment.__getitem__(self, key)

        if not is_valid_key(key):
            raise ValueError

        key = key.lower()

        values = [value for (k, value) in self if k.lower() == key]
        if not values:
            raise KeyError(key)
        else:
            return values

    def __delitem__(self, key):
        """Delete all values associated with the key."""

        # PY3 only
        if isinstance(key, slice):
            return VComment.__delitem__(self, key)

        if not is_valid_key(key):
            raise ValueError

        key = key.lower()
        to_delete = [x for x in self if x[0].lower() == key]
        if not to_delete:
            raise KeyError(key)
        else:
            for item in to_delete:
                self.remove(item)

    def __contains__(self, key):
        """Return true if the key has any values."""

        if not is_valid_key(key):
            raise ValueError

        key = key.lower()
        for k, value in self:
            if k.lower() == key:
                return True
        else:
            return False

    def __setitem__(self, key, values):
        """Set a key's value or values.

        Setting a value overwrites all old ones. The value may be a
        list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
        string.
        """

        # PY3 only
        if isinstance(key, slice):
            return VComment.__setitem__(self, key, values)

        if not is_valid_key(key):
            raise ValueError

        if not isinstance(values, list):
            values = [values]
        try:
            del(self[key])
        except KeyError:
            pass

        if PY2:
            key = key.encode('ascii')

        for value in values:
            self.append((key, value))

    def keys(self):
        """Return all keys in the comment."""

        return list(set([k.lower() for k, v in self]))

    def as_dict(self):
        """Return a copy of the comment data in a real dict."""

        return dict([(key, self[key]) for key in self.keys()])
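
A round-trip sketch for the classes above, assuming the bundled mutagen
package is importable:

    from mutagen._vorbis import VCommentDict

    vc = VCommentDict()
    vc[u"title"] = [u"A Title", u"Another Title"]
    data = vc.write()         # validates, then appends the framing bit
    vc2 = VCommentDict(data)
    assert vc2[u"TITLE"] == [u"A Title", u"Another Title"]  # keys fold to lowercase
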
@@ -1,410 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

"""
* ADTS - Audio Data Transport Stream
* ADIF - Audio Data Interchange Format
* See ISO/IEC 13818-7 / 14496-03
"""

from mutagen import StreamInfo
from mutagen._file import FileType
from mutagen._util import BitReader, BitReaderError, MutagenError
from mutagen._compat import endswith, xrange


_FREQS = [
    96000, 88200, 64000, 48000,
    44100, 32000, 24000, 22050,
    16000, 12000, 11025, 8000,
    7350,
]


class _ADTSStream(object):
    """Represents a series of frames belonging to the same stream"""

    parsed_frames = 0
    """Number of successfully parsed frames"""

    offset = 0
    """offset in bytes at which the stream starts (the first sync word)"""

    @classmethod
    def find_stream(cls, fileobj, max_bytes):
        """Returns a possibly valid _ADTSStream or None.

        Args:
            max_bytes (int): maximum bytes to read
        """

        r = BitReader(fileobj)
        stream = cls(r)
        if stream.sync(max_bytes):
            stream.offset = (r.get_position() - 12) // 8
            return stream

    def sync(self, max_bytes):
        """Find the next sync.
        Returns True if found."""

        # at least 2 bytes for the sync
        max_bytes = max(max_bytes, 2)

        r = self._r
        r.align()
        while max_bytes > 0:
            try:
                b = r.bytes(1)
                if b == b"\xff":
                    if r.bits(4) == 0xf:
                        return True
                    r.align()
                    max_bytes -= 2
                else:
                    max_bytes -= 1
            except BitReaderError:
                return False
        return False

    def __init__(self, r):
        """Use _ADTSStream.find_stream to create a stream"""

        self._fixed_header_key = None
        self._r = r
        self.offset = -1
        self.parsed_frames = 0

        self._samples = 0
        self._payload = 0
        self._start = r.get_position() / 8
        self._last = self._start

    @property
    def bitrate(self):
        """Bitrate of the raw aac blocks, excluding framing/crc"""

        assert self.parsed_frames, "no frame parsed yet"

        if self._samples == 0:
            return 0

        return (8 * self._payload * self.frequency) // self._samples

    @property
    def samples(self):
        """samples so far"""

        assert self.parsed_frames, "no frame parsed yet"

        return self._samples

    @property
    def size(self):
        """bytes read in the stream so far (including framing)"""

        assert self.parsed_frames, "no frame parsed yet"

        return self._last - self._start

    @property
    def channels(self):
        """0 means unknown"""

        assert self.parsed_frames, "no frame parsed yet"

        b_index = self._fixed_header_key[6]
        if b_index == 7:
            return 8
        elif b_index > 7:
            return 0
        else:
            return b_index

    @property
    def frequency(self):
        """0 means unknown"""

        assert self.parsed_frames, "no frame parsed yet"

        f_index = self._fixed_header_key[4]
        try:
            return _FREQS[f_index]
        except IndexError:
            return 0

    def parse_frame(self):
        """True if parsing was successful.
        Fails either because the frame wasn't valid or the stream ended.
        """

        try:
            return self._parse_frame()
        except BitReaderError:
            return False

    def _parse_frame(self):
        r = self._r
        # start == position of sync word
        start = r.get_position() - 12

        # adts_fixed_header
        id_ = r.bits(1)
        layer = r.bits(2)
        protection_absent = r.bits(1)

        profile = r.bits(2)
        sampling_frequency_index = r.bits(4)
        private_bit = r.bits(1)
        # TODO: if 0 we could parse program_config_element()
        channel_configuration = r.bits(3)
        original_copy = r.bits(1)
        home = r.bits(1)

        # the fixed header has to be the same for every frame in the stream
        fixed_header_key = (
            id_, layer, protection_absent, profile, sampling_frequency_index,
            private_bit, channel_configuration, original_copy, home,
        )

        if self._fixed_header_key is None:
            self._fixed_header_key = fixed_header_key
        else:
            if self._fixed_header_key != fixed_header_key:
                return False

        # adts_variable_header
        r.skip(2)  # copyright_identification_bit/start
        frame_length = r.bits(13)
        r.skip(11)  # adts_buffer_fullness
        nordbif = r.bits(2)
        # adts_variable_header end

        crc_overhead = 0
        if not protection_absent:
            crc_overhead += (nordbif + 1) * 16
            if nordbif != 0:
                crc_overhead *= 2

        left = (frame_length * 8) - (r.get_position() - start)
        if left < 0:
            return False
        r.skip(left)
        assert r.is_aligned()

        self._payload += (left - crc_overhead) / 8
        self._samples += (nordbif + 1) * 1024
        self._last = r.get_position() / 8

        self.parsed_frames += 1
        return True


class ProgramConfigElement(object):

    element_instance_tag = None
    object_type = None
    sampling_frequency_index = None
    channels = None

    def __init__(self, r):
        """Reads the program_config_element()

        Raises BitReaderError
        """

        self.element_instance_tag = r.bits(4)
        self.object_type = r.bits(2)
        self.sampling_frequency_index = r.bits(4)
        num_front_channel_elements = r.bits(4)
        num_side_channel_elements = r.bits(4)
        num_back_channel_elements = r.bits(4)
        num_lfe_channel_elements = r.bits(2)
        num_assoc_data_elements = r.bits(3)
        num_valid_cc_elements = r.bits(4)

        mono_mixdown_present = r.bits(1)
        if mono_mixdown_present == 1:
            r.skip(4)
        stereo_mixdown_present = r.bits(1)
        if stereo_mixdown_present == 1:
            r.skip(4)
        matrix_mixdown_idx_present = r.bits(1)
        if matrix_mixdown_idx_present == 1:
            r.skip(3)

        elms = num_front_channel_elements + num_side_channel_elements + \
            num_back_channel_elements
        channels = 0
        for i in xrange(elms):
            channels += 1
            element_is_cpe = r.bits(1)
            if element_is_cpe:
                channels += 1
            r.skip(4)
        channels += num_lfe_channel_elements
        self.channels = channels

        r.skip(4 * num_lfe_channel_elements)
        r.skip(4 * num_assoc_data_elements)
        r.skip(5 * num_valid_cc_elements)
        r.align()
        comment_field_bytes = r.bits(8)
        r.skip(8 * comment_field_bytes)


class AACError(MutagenError):
    pass


class AACInfo(StreamInfo):
    """AAC stream information.

    Attributes:

    * channels -- number of audio channels
    * length -- file length in seconds, as a float
    * sample_rate -- audio sampling rate in Hz
    * bitrate -- audio bitrate, in bits per second

    The length of the stream is just a guess and might not be correct.
    """

    channels = 0
    length = 0
    sample_rate = 0
    bitrate = 0

    def __init__(self, fileobj):
        # skip id3v2 header
        start_offset = 0
        header = fileobj.read(10)
        from mutagen.id3 import BitPaddedInt
        if header.startswith(b"ID3"):
            size = BitPaddedInt(header[6:])
            start_offset = size + 10

        fileobj.seek(start_offset)
        adif = fileobj.read(4)
        if adif == b"ADIF":
            self._parse_adif(fileobj)
            self._type = "ADIF"
        else:
            self._parse_adts(fileobj, start_offset)
            self._type = "ADTS"

    def _parse_adif(self, fileobj):
        r = BitReader(fileobj)
        try:
            copyright_id_present = r.bits(1)
            if copyright_id_present:
                r.skip(72)  # copyright_id
            r.skip(1 + 1)  # original_copy, home
            bitstream_type = r.bits(1)
            self.bitrate = r.bits(23)
            npce = r.bits(4)
            if bitstream_type == 0:
                r.skip(20)  # adif_buffer_fullness

            pce = ProgramConfigElement(r)
            try:
                self.sample_rate = _FREQS[pce.sampling_frequency_index]
            except IndexError:
                pass
            self.channels = pce.channels

            # other pces..
            for i in xrange(npce):
                ProgramConfigElement(r)
            r.align()
        except BitReaderError as e:
            raise AACError(e)

        # use bitrate + data size to guess length
        start = fileobj.tell()
        fileobj.seek(0, 2)
        length = fileobj.tell() - start
        if self.bitrate != 0:
            self.length = (8.0 * length) / self.bitrate

    def _parse_adts(self, fileobj, start_offset):
        max_initial_read = 512
        max_resync_read = 10
        max_sync_tries = 10

        frames_max = 100
        frames_needed = 3

        # Try up to X times to find a sync word and read up to Y frames.
        # If more than Z frames are valid we assume a valid stream
        offset = start_offset
        for i in xrange(max_sync_tries):
            fileobj.seek(offset)
            s = _ADTSStream.find_stream(fileobj, max_initial_read)
            if s is None:
                raise AACError("sync not found")
            # start right after the last found offset
            offset += s.offset + 1

            for i in xrange(frames_max):
                if not s.parse_frame():
                    break
                if not s.sync(max_resync_read):
                    break

            if s.parsed_frames >= frames_needed:
                break
        else:
            raise AACError(
                "no valid stream found (only %d frames)" % s.parsed_frames)

        self.sample_rate = s.frequency
        self.channels = s.channels
        self.bitrate = s.bitrate

        # size from stream start to end of file
        fileobj.seek(0, 2)
        stream_size = fileobj.tell() - (offset + s.offset)
        # approx
        self.length = float(s.samples * stream_size) / (s.size * s.frequency)

    def pprint(self):
        return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % (
            self._type, self.sample_rate, self.length, self.channels,
            self.bitrate)


class AAC(FileType):
    """Load ADTS or ADIF streams containing AAC.

    Tagging is not supported.
    Use the ID3/APEv2 classes directly instead.
    """

    _mimes = ["audio/x-aac"]

    def load(self, filename):
        self.filename = filename
        with open(filename, "rb") as h:
            self.info = AACInfo(h)

    def add_tags(self):
        raise AACError("doesn't support tags")

    @staticmethod
    def score(filename, fileobj, header):
        filename = filename.lower()
        s = endswith(filename, ".aac") or endswith(filename, ".adts") or \
            endswith(filename, ".adif")
        s += b"ADIF" in header
        return s


Open = AAC
error = AACError

__all__ = ["AAC", "Open"]
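
A usage sketch for the AAC FileType above; "example.aac" is a hypothetical
input file:

    from mutagen.aac import AAC

    f = AAC("example.aac")
    print(f.info.pprint())  # e.g. u"AAC (ADTS), 44100 Hz, ..." per pprint() above
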
@@ -1,357 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2014  Evan Purkhiser
#               2014  Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

"""AIFF audio stream information and tags."""

import sys
import struct
from struct import pack

from ._compat import endswith, text_type, reraise
from mutagen import StreamInfo, FileType

from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import resize_bytes, delete_bytes, MutagenError

__all__ = ["AIFF", "Open", "delete"]


class error(MutagenError, RuntimeError):
    pass


class InvalidChunk(error, IOError):
    pass


# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308


def is_valid_chunk_id(id):
    assert isinstance(id, text_type)

    return ((len(id) <= 4) and (min(id) >= u' ') and
            (max(id) <= u'~'))


def read_float(data):  # 10 bytes
    expon, himant, lomant = struct.unpack('>hLL', data)
    sign = 1
    if expon < 0:
        sign = -1
        expon = expon + 0x8000
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
    return sign * f
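
read_float above decodes the 80-bit IEEE 754 extended floats that AIFF uses
for the sample rate. A worked check (the packed constants are the standard
encoding of 44100 Hz: biased exponent 0x400E, mantissa 0xAC44 << 48):

    import struct

    data = struct.pack('>hLL', 0x400E, 0xAC440000, 0)
    assert int(read_float(data)) == 44100
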
class IFFChunk(object):
    """Representation of a single IFF chunk"""

    # Chunk headers are 8 bytes long (4 for ID and 4 for the size)
    HEADER_SIZE = 8

    def __init__(self, fileobj, parent_chunk=None):
        self.__fileobj = fileobj
        self.parent_chunk = parent_chunk
        self.offset = fileobj.tell()

        header = fileobj.read(self.HEADER_SIZE)
        if len(header) < self.HEADER_SIZE:
            raise InvalidChunk()

        self.id, self.data_size = struct.unpack('>4si', header)

        try:
            self.id = self.id.decode('ascii')
        except UnicodeDecodeError:
            raise InvalidChunk()

        if not is_valid_chunk_id(self.id):
            raise InvalidChunk()

        self.size = self.HEADER_SIZE + self.data_size
        self.data_offset = fileobj.tell()

    def read(self):
        """Read the chunks data"""

        self.__fileobj.seek(self.data_offset)
        return self.__fileobj.read(self.data_size)

    def write(self, data):
        """Write the chunk data"""

        if len(data) > self.data_size:
            raise ValueError

        self.__fileobj.seek(self.data_offset)
        self.__fileobj.write(data)

    def delete(self):
        """Removes the chunk from the file"""

        delete_bytes(self.__fileobj, self.size, self.offset)
        if self.parent_chunk is not None:
            self.parent_chunk._update_size(
                self.parent_chunk.data_size - self.size)

    def _update_size(self, data_size):
        """Update the size of the chunk"""

        self.__fileobj.seek(self.offset + 4)
        self.__fileobj.write(pack('>I', data_size))
        if self.parent_chunk is not None:
            size_diff = self.data_size - data_size
            self.parent_chunk._update_size(
                self.parent_chunk.data_size - size_diff)
        self.data_size = data_size
        self.size = data_size + self.HEADER_SIZE

    def resize(self, new_data_size):
        """Resize the file and update the chunk sizes"""

        resize_bytes(
            self.__fileobj, self.data_size, new_data_size, self.data_offset)
        self._update_size(new_data_size)


class IFFFile(object):
    """Representation of an IFF file"""

    def __init__(self, fileobj):
        self.__fileobj = fileobj
        self.__chunks = {}

        # AIFF Files always start with the FORM chunk which contains a 4 byte
        # ID before the start of other chunks
        fileobj.seek(0)
        self.__chunks[u'FORM'] = IFFChunk(fileobj)

        # Skip past the 4 byte FORM id
        fileobj.seek(IFFChunk.HEADER_SIZE + 4)

        # Where the next chunk can be located. We need to keep track of this
        # since the size indicated in the FORM header may not match up with the
        # offset determined from the size of the last chunk in the file
        self.__next_offset = fileobj.tell()

        # Load all of the chunks
        while True:
            try:
                chunk = IFFChunk(fileobj, self[u'FORM'])
            except InvalidChunk:
                break
            self.__chunks[chunk.id.strip()] = chunk

            # Calculate the location of the next chunk,
            # considering the pad byte
            self.__next_offset = chunk.offset + chunk.size
            self.__next_offset += self.__next_offset % 2
            fileobj.seek(self.__next_offset)

    def __contains__(self, id_):
        """Check if the IFF file contains a specific chunk"""

        assert isinstance(id_, text_type)

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        return id_ in self.__chunks

    def __getitem__(self, id_):
        """Get a chunk from the IFF file"""

        assert isinstance(id_, text_type)

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        try:
            return self.__chunks[id_]
        except KeyError:
            raise KeyError(
                "%r has no %r chunk" % (self.__fileobj.name, id_))

    def __delitem__(self, id_):
        """Remove a chunk from the IFF file"""

        assert isinstance(id_, text_type)

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        self.__chunks.pop(id_).delete()

    def insert_chunk(self, id_):
        """Insert a new chunk at the end of the IFF file"""

        assert isinstance(id_, text_type)

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        self.__fileobj.seek(self.__next_offset)
        self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0))
        self.__fileobj.seek(self.__next_offset)
        chunk = IFFChunk(self.__fileobj, self[u'FORM'])
        self[u'FORM']._update_size(self[u'FORM'].data_size + chunk.size)

        self.__chunks[id_] = chunk
        self.__next_offset = chunk.offset + chunk.size


class AIFFInfo(StreamInfo):
    """AIFF audio stream information.

    Information is parsed from the COMM chunk of the AIFF file

    Useful attributes:

    * length -- audio length, in seconds
    * bitrate -- audio bitrate, in bits per second
    * channels -- The number of audio channels
    * sample_rate -- audio sample rate, in Hz
    * sample_size -- The audio sample size
    """

    length = 0
    bitrate = 0
    channels = 0
    sample_rate = 0

    def __init__(self, fileobj):
        iff = IFFFile(fileobj)
        try:
            common_chunk = iff[u'COMM']
        except KeyError as e:
            raise error(str(e))

        data = common_chunk.read()

        info = struct.unpack('>hLh10s', data[:18])
        channels, frame_count, sample_size, sample_rate = info

        self.sample_rate = int(read_float(sample_rate))
        self.sample_size = sample_size
        self.channels = channels
        self.bitrate = channels * sample_size * self.sample_rate
        self.length = frame_count / float(self.sample_rate)

    def pprint(self):
        return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
            self.channels, self.bitrate, self.sample_rate, self.length)


class _IFFID3(ID3):
    """An AIFF file with ID3v2 tags"""

    def _pre_load_header(self, fileobj):
        try:
            fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset)
        except (InvalidChunk, KeyError):
            raise ID3NoHeaderError("No ID3 chunk")

    def save(self, filename=None, v2_version=4, v23_sep='/', padding=None):
        """Save ID3v2 data to the AIFF file"""

        if filename is None:
            filename = self.filename

        # Unlike the parent ID3.save method, we won't save to a blank file
        # since we would have to construct an empty AIFF file
        with open(filename, 'rb+') as fileobj:
            iff_file = IFFFile(fileobj)

            if u'ID3' not in iff_file:
                iff_file.insert_chunk(u'ID3')

            chunk = iff_file[u'ID3']

            try:
                data = self._prepare_data(
                    fileobj, chunk.data_offset, chunk.data_size, v2_version,
                    v23_sep, padding)
            except ID3Error as e:
                reraise(error, e, sys.exc_info()[2])

            new_size = len(data)
            new_size += new_size % 2  # pad byte
            assert new_size % 2 == 0
            chunk.resize(new_size)
            data += (new_size - len(data)) * b'\x00'
            assert new_size == len(data)
            chunk.write(data)

    def delete(self, filename=None):
        """Completely removes the ID3 chunk from the AIFF file"""

        if filename is None:
            filename = self.filename
        delete(filename)
        self.clear()


def delete(filename):
    """Completely removes the ID3 chunk from the AIFF file"""

    with open(filename, "rb+") as file_:
        try:
            del IFFFile(file_)[u'ID3']
        except KeyError:
            pass


class AIFF(FileType):
    """An AIFF audio file.

    :ivar info: :class:`AIFFInfo`
    :ivar tags: :class:`ID3`
    """

    _mimes = ["audio/aiff", "audio/x-aiff"]

    @staticmethod
    def score(filename, fileobj, header):
        filename = filename.lower()

        return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
                endswith(filename, b".aiff") + endswith(filename, b".aifc"))

    def add_tags(self):
        """Add an empty ID3 tag to the file."""
        if self.tags is None:
            self.tags = _IFFID3()
        else:
            raise error("an ID3 tag already exists")

    def load(self, filename, **kwargs):
        """Load stream and tag information from a file."""
        self.filename = filename

        try:
            self.tags = _IFFID3(filename, **kwargs)
        except ID3NoHeaderError:
            self.tags = None
        except ID3Error as e:
            raise error(e)

        with open(filename, "rb") as fileobj:
            self.info = AIFFInfo(fileobj)


Open = AIFF
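
A tagging sketch for the AIFF FileType above; "example.aiff" is a
hypothetical input file:

    from mutagen.aiff import AIFF
    from mutagen.id3 import TIT2

    f = AIFF("example.aiff")
    if f.tags is None:
        f.add_tags()                   # attaches an empty _IFFID3 tag
    f.tags.add(TIT2(encoding=3, text=u"A Title"))
    f.save()                           # rewrites the file's ID3 chunk
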
@@ -1,710 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2005  Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""APEv2 reading and writing.

The APEv2 format is most commonly used with Musepack files, but is
also the format of choice for WavPack and other formats. Some MP3s
also have APEv2 tags, but this can cause problems with many MP3
decoders and taggers.

APEv2 tags, like Vorbis comments, are freeform key=value pairs. APEv2
keys can be any ASCII string with characters from 0x20 to 0x7E,
between 2 and 255 characters long. Keys are case-sensitive, but
readers are recommended to be case insensitive, and it is forbidden to
use multiple keys which differ only in case. Keys are usually stored
title-cased (e.g. 'Artist' rather than 'artist').

APEv2 values are slightly more structured than Vorbis comments; values
are flagged as one of text, binary, or an external reference (usually
a URI).

Based off the format specification found at
http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification.
"""

__all__ = ["APEv2", "APEv2File", "Open", "delete"]

import sys
import struct
from collections import MutableSequence

from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string,
                      xrange)
from mutagen import Metadata, FileType, StreamInfo
from mutagen._util import (DictMixin, cdata, delete_bytes, total_ordering,
                           MutagenError)


def is_valid_apev2_key(key):
    if not isinstance(key, text_type):
        if PY3:
            raise TypeError("APEv2 key must be str")

        try:
            key = key.decode('ascii')
        except UnicodeDecodeError:
            return False

    # PY26 - Change to set literal syntax (since set is faster than list here)
    return ((2 <= len(key) <= 255) and (min(key) >= u' ') and
            (max(key) <= u'~') and
            (key not in [u"OggS", u"TAG", u"ID3", u"MP+"]))

# There are three different kinds of APE tag values.
# "0: Item contains text information coded in UTF-8
#  1: Item contains binary information
#  2: Item is a locator of external stored information [e.g. URL]
#  3: reserved"
TEXT, BINARY, EXTERNAL = xrange(3)

HAS_HEADER = 1 << 31
HAS_NO_FOOTER = 1 << 30
IS_HEADER = 1 << 29


class error(IOError, MutagenError):
    pass


class APENoHeaderError(error, ValueError):
    pass


class APEUnsupportedVersionError(error, ValueError):
    pass


class APEBadItemError(error, ValueError):
    pass


class _APEv2Data(object):
    # Store offsets of the important parts of the file.
    start = header = data = footer = end = None
    # Footer or header; seek here and read 32 to get version/size/items/flags
    metadata = None
    # Actual tag data
    tag = None

    version = None
    size = None
    items = None
    flags = 0

    # The tag is at the start rather than the end. A tag at both
    # the start and end of the file (i.e. the tag is the whole file)
    # is not considered to be at the start.
    is_at_start = False

    def __init__(self, fileobj):
        self.__find_metadata(fileobj)

        if self.header is None:
            self.metadata = self.footer
        elif self.footer is None:
            self.metadata = self.header
        else:
            self.metadata = max(self.header, self.footer)

        if self.metadata is None:
            return

        self.__fill_missing(fileobj)
        self.__fix_brokenness(fileobj)
        if self.data is not None:
            fileobj.seek(self.data)
            self.tag = fileobj.read(self.size)

    def __find_metadata(self, fileobj):
        # Try to find a header or footer.

        # Check for a simple footer.
        try:
            fileobj.seek(-32, 2)
        except IOError:
            fileobj.seek(0, 2)
            return
        if fileobj.read(8) == b"APETAGEX":
            fileobj.seek(-8, 1)
            self.footer = self.metadata = fileobj.tell()
            return

        # Check for an APEv2 tag followed by an ID3v1 tag at the end.
        try:
            fileobj.seek(-128, 2)
            if fileobj.read(3) == b"TAG":

                fileobj.seek(-35, 1)  # "TAG" + header length
                if fileobj.read(8) == b"APETAGEX":
                    fileobj.seek(-8, 1)
                    self.footer = fileobj.tell()
                    return

                # ID3v1 tag at the end, maybe preceded by Lyrics3v2.
                # (http://www.id3.org/lyrics3200.html)
                # (header length - "APETAGEX") - "LYRICS200"
                fileobj.seek(15, 1)
                if fileobj.read(9) == b'LYRICS200':
                    fileobj.seek(-15, 1)  # "LYRICS200" + size tag
                    try:
                        offset = int(fileobj.read(6))
                    except ValueError:
                        raise IOError

                    fileobj.seek(-32 - offset - 6, 1)
                    if fileobj.read(8) == b"APETAGEX":
                        fileobj.seek(-8, 1)
                        self.footer = fileobj.tell()
                        return

        except IOError:
            pass

        # Check for a tag at the start.
        fileobj.seek(0, 0)
        if fileobj.read(8) == b"APETAGEX":
            self.is_at_start = True
            self.header = 0

    def __fill_missing(self, fileobj):
        fileobj.seek(self.metadata + 8)
        self.version = fileobj.read(4)
        self.size = cdata.uint_le(fileobj.read(4))
        self.items = cdata.uint_le(fileobj.read(4))
        self.flags = cdata.uint_le(fileobj.read(4))

        if self.header is not None:
            self.data = self.header + 32
            # If we're reading the header, the size is the header
            # offset + the size, which includes the footer.
            self.end = self.data + self.size
            fileobj.seek(self.end - 32, 0)
            if fileobj.read(8) == b"APETAGEX":
                self.footer = self.end - 32
        elif self.footer is not None:
            self.end = self.footer + 32
            self.data = self.end - self.size
            if self.flags & HAS_HEADER:
                self.header = self.data - 32
            else:
                self.header = self.data
        else:
            raise APENoHeaderError("No APE tag found")

        # exclude the footer from size
        if self.footer is not None:
            self.size -= 32

    def __fix_brokenness(self, fileobj):
        # Fix broken tags written with PyMusepack.
        if self.header is not None:
            start = self.header
        else:
            start = self.data
        fileobj.seek(start)

        while start > 0:
            # Clean up broken writing from pre-Mutagen PyMusepack.
            # It didn't remove the first 24 bytes of header.
            try:
                fileobj.seek(-24, 1)
            except IOError:
                break
            else:
                if fileobj.read(8) == b"APETAGEX":
                    fileobj.seek(-8, 1)
                    start = fileobj.tell()
                else:
                    break
        self.start = start


class _CIDictProxy(DictMixin):

    def __init__(self, *args, **kwargs):
        self.__casemap = {}
        self.__dict = {}
        super(_CIDictProxy, self).__init__(*args, **kwargs)
        # Internally all names are stored as lowercase, but the case
        # they were set with is remembered and used when saving. This
        # is roughly in line with the standard, which says that keys
        # are case-sensitive but two keys differing only in case are
        # not allowed, and recommends case-insensitive
        # implementations.

    def __getitem__(self, key):
        return self.__dict[key.lower()]

    def __setitem__(self, key, value):
        lower = key.lower()
        self.__casemap[lower] = key
        self.__dict[lower] = value

    def __delitem__(self, key):
        lower = key.lower()
        del(self.__casemap[lower])
        del(self.__dict[lower])

    def keys(self):
        return [self.__casemap.get(key, key) for key in self.__dict.keys()]


class APEv2(_CIDictProxy, Metadata):
    """A file with an APEv2 tag.

    ID3v1 tags are silently ignored and overwritten.
    """

    filename = None

    def pprint(self):
        """Return tag key=value pairs in a human-readable format."""

        items = sorted(self.items())
        return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items)

    def load(self, filename):
        """Load tags from a filename."""

        self.filename = filename
        with open(filename, "rb") as fileobj:
            data = _APEv2Data(fileobj)

        if data.tag:
            self.clear()
            self.__parse_tag(data.tag, data.items)
        else:
            raise APENoHeaderError("No APE tag found")

    def __parse_tag(self, tag, count):
        fileobj = cBytesIO(tag)

        for i in xrange(count):
            size_data = fileobj.read(4)
            # someone writes wrong item counts
            if not size_data:
                break
            size = cdata.uint_le(size_data)
            flags = cdata.uint_le(fileobj.read(4))

            # Bits 1 and 2 are the value type flags, 0-3
            # Bit 0 is read/write flag, ignored
            kind = (flags & 6) >> 1
            if kind == 3:
                raise APEBadItemError("value type must be 0, 1, or 2")
            key = value = fileobj.read(1)
            while key[-1:] != b'\x00' and value:
                value = fileobj.read(1)
                key += value
            if key[-1:] == b"\x00":
                key = key[:-1]
            if PY3:
                try:
                    key = key.decode("ascii")
                except UnicodeError as err:
                    reraise(APEBadItemError, err, sys.exc_info()[2])
            value = fileobj.read(size)

            value = _get_value_type(kind)._new(value)

            self[key] = value

    def __getitem__(self, key):
        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)
        if PY2:
            key = key.encode('ascii')

        return super(APEv2, self).__getitem__(key)

    def __delitem__(self, key):
        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)
        if PY2:
            key = key.encode('ascii')

        super(APEv2, self).__delitem__(key)

    def __setitem__(self, key, value):
        """'Magic' value setter.

        This function tries to guess at what kind of value you want to
        store. If you pass in a valid UTF-8 or Unicode string, it
        treats it as a text value. If you pass in a list, it treats it
        as a list of string/Unicode values. If you pass in a string
        that is not valid UTF-8, it assumes it is a binary value.

        Python 3: all bytes will be assumed to be a byte value, even
        if they are valid utf-8.

        If you need to force a specific type of value (e.g. binary
        data that also happens to be valid UTF-8, or an external
        reference), use the APEValue factory and set the value to the
        result of that::

            from mutagen.apev2 import APEValue, EXTERNAL
            tag['Website'] = APEValue('http://example.org', EXTERNAL)
        """

        if not is_valid_apev2_key(key):
            raise KeyError("%r is not a valid APEv2 key" % key)

        if PY2:
            key = key.encode('ascii')

        if not isinstance(value, _APEValue):
            # let's guess at the content if we're not already a value...
            if isinstance(value, text_type):
                # unicode? we've got to be text.
                value = APEValue(value, TEXT)
            elif isinstance(value, list):
                items = []
                for v in value:
                    if not isinstance(v, text_type):
                        if PY3:
                            raise TypeError("item in list not str")
                        v = v.decode("utf-8")
                    items.append(v)

                # list? text.
                value = APEValue(u"\0".join(items), TEXT)
            else:
                if PY3:
                    value = APEValue(value, BINARY)
                else:
                    try:
                        value.decode("utf-8")
                    except UnicodeError:
                        # invalid UTF8 text, probably binary
                        value = APEValue(value, BINARY)
                    else:
                        # valid UTF8, probably text
                        value = APEValue(value, TEXT)

        super(APEv2, self).__setitem__(key, value)

    def save(self, filename=None):
        """Save changes to a file.

        If no filename is given, the one most recently loaded is used.

        Tags are always written at the end of the file, and include
        a header and a footer.
        """

        filename = filename or self.filename
        try:
            fileobj = open(filename, "r+b")
        except IOError:
            fileobj = open(filename, "w+b")
        data = _APEv2Data(fileobj)

        if data.is_at_start:
            delete_bytes(fileobj, data.end - data.start, data.start)
        elif data.start is not None:
            fileobj.seek(data.start)
            # Delete an ID3v1 tag if present, too.
            fileobj.truncate()
        fileobj.seek(0, 2)

        tags = []
        for key, value in self.items():
            # Packed format for an item:
            # 4B: Value length
            # 4B: Value type
            # Key name
            # 1B: Null
            # Key value
            value_data = value._write()
            if not isinstance(key, bytes):
                key = key.encode("utf-8")
            tag_data = bytearray()
            tag_data += struct.pack("<2I", len(value_data), value.kind << 1)
            tag_data += key + b"\0" + value_data
            tags.append(bytes(tag_data))

        # "APE tags items should be sorted ascending by size... This is
        # not a MUST, but STRONGLY recommended. Actually the items should
        # be sorted by importance/byte, but this is not feasible."
        tags.sort(key=len)
        num_tags = len(tags)
        tags = b"".join(tags)

        header = bytearray(b"APETAGEX")
        # version, tag size, item count, flags
        header += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER | IS_HEADER)
        header += b"\0" * 8
        fileobj.write(header)

        fileobj.write(tags)

        footer = bytearray(b"APETAGEX")
        footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER)
        footer += b"\0" * 8

        fileobj.write(footer)
        fileobj.close()

    def delete(self, filename=None):
        """Remove tags from a file."""

        filename = filename or self.filename
        with open(filename, "r+b") as fileobj:
            data = _APEv2Data(fileobj)
            if data.start is not None and data.size is not None:
                delete_bytes(fileobj, data.end - data.start, data.start)

        self.clear()


Open = APEv2


def delete(filename):
    """Remove tags from a file."""

    try:
        APEv2(filename).delete()
    except APENoHeaderError:
        pass


def _get_value_type(kind):
    """Returns a _APEValue subclass or raises ValueError"""

    if kind == TEXT:
        return APETextValue
    elif kind == BINARY:
        return APEBinaryValue
    elif kind == EXTERNAL:
        return APEExtValue
    raise ValueError("unknown kind %r" % kind)


def APEValue(value, kind):
    """APEv2 tag value factory.

    Use this if you need to specify the value's type manually. Binary
    and text data are automatically detected by APEv2.__setitem__.
    """

    try:
        type_ = _get_value_type(kind)
    except ValueError:
        raise ValueError("kind must be TEXT, BINARY, or EXTERNAL")
    else:
        return type_(value)


class _APEValue(object):

    kind = None
    value = None

    def __init__(self, value, kind=None):
        # kind kwarg is for backwards compat
        if kind is not None and kind != self.kind:
            raise ValueError
        self.value = self._validate(value)

    @classmethod
    def _new(cls, data):
        instance = cls.__new__(cls)
        instance._parse(data)
        return instance

    def _parse(self, data):
        """Sets value or raises APEBadItemError"""

        raise NotImplementedError

    def _write(self):
        """Returns bytes"""

        raise NotImplementedError

    def _validate(self, value):
        """Returns validated value or raises TypeError/ValueError"""

        raise NotImplementedError

    def __repr__(self):
        return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind)


@swap_to_string
@total_ordering
class _APEUtf8Value(_APEValue):

    def _parse(self, data):
        try:
            self.value = data.decode("utf-8")
        except UnicodeDecodeError as e:
            reraise(APEBadItemError, e, sys.exc_info()[2])

    def _validate(self, value):
        if not isinstance(value, text_type):
            if PY3:
                raise TypeError("value not str")
            else:
                value = value.decode("utf-8")
        return value

    def _write(self):
        return self.value.encode("utf-8")

    def __len__(self):
        return len(self.value)

    def __bytes__(self):
        return self._write()

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    def __str__(self):
        return self.value


class APETextValue(_APEUtf8Value, MutableSequence):
    """An APEv2 text value.

    Text values are Unicode/UTF-8 strings. They can be accessed like
    strings (with a null separating the values), or arrays of strings.
    """

    kind = TEXT

    def __iter__(self):
        """Iterate over the strings of the value (not the characters)"""

        return iter(self.value.split(u"\0"))

    def __getitem__(self, index):
        return self.value.split(u"\0")[index]

    def __len__(self):
        return self.value.count(u"\0") + 1

    def __setitem__(self, index, value):
        if not isinstance(value, text_type):
            if PY3:
                raise TypeError("value not str")
            else:
                value = value.decode("utf-8")

        values = list(self)
        values[index] = value
        self.value = u"\0".join(values)

    def insert(self, index, value):
        if not isinstance(value, text_type):
            if PY3:
                raise TypeError("value not str")
            else:
                value = value.decode("utf-8")

        values = list(self)
        values.insert(index, value)
        self.value = u"\0".join(values)

    def __delitem__(self, index):
        values = list(self)
        del values[index]
        self.value = u"\0".join(values)

    def pprint(self):
        return u" / ".join(self)


@swap_to_string
@total_ordering
class APEBinaryValue(_APEValue):
    """An APEv2 binary value."""

    kind = BINARY

    def _parse(self, data):
        self.value = data

    def _write(self):
        return self.value

    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("value not bytes")
        return bytes(value)

    def __len__(self):
        return len(self.value)

    def __bytes__(self):
        return self._write()

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    def pprint(self):
        return u"[%d bytes]" % len(self)


class APEExtValue(_APEUtf8Value):
    """An APEv2 external value.

    External values are usually URI or IRI strings.
    """

    kind = EXTERNAL

    def pprint(self):
        return u"[External] %s" % self.value


class APEv2File(FileType):
    class _Info(StreamInfo):
        length = 0
        bitrate = 0

        def __init__(self, fileobj):
            pass

        @staticmethod
        def pprint():
            return u"Unknown format with APEv2 tag."

    def load(self, filename):
        self.filename = filename
        self.info = self._Info(open(filename, "rb"))
        try:
            self.tags = APEv2(filename)
        except APENoHeaderError:
            self.tags = None

    def add_tags(self):
        if self.tags is None:
            self.tags = APEv2()
        else:
            raise error("%r already has tags: %r" % (self, self.tags))

    @staticmethod
    def score(filename, fileobj, header):
        try:
            fileobj.seek(-160, 2)
        except IOError:
            fileobj.seek(0)
        footer = fileobj.read()
        return ((b"APETAGEX" in footer) - header.startswith(b"ID3"))
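
The value guessing in APEv2.__setitem__ above can be exercised as follows
(a sketch; "example.mpc" is a hypothetical target file):

    from mutagen.apev2 import APEv2, APEValue, EXTERNAL

    tag = APEv2()
    tag[u"Artist"] = u"Some Artist"    # unicode text -> APETextValue
    tag[u"Website"] = APEValue(u"http://example.org", EXTERNAL)
    tag.save("example.mpc")            # writes header, items and footer
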
@@ -1,319 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006  Joe Wreschnig
# Copyright (C) 2006-2007  Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Read and write ASF (Windows Media Audio) files."""

__all__ = ["ASF", "Open"]

from mutagen import FileType, Metadata, StreamInfo
from mutagen._util import resize_bytes, DictMixin
from mutagen._compat import string_types, long_, PY3, izip

from ._util import error, ASFError, ASFHeaderError
from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \
    ExtendedContentDescriptionObject, HeaderExtensionObject, \
    ContentDescriptionObject
from ._attrs import ASFGUIDAttribute, ASFWordAttribute, ASFQWordAttribute, \
    ASFDWordAttribute, ASFBoolAttribute, ASFByteArrayAttribute, \
    ASFUnicodeAttribute, ASFBaseAttribute, ASFValue


# pyflakes
error, ASFError, ASFHeaderError, ASFValue


class ASFInfo(StreamInfo):
    """ASF stream information."""

    length = 0.0
    """Length in seconds (`float`)"""

    sample_rate = 0
    """Sample rate in Hz (`int`)"""

    bitrate = 0
    """Bitrate in bps (`int`)"""

    channels = 0
    """Number of channels (`int`)"""

    codec_type = u""
    """Name of the codec type of the first audio stream or
    an empty string if unknown. Example: ``Windows Media Audio 9 Standard``
    (:class:`mutagen.text`)
    """

    codec_name = u""
    """Name and maybe version of the codec used. Example:
    ``Windows Media Audio 9.1`` (:class:`mutagen.text`)
    """

    codec_description = u""
    """Further information on the codec used.
    Example: ``64 kbps, 48 kHz, stereo 2-pass CBR`` (:class:`mutagen.text`)
    """

    def __init__(self):
        self.length = 0.0
        self.sample_rate = 0
        self.bitrate = 0
        self.channels = 0
        self.codec_type = u""
        self.codec_name = u""
        self.codec_description = u""

    def pprint(self):
        """Returns a stream information text summary

        :rtype: text
        """

        s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % (
            self.codec_type or self.codec_name or u"???", self.bitrate,
            self.sample_rate, self.channels, self.length)
        return s


class ASFTags(list, DictMixin, Metadata):
    """Dictionary containing ASF attributes."""

    def __getitem__(self, key):
        """A list of values for the key.

        This is a copy, so comment['title'].append('a title') will not
        work.

        """

        # PY3 only
        if isinstance(key, slice):
            return list.__getitem__(self, key)

        values = [value for (k, value) in self if k == key]
        if not values:
            raise KeyError(key)
        else:
            return values

    def __delitem__(self, key):
        """Delete all values associated with the key."""

        # PY3 only
        if isinstance(key, slice):
            return list.__delitem__(self, key)

        to_delete = [x for x in self if x[0] == key]
        if not to_delete:
            raise KeyError(key)
        else:
            for k in to_delete:
                self.remove(k)

    def __contains__(self, key):
        """Return true if the key has any values."""
        for k, value in self:
            if k == key:
                return True
        else:
            return False

    def __setitem__(self, key, values):
        """Set a key's value or values.

        Setting a value overwrites all old ones. The value may be a
        list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
        string.
        """

        # PY3 only
        if isinstance(key, slice):
            return list.__setitem__(self, key, values)

        if not isinstance(values, list):
            values = [values]

        to_append = []
        for value in values:
            if not isinstance(value, ASFBaseAttribute):
                if isinstance(value, string_types):
                    value = ASFUnicodeAttribute(value)
                elif PY3 and isinstance(value, bytes):
                    value = ASFByteArrayAttribute(value)
                elif isinstance(value, bool):
                    value = ASFBoolAttribute(value)
                elif isinstance(value, int):
                    value = ASFDWordAttribute(value)
                elif isinstance(value, long_):
                    value = ASFQWordAttribute(value)
                else:
                    raise TypeError("Invalid type %r" % type(value))
            to_append.append((key, value))

        try:
            del(self[key])
        except KeyError:
            pass

        self.extend(to_append)

    def keys(self):
        """Return a sequence of all keys in the comment."""

        return self and set(next(izip(*self)))

    def as_dict(self):
        """Return a copy of the comment data in a real dict."""

        d = {}
        for key, value in self:
            d.setdefault(key, []).append(value)
        return d

    def pprint(self):
        """Returns a string containing all key, value pairs.

        :rtype: text
        """

        return "\n".join("%s=%s" % (k, v) for k, v in self)


UNICODE = ASFUnicodeAttribute.TYPE
"""Unicode string type"""

BYTEARRAY = ASFByteArrayAttribute.TYPE
"""Byte array type"""

BOOL = ASFBoolAttribute.TYPE
"""Bool type"""

DWORD = ASFDWordAttribute.TYPE
"""DWord type (uint32)"""

QWORD = ASFQWordAttribute.TYPE
"""QWord type (uint64)"""

WORD = ASFWordAttribute.TYPE
"""Word type (uint16)"""

GUID = ASFGUIDAttribute.TYPE
"""GUID type"""
class ASF(FileType):
|
||||
"""An ASF file, probably containing WMA or WMV.
|
||||
|
||||
:param filename: a filename to load
|
||||
:raises mutagen.asf.error: In case loading fails
|
||||
"""
|
||||
|
||||
_mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf",
|
||||
"audio/x-wma", "video/x-wmv"]
|
||||
|
||||
info = None
|
||||
"""A `ASFInfo` instance"""
|
||||
|
||||
tags = None
|
||||
"""A `ASFTags` instance"""
|
||||
|
||||
def load(self, filename):
|
||||
self.filename = filename
|
||||
self.info = ASFInfo()
|
||||
self.tags = ASFTags()
|
||||
|
||||
with open(filename, "rb") as fileobj:
|
||||
self._tags = {}
|
||||
|
||||
self._header = HeaderObject.parse_full(self, fileobj)
|
||||
|
||||
for guid in [ContentDescriptionObject.GUID,
|
||||
ExtendedContentDescriptionObject.GUID, MetadataObject.GUID,
|
||||
MetadataLibraryObject.GUID]:
|
||||
self.tags.extend(self._tags.pop(guid, []))
|
||||
|
||||
assert not self._tags
|
||||
|
||||
def save(self, filename=None, padding=None):
|
||||
"""Save tag changes back to the loaded file.
|
||||
|
||||
:param padding: A callback which returns the amount of padding to use.
|
||||
See :class:`mutagen.PaddingInfo`
|
||||
|
||||
:raises mutagen.asf.error: In case saving fails
|
||||
"""
|
||||
|
||||
if filename is not None and filename != self.filename:
|
||||
raise ValueError("saving to another file not supported atm")
|
||||
|
||||
# Move attributes to the right objects
|
||||
self.to_content_description = {}
|
||||
self.to_extended_content_description = {}
|
||||
self.to_metadata = {}
|
||||
self.to_metadata_library = []
|
||||
for name, value in self.tags:
|
||||
library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
|
||||
can_cont_desc = value.TYPE == UNICODE
|
||||
|
||||
if library_only or value.language is not None:
|
||||
self.to_metadata_library.append((name, value))
|
||||
elif value.stream is not None:
|
||||
if name not in self.to_metadata:
|
||||
self.to_metadata[name] = value
|
||||
else:
|
||||
self.to_metadata_library.append((name, value))
|
||||
elif name in ContentDescriptionObject.NAMES:
|
||||
if name not in self.to_content_description and can_cont_desc:
|
||||
self.to_content_description[name] = value
|
||||
else:
|
||||
self.to_metadata_library.append((name, value))
|
||||
else:
|
||||
if name not in self.to_extended_content_description:
|
||||
self.to_extended_content_description[name] = value
|
||||
else:
|
||||
self.to_metadata_library.append((name, value))
|
||||
|
||||
# Add missing objects
|
||||
header = self._header
|
||||
if header.get_child(ContentDescriptionObject.GUID) is None:
|
||||
header.objects.append(ContentDescriptionObject())
|
||||
if header.get_child(ExtendedContentDescriptionObject.GUID) is None:
|
||||
header.objects.append(ExtendedContentDescriptionObject())
|
||||
header_ext = header.get_child(HeaderExtensionObject.GUID)
|
||||
if header_ext is None:
|
||||
header_ext = HeaderExtensionObject()
|
||||
header.objects.append(header_ext)
|
||||
if header_ext.get_child(MetadataObject.GUID) is None:
|
||||
header_ext.objects.append(MetadataObject())
|
||||
if header_ext.get_child(MetadataLibraryObject.GUID) is None:
|
||||
header_ext.objects.append(MetadataLibraryObject())
|
||||
|
||||
# Render to file
|
||||
with open(self.filename, "rb+") as fileobj:
|
||||
old_size = header.parse_size(fileobj)[0]
|
||||
data = header.render_full(self, fileobj, old_size, padding)
|
||||
size = len(data)
|
||||
resize_bytes(fileobj, old_size, size, 0)
|
||||
fileobj.seek(0)
|
||||
fileobj.write(data)
|
||||
|
||||
def add_tags(self):
|
||||
raise ASFError
|
||||
|
||||
def delete(self, filename=None):
|
||||
|
||||
if filename is not None and filename != self.filename:
|
||||
raise ValueError("saving to another file not supported atm")
|
||||
|
||||
self.tags.clear()
|
||||
self.save(padding=lambda x: 0)
|
||||
|
||||
@staticmethod
|
||||
def score(filename, fileobj, header):
|
||||
return header.startswith(HeaderObject.GUID) * 2
|
||||
|
||||
Open = ASF
|
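
For reference, a minimal usage sketch of the ASF file type deleted above, assuming mutagen is installed from PyPI and that song.wma is a hypothetical file path::

    from mutagen.asf import ASF

    f = ASF("song.wma")          # parses the header objects on load
    f["Title"] = u"New Title"    # plain strings are wrapped in ASFUnicodeAttribute
    f.save()                     # re-renders the header and resizes the file in place
    print(f.pprint())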
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,438 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006  Joe Wreschnig
# Copyright (C) 2006-2007  Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import sys
import struct

from mutagen._compat import swap_to_string, text_type, PY2, reraise
from mutagen._util import total_ordering

from ._util import ASFError


class ASFBaseAttribute(object):
    """Generic attribute."""

    TYPE = None

    _TYPES = {}

    value = None
    """The Python value of this attribute (type depends on the class)"""

    language = None
    """Language"""

    stream = None
    """Stream"""

    def __init__(self, value=None, data=None, language=None,
                 stream=None, **kwargs):
        self.language = language
        self.stream = stream
        if data:
            self.value = self.parse(data, **kwargs)
        else:
            if value is None:
                # we used to support not passing any args and instead assign
                # them later, keep that working..
                self.value = None
            else:
                self.value = self._validate(value)

    @classmethod
    def _register(cls, other):
        cls._TYPES[other.TYPE] = other
        return other

    @classmethod
    def _get_type(cls, type_):
        """Raises KeyError"""

        return cls._TYPES[type_]

    def _validate(self, value):
        """Raises TypeError or ValueError in case the user supplied value
        isn't valid.
        """

        return value

    def data_size(self):
        raise NotImplementedError

    def __repr__(self):
        name = "%s(%r" % (type(self).__name__, self.value)
        if self.language:
            name += ", language=%d" % self.language
        if self.stream:
            name += ", stream=%d" % self.stream
        name += ")"
        return name

    def render(self, name):
        name = name.encode("utf-16-le") + b"\x00\x00"
        data = self._render()
        return (struct.pack("<H", len(name)) + name +
                struct.pack("<HH", self.TYPE, len(data)) + data)

    def render_m(self, name):
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct.pack("<HHHHI", 0, self.stream or 0, len(name),
                self.TYPE, len(data)) + name + data)

    def render_ml(self, name):
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()

        return (struct.pack("<HHHHI", self.language or 0, self.stream or 0,
                len(name), self.TYPE, len(data)) + name + data)


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFUnicodeAttribute(ASFBaseAttribute):
    """Unicode string attribute.

    ::

        ASFUnicodeAttribute(u'some text')
    """

    TYPE = 0x0000

    def parse(self, data):
        try:
            return data.decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError as e:
            reraise(ASFError, e, sys.exc_info()[2])

    def _validate(self, value):
        if not isinstance(value, text_type):
            if PY2:
                return value.decode("utf-8")
            else:
                raise TypeError("%r not str" % value)
        return value

    def _render(self):
        return self.value.encode("utf-16-le") + b"\x00\x00"

    def data_size(self):
        return len(self._render())

    def __bytes__(self):
        return self.value.encode("utf-16-le")

    def __str__(self):
        return self.value

    def __eq__(self, other):
        return text_type(self) == other

    def __lt__(self, other):
        return text_type(self) < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFByteArrayAttribute(ASFBaseAttribute):
    """Byte array attribute.

    ::

        ASFByteArrayAttribute(b'1234')
    """
    TYPE = 0x0001

    def parse(self, data):
        assert isinstance(data, bytes)
        return data

    def _render(self):
        assert isinstance(self.value, bytes)
        return self.value

    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("must be bytes/str: %r" % value)
        return value

    def data_size(self):
        return len(self.value)

    def __bytes__(self):
        return self.value

    def __str__(self):
        return "[binary data (%d bytes)]" % len(self.value)

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFBoolAttribute(ASFBaseAttribute):
    """Bool attribute.

    ::

        ASFBoolAttribute(True)
    """

    TYPE = 0x0002

    def parse(self, data, dword=True):
        if dword:
            return struct.unpack("<I", data)[0] == 1
        else:
            return struct.unpack("<H", data)[0] == 1

    def _render(self, dword=True):
        if dword:
            return struct.pack("<I", bool(self.value))
        else:
            return struct.pack("<H", bool(self.value))

    def _validate(self, value):
        return bool(value)

    def data_size(self):
        return 4

    def __bool__(self):
        return bool(self.value)

    def __bytes__(self):
        return text_type(self.value).encode('utf-8')

    def __str__(self):
        return text_type(self.value)

    def __eq__(self, other):
        return bool(self.value) == other

    def __lt__(self, other):
        return bool(self.value) < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFDWordAttribute(ASFBaseAttribute):
    """DWORD attribute.

    ::

        ASFDWordAttribute(42)
    """

    TYPE = 0x0003

    def parse(self, data):
        return struct.unpack("<L", data)[0]

    def _render(self):
        return struct.pack("<L", self.value)

    def _validate(self, value):
        value = int(value)
        if not 0 <= value <= 2 ** 32 - 1:
            raise ValueError("Out of range")
        return value

    def data_size(self):
        return 4

    def __int__(self):
        return self.value

    def __bytes__(self):
        return text_type(self.value).encode('utf-8')

    def __str__(self):
        return text_type(self.value)

    def __eq__(self, other):
        return int(self.value) == other

    def __lt__(self, other):
        return int(self.value) < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFQWordAttribute(ASFBaseAttribute):
    """QWORD attribute.

    ::

        ASFQWordAttribute(42)
    """

    TYPE = 0x0004

    def parse(self, data):
        return struct.unpack("<Q", data)[0]

    def _render(self):
        return struct.pack("<Q", self.value)

    def _validate(self, value):
        value = int(value)
        if not 0 <= value <= 2 ** 64 - 1:
            raise ValueError("Out of range")
        return value

    def data_size(self):
        return 8

    def __int__(self):
        return self.value

    def __bytes__(self):
        return text_type(self.value).encode('utf-8')

    def __str__(self):
        return text_type(self.value)

    def __eq__(self, other):
        return int(self.value) == other

    def __lt__(self, other):
        return int(self.value) < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFWordAttribute(ASFBaseAttribute):
    """WORD attribute.

    ::

        ASFWordAttribute(42)
    """

    TYPE = 0x0005

    def parse(self, data):
        return struct.unpack("<H", data)[0]

    def _render(self):
        return struct.pack("<H", self.value)

    def _validate(self, value):
        value = int(value)
        if not 0 <= value <= 2 ** 16 - 1:
            raise ValueError("Out of range")
        return value

    def data_size(self):
        return 2

    def __int__(self):
        return self.value

    def __bytes__(self):
        return text_type(self.value).encode('utf-8')

    def __str__(self):
        return text_type(self.value)

    def __eq__(self, other):
        return int(self.value) == other

    def __lt__(self, other):
        return int(self.value) < other

    __hash__ = ASFBaseAttribute.__hash__


@ASFBaseAttribute._register
@swap_to_string
@total_ordering
class ASFGUIDAttribute(ASFBaseAttribute):
    """GUID attribute."""

    TYPE = 0x0006

    def parse(self, data):
        assert isinstance(data, bytes)
        return data

    def _render(self):
        assert isinstance(self.value, bytes)
        return self.value

    def _validate(self, value):
        if not isinstance(value, bytes):
            raise TypeError("must be bytes/str: %r" % value)
        return value

    def data_size(self):
        return len(self.value)

    def __bytes__(self):
        return self.value

    def __str__(self):
        return repr(self.value)

    def __eq__(self, other):
        return self.value == other

    def __lt__(self, other):
        return self.value < other

    __hash__ = ASFBaseAttribute.__hash__


def ASFValue(value, kind, **kwargs):
    """Create a tag value of a specific kind.

    ::

        ASFValue(u"My Value", UNICODE)

    :rtype: ASFBaseAttribute
    :raises TypeError: in case a wrong type was passed
    :raises ValueError: in case the value can't be represented as ASFValue.
    """

    try:
        attr_type = ASFBaseAttribute._get_type(kind)
    except KeyError:
        raise ValueError("Unknown value type %r" % kind)
    else:
        return attr_type(value=value, **kwargs)
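
A short sketch of the ASFValue factory deleted above, assuming mutagen is installed; UNICODE and DWORD are the type constants re-exported by mutagen.asf, and song.wma is a hypothetical file path::

    from mutagen.asf import ASF, ASFValue, UNICODE, DWORD

    f = ASF("song.wma")
    # explicit typed attributes instead of relying on auto-wrapping
    f["WM/Text"] = [ASFValue(u"My Value", UNICODE)]
    f["WM/TrackNumber"] = [ASFValue(7, DWORD)]   # 32-bit unsigned integer
    f.save()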
@@ -1,437 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006  Joe Wreschnig
# Copyright (C) 2006-2007  Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import struct

from mutagen._util import cdata, get_size
from mutagen._compat import text_type, xrange, izip
from mutagen._tags import PaddingInfo

from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError
from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute


class BaseObject(object):
    """Base ASF object."""

    GUID = None
    _TYPES = {}

    def __init__(self):
        self.objects = []
        self.data = b""

    def parse(self, asf, data):
        self.data = data

    def render(self, asf):
        data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data
        return data

    def get_child(self, guid):
        for obj in self.objects:
            if obj.GUID == guid:
                return obj
        return None

    @classmethod
    def _register(cls, other):
        cls._TYPES[other.GUID] = other
        return other

    @classmethod
    def _get_object(cls, guid):
        if guid in cls._TYPES:
            return cls._TYPES[guid]()
        else:
            return UnknownObject(guid)

    def __repr__(self):
        return "<%s GUID=%s objects=%r>" % (
            type(self).__name__, bytes2guid(self.GUID), self.objects)

    def pprint(self):
        l = []
        l.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID)))
        for o in self.objects:
            for e in o.pprint().splitlines():
                l.append("  " + e)
        return "\n".join(l)


class UnknownObject(BaseObject):
    """Unknown ASF object."""

    def __init__(self, guid):
        super(UnknownObject, self).__init__()
        assert isinstance(guid, bytes)
        self.GUID = guid


@BaseObject._register
class HeaderObject(BaseObject):
    """ASF header."""

    GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C")

    @classmethod
    def parse_full(cls, asf, fileobj):
        """Raises ASFHeaderError"""

        header = cls()

        size, num_objects = cls.parse_size(fileobj)
        for i in xrange(num_objects):
            guid, size = struct.unpack("<16sQ", fileobj.read(24))
            obj = BaseObject._get_object(guid)
            data = fileobj.read(size - 24)
            obj.parse(asf, data)
            header.objects.append(obj)

        return header

    @classmethod
    def parse_size(cls, fileobj):
        """Returns (size, num_objects)

        Raises ASFHeaderError
        """

        header = fileobj.read(30)
        if len(header) != 30 or header[:16] != HeaderObject.GUID:
            raise ASFHeaderError("Not an ASF file.")

        return struct.unpack("<QL", header[16:28])

    def render_full(self, asf, fileobj, available, padding_func):
        # Render everything except padding
        num_objects = 0
        data = bytearray()
        for obj in self.objects:
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
            num_objects += 1

        # calculate how much space we need at least
        padding_obj = PaddingObject()
        header_size = len(HeaderObject.GUID) + 14
        padding_overhead = len(padding_obj.render(asf))
        needed_size = len(data) + header_size + padding_overhead

        # ask the user for padding adjustments
        file_size = get_size(fileobj)
        content_size = file_size - available
        assert content_size >= 0
        info = PaddingInfo(available - needed_size, content_size)

        # add padding
        padding = info._get_padding(padding_func)
        padding_obj.parse(asf, b"\x00" * padding)
        data += padding_obj.render(asf)
        num_objects += 1

        data = (HeaderObject.GUID +
                struct.pack("<QL", len(data) + 30, num_objects) +
                b"\x01\x02" + data)

        return data

    def parse(self, asf, data):
        raise NotImplementedError

    def render(self, asf):
        raise NotImplementedError


@BaseObject._register
class ContentDescriptionObject(BaseObject):
    """Content description."""

    GUID = guid2bytes("75B22633-668E-11CF-A6D9-00AA0062CE6C")

    NAMES = [
        u"Title",
        u"Author",
        u"Copyright",
        u"Description",
        u"Rating",
    ]

    def parse(self, asf, data):
        super(ContentDescriptionObject, self).parse(asf, data)
        lengths = struct.unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            if length > 0:
                texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00"))
            else:
                texts.append(None)
            pos = end

        for key, value in izip(self.NAMES, texts):
            if value is not None:
                value = ASFUnicodeAttribute(value=value)
                asf._tags.setdefault(self.GUID, []).append((key, value))

    def render(self, asf):
        def render_text(name):
            value = asf.to_content_description.get(name)
            if value is not None:
                return text_type(value).encode("utf-16-le") + b"\x00\x00"
            else:
                return b""

        texts = [render_text(x) for x in self.NAMES]
        data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts)
        return self.GUID + struct.pack("<Q", 24 + len(data)) + data


@BaseObject._register
class ExtendedContentDescriptionObject(BaseObject):
    """Extended content description."""

    GUID = guid2bytes("D2D0A440-E307-11D2-97F0-00A0C95EA850")

    def parse(self, asf, data):
        super(ExtendedContentDescriptionObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            name_length, = struct.unpack("<H", data[pos:pos + 2])
            pos += 2
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value_type, value_length = struct.unpack("<HH", data[pos:pos + 4])
            pos += 4
            value = data[pos:pos + value_length]
            pos += value_length
            attr = ASFBaseAttribute._get_type(value_type)(data=value)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_extended_content_description.items()
        data = b"".join(attr.render(name) for (name, attr) in attrs)
        data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
        return self.GUID + data


@BaseObject._register
class FilePropertiesObject(BaseObject):
    """File properties."""

    GUID = guid2bytes("8CABDCA1-A947-11CF-8EE4-00C00C205365")

    def parse(self, asf, data):
        super(FilePropertiesObject, self).parse(asf, data)
        length, _, preroll = struct.unpack("<QQQ", data[40:64])
        # there are files where preroll is larger than length, limit to >= 0
        asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0)


@BaseObject._register
class StreamPropertiesObject(BaseObject):
    """Stream properties."""

    GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365")

    def parse(self, asf, data):
        super(StreamPropertiesObject, self).parse(asf, data)
        channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
        asf.info.channels = channels
        asf.info.sample_rate = sample_rate
        asf.info.bitrate = bitrate * 8


@BaseObject._register
class CodecListObject(BaseObject):
    """Codec List"""

    GUID = guid2bytes("86D15240-311D-11D0-A3A4-00A0C90348F6")

    def _parse_entry(self, data, offset):
        """can raise cdata.error"""

        type_, offset = cdata.uint16_le_from(data, offset)

        units, offset = cdata.uint16_le_from(data, offset)
        # utf-16 code units, not characters..
        next_offset = offset + units * 2
        try:
            name = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            name = u""
        offset = next_offset

        units, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + units * 2
        try:
            desc = data[offset:next_offset].decode("utf-16-le").strip("\x00")
        except UnicodeDecodeError:
            desc = u""
        offset = next_offset

        bytes_, offset = cdata.uint16_le_from(data, offset)
        next_offset = offset + bytes_
        codec = u""
        if bytes_ == 2:
            codec_id = cdata.uint16_le_from(data, offset)[0]
            if codec_id in CODECS:
                codec = CODECS[codec_id]
        offset = next_offset

        return offset, type_, name, desc, codec

    def parse(self, asf, data):
        super(CodecListObject, self).parse(asf, data)

        offset = 16
        count, offset = cdata.uint32_le_from(data, offset)
        for i in xrange(count):
            try:
                offset, type_, name, desc, codec = \
                    self._parse_entry(data, offset)
            except cdata.error:
                raise ASFError("invalid codec entry")

            # go with the first audio entry
            if type_ == 2:
                name = name.strip()
                desc = desc.strip()
                asf.info.codec_type = codec
                asf.info.codec_name = name
                asf.info.codec_description = desc
                return


@BaseObject._register
class PaddingObject(BaseObject):
    """Padding object"""

    GUID = guid2bytes("1806D474-CADF-4509-A4BA-9AABCB96AAE8")


@BaseObject._register
class StreamBitratePropertiesObject(BaseObject):
    """Stream bitrate properties"""

    GUID = guid2bytes("7BF875CE-468D-11D1-8D82-006097C9A2B2")


@BaseObject._register
class ContentEncryptionObject(BaseObject):
    """Content encryption"""

    GUID = guid2bytes("2211B3FB-BD23-11D2-B4B7-00A0C955FC6E")


@BaseObject._register
class ExtendedContentEncryptionObject(BaseObject):
    """Extended content encryption"""

    GUID = guid2bytes("298AE614-2622-4C17-B935-DAE07EE9289C")


@BaseObject._register
class HeaderExtensionObject(BaseObject):
    """Header extension."""

    GUID = guid2bytes("5FBF03B5-A92E-11CF-8EE3-00C00C205365")

    def parse(self, asf, data):
        super(HeaderExtensionObject, self).parse(asf, data)
        datasize, = struct.unpack("<I", data[18:22])
        datapos = 0
        while datapos < datasize:
            guid, size = struct.unpack(
                "<16sQ", data[22 + datapos:22 + datapos + 24])
            obj = BaseObject._get_object(guid)
            obj.parse(asf, data[22 + datapos + 24:22 + datapos + size])
            self.objects.append(obj)
            datapos += size

    def render(self, asf):
        data = bytearray()
        for obj in self.objects:
            # some files have the padding in the extension header, but we
            # want to add it at the end of the top level header. Just
            # skip padding at this level.
            if obj.GUID == PaddingObject.GUID:
                continue
            data += obj.render(asf)
        return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
                b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
                b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
                b"\x06\x00" + struct.pack("<I", len(data)) + data)


@BaseObject._register
class MetadataObject(BaseObject):
    """Metadata description."""

    GUID = guid2bytes("C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA")

    def parse(self, asf, data):
        super(MetadataObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (reserved, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'stream': stream}
            if value_type == 2:
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata.items()
        data = b"".join([attr.render_m(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)


@BaseObject._register
class MetadataLibraryObject(BaseObject):
    """Metadata library description."""

    GUID = guid2bytes("44231C94-9498-49D1-A141-1D134E457054")

    def parse(self, asf, data):
        super(MetadataLibraryObject, self).parse(asf, data)
        num_attributes, = struct.unpack("<H", data[0:2])
        pos = 2
        for i in xrange(num_attributes):
            (language, stream, name_length, value_type,
             value_length) = struct.unpack("<HHHHI", data[pos:pos + 12])
            pos += 12
            name = data[pos:pos + name_length]
            name = name.decode("utf-16-le").strip("\x00")
            pos += name_length
            value = data[pos:pos + value_length]
            pos += value_length
            args = {'data': value, 'language': language, 'stream': stream}
            if value_type == 2:
                args['dword'] = False
            attr = ASFBaseAttribute._get_type(value_type)(**args)
            asf._tags.setdefault(self.GUID, []).append((name, attr))

    def render(self, asf):
        attrs = asf.to_metadata_library
        data = b"".join([attr.render_ml(name) for (name, attr) in attrs])
        return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
                data)
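
A standalone sketch of the top-level header check that HeaderObject.parse_size performs above; the serialized GUID layout matches uuid.UUID(...).bytes_le, and song.wma is a hypothetical file path::

    import struct
    import uuid

    # serialized GUID of the top-level ASF Header Object (mixed-endian layout)
    HEADER_GUID = uuid.UUID("75B22630-668E-11CF-A6D9-00AA0062CE6C").bytes_le

    with open("song.wma", "rb") as fileobj:
        head = fileobj.read(30)
        if len(head) != 30 or head[:16] != HEADER_GUID:
            raise ValueError("Not an ASF file.")
        size, num_objects = struct.unpack("<QL", head[16:28])
        print("header: %d bytes, %d sub-objects" % (size, num_objects))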
@@ -1,315 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2006  Joe Wreschnig
# Copyright (C) 2006-2007  Lukas Lalinsky
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import struct

from mutagen._util import MutagenError


class error(IOError, MutagenError):
    """Error raised by :mod:`mutagen.asf`"""


class ASFError(error):
    pass


class ASFHeaderError(error):
    pass


def guid2bytes(s):
    """Converts a GUID to the serialized bytes representation"""

    assert isinstance(s, str)
    assert len(s) == 36

    p = struct.pack
    return b"".join([
        p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)),
        p(">H", int(s[19:23], 16)),
        p(">Q", int(s[24:], 16))[2:],
    ])


def bytes2guid(s):
    """Converts a serialized GUID to a text GUID"""

    assert isinstance(s, bytes)

    u = struct.unpack
    v = []
    v.extend(u("<IHH", s[:8]))
    v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:]))
    return "%08X-%04X-%04X-%04X-%012X" % tuple(v)


# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4}
CODECS = {
    0x0000: u"Unknown Wave Format",
    0x0001: u"Microsoft PCM Format",
    0x0002: u"Microsoft ADPCM Format",
    0x0003: u"IEEE Float",
    0x0004: u"Compaq Computer VSELP",
    0x0005: u"IBM CVSD",
    0x0006: u"Microsoft CCITT A-Law",
    0x0007: u"Microsoft CCITT u-Law",
    0x0008: u"Microsoft DTS",
    0x0009: u"Microsoft DRM",
    0x000A: u"Windows Media Audio 9 Voice",
    0x000B: u"Windows Media Audio 10 Voice",
    0x000C: u"OGG Vorbis",
    0x000D: u"FLAC",
    0x000E: u"MOT AMR",
    0x000F: u"Nice Systems IMBE",
    0x0010: u"OKI ADPCM",
    0x0011: u"Intel IMA ADPCM",
    0x0012: u"Videologic MediaSpace ADPCM",
    0x0013: u"Sierra Semiconductor ADPCM",
    0x0014: u"Antex Electronics G.723 ADPCM",
    0x0015: u"DSP Solutions DIGISTD",
    0x0016: u"DSP Solutions DIGIFIX",
    0x0017: u"Dialogic OKI ADPCM",
    0x0018: u"MediaVision ADPCM",
    0x0019: u"Hewlett-Packard CU codec",
    0x001A: u"Hewlett-Packard Dynamic Voice",
    0x0020: u"Yamaha ADPCM",
    0x0021: u"Speech Compression SONARC",
    0x0022: u"DSP Group True Speech",
    0x0023: u"Echo Speech EchoSC1",
    0x0024: u"Ahead Inc. Audiofile AF36",
    0x0025: u"Audio Processing Technology APTX",
    0x0026: u"Ahead Inc. AudioFile AF10",
    0x0027: u"Aculab Prosody 1612",
    0x0028: u"Merging Technologies S.A. LRC",
    0x0030: u"Dolby Labs AC2",
    0x0031: u"Microsoft GSM 6.10",
    0x0032: u"Microsoft MSNAudio",
    0x0033: u"Antex Electronics ADPCME",
    0x0034: u"Control Resources VQLPC",
    0x0035: u"DSP Solutions Digireal",
    0x0036: u"DSP Solutions DigiADPCM",
    0x0037: u"Control Resources CR10",
    0x0038: u"Natural MicroSystems VBXADPCM",
    0x0039: u"Crystal Semiconductor IMA ADPCM",
    0x003A: u"Echo Speech EchoSC3",
    0x003B: u"Rockwell ADPCM",
    0x003C: u"Rockwell DigiTalk",
    0x003D: u"Xebec Multimedia Solutions",
    0x0040: u"Antex Electronics G.721 ADPCM",
    0x0041: u"Antex Electronics G.728 CELP",
    0x0042: u"Intel G.723",
    0x0043: u"Intel G.723.1",
    0x0044: u"Intel G.729 Audio",
    0x0045: u"Sharp G.726 Audio",
    0x0050: u"Microsoft MPEG-1",
    0x0052: u"InSoft RT24",
    0x0053: u"InSoft PAC",
    0x0055: u"MP3 - MPEG Layer III",
    0x0059: u"Lucent G.723",
    0x0060: u"Cirrus Logic",
    0x0061: u"ESS Technology ESPCM",
    0x0062: u"Voxware File-Mode",
    0x0063: u"Canopus Atrac",
    0x0064: u"APICOM G.726 ADPCM",
    0x0065: u"APICOM G.722 ADPCM",
    0x0066: u"Microsoft DSAT",
    0x0067: u"Microsoft DSAT Display",
    0x0069: u"Voxware Byte Aligned",
    0x0070: u"Voxware AC8",
    0x0071: u"Voxware AC10",
    0x0072: u"Voxware AC16",
    0x0073: u"Voxware AC20",
    0x0074: u"Voxware RT24 MetaVoice",
    0x0075: u"Voxware RT29 MetaSound",
    0x0076: u"Voxware RT29HW",
    0x0077: u"Voxware VR12",
    0x0078: u"Voxware VR18",
    0x0079: u"Voxware TQ40",
    0x007A: u"Voxware SC3",
    0x007B: u"Voxware SC3",
    0x0080: u"Softsound",
    0x0081: u"Voxware TQ60",
    0x0082: u"Microsoft MSRT24",
    0x0083: u"AT&T Labs G.729A",
    0x0084: u"Motion Pixels MVI MV12",
    0x0085: u"DataFusion Systems G.726",
    0x0086: u"DataFusion Systems GSM610",
    0x0088: u"Iterated Systems ISIAudio",
    0x0089: u"Onlive",
    0x008A: u"Multitude FT SX20",
    0x008B: u"Infocom ITS ACM G.721",
    0x008C: u"Convedia G.729",
    0x008D: u"Congruency Audio",
    0x0091: u"Siemens Business Communications SBC24",
    0x0092: u"Sonic Foundry Dolby AC3 SPDIF",
    0x0093: u"MediaSonic G.723",
    0x0094: u"Aculab Prosody 8KBPS",
    0x0097: u"ZyXEL ADPCM",
    0x0098: u"Philips LPCBB",
    0x0099: u"Studer Professional Audio AG Packed",
    0x00A0: u"Malden Electronics PHONYTALK",
    0x00A1: u"Racal Recorder GSM",
    0x00A2: u"Racal Recorder G720.a",
    0x00A3: u"Racal Recorder G723.1",
    0x00A4: u"Racal Recorder Tetra ACELP",
    0x00B0: u"NEC AAC",
    0x00FF: u"CoreAAC Audio",
    0x0100: u"Rhetorex ADPCM",
    0x0101: u"BeCubed Software IRAT",
    0x0111: u"Vivo G.723",
    0x0112: u"Vivo Siren",
    0x0120: u"Philips CELP",
    0x0121: u"Philips Grundig",
    0x0123: u"Digital G.723",
    0x0125: u"Sanyo ADPCM",
    0x0130: u"Sipro Lab Telecom ACELP.net",
    0x0131: u"Sipro Lab Telecom ACELP.4800",
    0x0132: u"Sipro Lab Telecom ACELP.8V3",
    0x0133: u"Sipro Lab Telecom ACELP.G.729",
    0x0134: u"Sipro Lab Telecom ACELP.G.729A",
    0x0135: u"Sipro Lab Telecom ACELP.KELVIN",
    0x0136: u"VoiceAge AMR",
    0x0140: u"Dictaphone G.726 ADPCM",
    0x0141: u"Dictaphone CELP68",
    0x0142: u"Dictaphone CELP54",
    0x0150: u"Qualcomm PUREVOICE",
    0x0151: u"Qualcomm HALFRATE",
    0x0155: u"Ring Zero Systems TUBGSM",
    0x0160: u"Windows Media Audio Standard",
    0x0161: u"Windows Media Audio 9 Standard",
    0x0162: u"Windows Media Audio 9 Professional",
    0x0163: u"Windows Media Audio 9 Lossless",
    0x0164: u"Windows Media Audio Pro over SPDIF",
    0x0170: u"Unisys NAP ADPCM",
    0x0171: u"Unisys NAP ULAW",
    0x0172: u"Unisys NAP ALAW",
    0x0173: u"Unisys NAP 16K",
    0x0174: u"Sycom ACM SYC008",
    0x0175: u"Sycom ACM SYC701 G725",
    0x0176: u"Sycom ACM SYC701 CELP54",
    0x0177: u"Sycom ACM SYC701 CELP68",
    0x0178: u"Knowledge Adventure ADPCM",
    0x0180: u"Fraunhofer IIS MPEG-2 AAC",
    0x0190: u"Digital Theater Systems DTS",
    0x0200: u"Creative Labs ADPCM",
    0x0202: u"Creative Labs FastSpeech8",
    0x0203: u"Creative Labs FastSpeech10",
    0x0210: u"UHER informatic GmbH ADPCM",
    0x0215: u"Ulead DV Audio",
    0x0216: u"Ulead DV Audio",
    0x0220: u"Quarterdeck",
    0x0230: u"I-link Worldwide ILINK VC",
    0x0240: u"Aureal Semiconductor RAW SPORT",
    0x0249: u"Generic Passthru",
    0x0250: u"Interactive Products HSX",
    0x0251: u"Interactive Products RPELP",
    0x0260: u"Consistent Software CS2",
    0x0270: u"Sony SCX",
    0x0271: u"Sony SCY",
    0x0272: u"Sony ATRAC3",
    0x0273: u"Sony SPC",
    0x0280: u"Telum Audio",
    0x0281: u"Telum IA Audio",
    0x0285: u"Norcom Voice Systems ADPCM",
    0x0300: u"Fujitsu TOWNS SND",
    0x0350: u"Micronas SC4 Speech",
    0x0351: u"Micronas CELP833",
    0x0400: u"Brooktree BTV Digital",
    0x0401: u"Intel Music Coder",
    0x0402: u"Intel Audio",
    0x0450: u"QDesign Music",
    0x0500: u"On2 AVC0 Audio",
    0x0501: u"On2 AVC1 Audio",
    0x0680: u"AT&T Labs VME VMPCM",
    0x0681: u"AT&T Labs TPC",
    0x08AE: u"ClearJump Lightwave Lossless",
    0x1000: u"Olivetti GSM",
    0x1001: u"Olivetti ADPCM",
    0x1002: u"Olivetti CELP",
    0x1003: u"Olivetti SBC",
    0x1004: u"Olivetti OPR",
    0x1100: u"Lernout & Hauspie",
    0x1101: u"Lernout & Hauspie CELP",
    0x1102: u"Lernout & Hauspie SBC8",
    0x1103: u"Lernout & Hauspie SBC12",
    0x1104: u"Lernout & Hauspie SBC16",
    0x1400: u"Norris Communication",
    0x1401: u"ISIAudio",
    0x1500: u"AT&T Labs Soundspace Music Compression",
    0x1600: u"Microsoft MPEG ADTS AAC",
    0x1601: u"Microsoft MPEG RAW AAC",
    0x1608: u"Nokia MPEG ADTS AAC",
    0x1609: u"Nokia MPEG RAW AAC",
    0x181C: u"VoxWare MetaVoice RT24",
    0x1971: u"Sonic Foundry Lossless",
    0x1979: u"Innings Telecom ADPCM",
    0x1FC4: u"NTCSoft ALF2CD ACM",
    0x2000: u"Dolby AC3",
    0x2001: u"DTS",
    0x4143: u"Divio AAC",
    0x4201: u"Nokia Adaptive Multi-Rate",
    0x4243: u"Divio G.726",
    0x4261: u"ITU-T H.261",
    0x4263: u"ITU-T H.263",
    0x4264: u"ITU-T H.264",
    0x674F: u"Ogg Vorbis Mode 1",
    0x6750: u"Ogg Vorbis Mode 2",
    0x6751: u"Ogg Vorbis Mode 3",
    0x676F: u"Ogg Vorbis Mode 1+",
    0x6770: u"Ogg Vorbis Mode 2+",
    0x6771: u"Ogg Vorbis Mode 3+",
    0x7000: u"3COM NBX Audio",
    0x706D: u"FAAD AAC Audio",
    0x77A1: u"True Audio Lossless Audio",
    0x7A21: u"GSM-AMR CBR 3GPP Audio",
    0x7A22: u"GSM-AMR VBR 3GPP Audio",
    0xA100: u"Comverse Infosys G723.1",
    0xA101: u"Comverse Infosys AVQSBC",
    0xA102: u"Comverse Infosys SBC",
    0xA103: u"Symbol Technologies G729a",
    0xA104: u"VoiceAge AMR WB",
    0xA105: u"Ingenient Technologies G.726",
    0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)",
    0xA107: u"Encore Software Ltd's G.726",
    0xA108: u"ZOLL Medical Corporation ASAO",
    0xA109: u"Speex Voice",
    0xA10A: u"Vianix MASC Speech Compression",
    0xA10B: u"Windows Media 9 Spectrum Analyzer Output",
    0xA10C: u"Media Foundation Spectrum Analyzer Output",
    0xA10D: u"GSM 6.10 (Full-Rate) Speech",
    0xA10E: u"GSM 6.20 (Half-Rate) Speech",
    0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech",
    0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech",
    0xA111: u"GSM Adaptive Multi-Rate WideBand Speech",
    0xA112: u"Polycom G.722",
    0xA113: u"Polycom G.728",
    0xA114: u"Polycom G.729a",
    0xA115: u"Polycom Siren",
    0xA116: u"Global IP Sound ILBC",
    0xA117: u"Radio Time Time Shifted Radio",
    0xA118: u"Nice Systems ACA",
    0xA119: u"Nice Systems ADPCM",
    0xA11A: u"Vocord Group ITU-T G.721",
    0xA11B: u"Vocord Group ITU-T G.726",
    0xA11C: u"Vocord Group ITU-T G.722.1",
    0xA11D: u"Vocord Group ITU-T G.728",
    0xA11E: u"Vocord Group ITU-T G.729",
    0xA11F: u"Vocord Group ITU-T G.729a",
    0xA120: u"Vocord Group ITU-T G.723.1",
    0xA121: u"Vocord Group LBC",
    0xA122: u"Nice G.728",
    0xA123: u"France Telecom G.729 ACM Audio",
    0xA124: u"CODIAN Audio",
    0xCC12: u"Intel YUV12 Codec",
    0xCFCC: u"Digital Processing Systems Perception Motion JPEG",
    0xD261: u"DEC H.261",
    0xD263: u"DEC H.263",
    0xFFFE: u"Extensible Wave Format",
    0xFFFF: u"Unregistered",
}
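
A quick round-trip sketch of the GUID helpers deleted above; in mutagen they live in the internal module mutagen.asf._util, so the import path is an assumption that may vary across versions::

    from mutagen.asf._util import guid2bytes, bytes2guid

    g = "75B22630-668E-11CF-A6D9-00AA0062CE6C"
    b = guid2bytes(g)           # 16 bytes; first three fields little-endian
    assert len(b) == 16
    assert bytes2guid(b) == g   # the round trip restores the text form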
|
@ -1,534 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright (C) 2006 Joe Wreschnig
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of version 2 of the GNU General Public License as
|
||||
# published by the Free Software Foundation.
|
||||
|
||||
"""Easier access to ID3 tags.
|
||||
|
||||
EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
|
||||
more like Vorbis or APEv2 tags.
|
||||
"""
|
||||
|
||||
import mutagen.id3
|
||||
|
||||
from ._compat import iteritems, text_type, PY2
|
||||
from mutagen import Metadata
|
||||
from mutagen._util import DictMixin, dict_match
|
||||
from mutagen.id3 import ID3, error, delete, ID3FileType
|
||||
|
||||
|
||||
__all__ = ['EasyID3', 'Open', 'delete']
|
||||
|
||||
|
||||
class EasyID3KeyError(KeyError, ValueError, error):
|
||||
"""Raised when trying to get/set an invalid key.
|
||||
|
||||
Subclasses both KeyError and ValueError for API compatibility,
|
||||
catching KeyError is preferred.
|
||||
"""
|
||||
|
||||
|
||||
class EasyID3(DictMixin, Metadata):
|
||||
"""A file with an ID3 tag.
|
||||
|
||||
Like Vorbis comments, EasyID3 keys are case-insensitive ASCII
|
||||
strings. Only a subset of ID3 frames are supported by default. Use
|
||||
EasyID3.RegisterKey and its wrappers to support more.
|
||||
|
||||
You can also set the GetFallback, SetFallback, and DeleteFallback
|
||||
to generic key getter/setter/deleter functions, which are called
|
||||
if no specific handler is registered for a key. Additionally,
|
||||
ListFallback can be used to supply an arbitrary list of extra
|
||||
keys. These can be set on EasyID3 or on individual instances after
|
||||
creation.
|
||||
|
||||
To use an EasyID3 class with mutagen.mp3.MP3::
|
||||
|
||||
from mutagen.mp3 import EasyMP3 as MP3
|
||||
MP3(filename)
|
||||
|
||||
Because many of the attributes are constructed on the fly, things
|
||||
like the following will not work::
|
||||
|
||||
ezid3["performer"].append("Joe")
|
||||
|
||||
Instead, you must do::
|
||||
|
||||
values = ezid3["performer"]
|
||||
values.append("Joe")
|
||||
ezid3["performer"] = values
|
||||
|
||||
"""
|
||||
|
||||
Set = {}
|
||||
Get = {}
|
||||
Delete = {}
|
||||
List = {}
|
||||
|
||||
# For compatibility.
|
||||
valid_keys = Get
|
||||
|
||||
GetFallback = None
|
||||
SetFallback = None
|
||||
DeleteFallback = None
|
||||
ListFallback = None
|
||||
|
||||
@classmethod
|
||||
def RegisterKey(cls, key,
|
||||
getter=None, setter=None, deleter=None, lister=None):
|
||||
"""Register a new key mapping.
|
||||
|
||||
A key mapping is four functions, a getter, setter, deleter,
|
||||
and lister. The key may be either a string or a glob pattern.
|
||||
|
||||
The getter, deleted, and lister receive an ID3 instance and
|
||||
the requested key name. The setter also receives the desired
|
||||
value, which will be a list of strings.
|
||||
|
||||
The getter, setter, and deleter are used to implement __getitem__,
|
||||
__setitem__, and __delitem__.
|
||||
|
||||
The lister is used to implement keys(). It should return a
|
||||
list of keys that are actually in the ID3 instance, provided
|
||||
by its associated getter.
|
||||
"""
|
||||
key = key.lower()
|
||||
if getter is not None:
|
||||
cls.Get[key] = getter
|
||||
if setter is not None:
|
||||
cls.Set[key] = setter
|
||||
if deleter is not None:
|
||||
cls.Delete[key] = deleter
|
||||
if lister is not None:
|
||||
cls.List[key] = lister
|
||||
|
||||
@classmethod
|
||||
def RegisterTextKey(cls, key, frameid):
|
||||
"""Register a text key.
|
||||
|
||||
If the key you need to register is a simple one-to-one mapping
|
||||
of ID3 frame name to EasyID3 key, then you can use this
|
||||
function::
|
||||
|
||||
EasyID3.RegisterTextKey("title", "TIT2")
|
||||
"""
|
||||
def getter(id3, key):
|
||||
return list(id3[frameid])
|
||||
|
||||
def setter(id3, key, value):
|
||||
try:
|
||||
frame = id3[frameid]
|
||||
except KeyError:
|
||||
id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
|
||||
else:
|
||||
frame.encoding = 3
|
||||
frame.text = value
|
||||
|
||||
def deleter(id3, key):
|
||||
del(id3[frameid])
|
||||
|
||||
cls.RegisterKey(key, getter, setter, deleter)
|
||||
|
||||
@classmethod
|
||||
def RegisterTXXXKey(cls, key, desc):
|
||||
"""Register a user-defined text frame key.
|
||||
|
||||
Some ID3 tags are stored in TXXX frames, which allow a
|
||||
freeform 'description' which acts as a subkey,
|
||||
e.g. TXXX:BARCODE.::
|
||||
|
||||
EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
|
||||
"""
|
||||
frameid = "TXXX:" + desc
|
||||
|
||||
def getter(id3, key):
|
||||
return list(id3[frameid])
|
||||
|
||||
def setter(id3, key, value):
|
||||
try:
|
||||
frame = id3[frameid]
|
||||
except KeyError:
|
||||
enc = 0
|
||||
# Store 8859-1 if we can, per MusicBrainz spec.
|
||||
for v in value:
|
||||
if v and max(v) > u'\x7f':
|
||||
enc = 3
|
||||
break
|
||||
|
||||
id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
|
||||
else:
|
||||
frame.text = value
|
||||
|
||||
def deleter(id3, key):
|
||||
del(id3[frameid])
|
||||
|
||||
cls.RegisterKey(key, getter, setter, deleter)
|
||||
|
||||
def __init__(self, filename=None):
|
||||
self.__id3 = ID3()
|
||||
if filename is not None:
|
||||
self.load(filename)
|
||||
|
||||
load = property(lambda s: s.__id3.load,
|
||||
lambda s, v: setattr(s.__id3, 'load', v))
|
||||
|
||||
def save(self, *args, **kwargs):
|
||||
# ignore v2_version until we support 2.3 here
|
||||
kwargs.pop("v2_version", None)
|
||||
self.__id3.save(*args, **kwargs)
|
||||
|
||||
delete = property(lambda s: s.__id3.delete,
|
||||
lambda s, v: setattr(s.__id3, 'delete', v))
|
||||
|
||||
filename = property(lambda s: s.__id3.filename,
|
||||
lambda s, fn: setattr(s.__id3, 'filename', fn))
|
||||
|
||||
size = property(lambda s: s.__id3.size,
|
||||
lambda s, fn: setattr(s.__id3, 'size', s))
|
||||
|
||||
def __getitem__(self, key):
|
||||
key = key.lower()
|
||||
func = dict_match(self.Get, key, self.GetFallback)
|
||||
if func is not None:
|
||||
return func(self.__id3, key)
|
||||
else:
|
||||
raise EasyID3KeyError("%r is not a valid key" % key)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
key = key.lower()
|
||||
if PY2:
|
||||
if isinstance(value, basestring):
|
||||
value = [value]
|
||||
else:
|
||||
if isinstance(value, text_type):
|
||||
value = [value]
|
||||
func = dict_match(self.Set, key, self.SetFallback)
|
||||
if func is not None:
|
||||
return func(self.__id3, key, value)
|
||||
else:
|
||||
raise EasyID3KeyError("%r is not a valid key" % key)
|
||||
|
||||
def __delitem__(self, key):
|
||||
key = key.lower()
|
||||
func = dict_match(self.Delete, key, self.DeleteFallback)
|
||||
if func is not None:
|
||||
return func(self.__id3, key)
|
||||
else:
|
||||
raise EasyID3KeyError("%r is not a valid key" % key)
|
||||
|
||||
def keys(self):
|
||||
keys = []
|
||||
for key in self.Get.keys():
|
||||
if key in self.List:
|
||||
keys.extend(self.List[key](self.__id3, key))
|
||||
elif key in self:
|
||||
keys.append(key)
|
||||
if self.ListFallback is not None:
|
||||
keys.extend(self.ListFallback(self.__id3, ""))
|
||||
return keys
|
||||
|
||||
def pprint(self):
|
||||
"""Print tag key=value pairs."""
|
||||
strings = []
|
||||
for key in sorted(self.keys()):
|
||||
values = self[key]
|
||||
for value in values:
|
||||
strings.append("%s=%s" % (key, value))
|
||||
return "\n".join(strings)
|
||||
|
||||
|
||||
Open = EasyID3
|
||||
|
||||
|
||||
def genre_get(id3, key):
|
||||
return id3["TCON"].genres
|
||||
|
||||
|
||||
def genre_set(id3, key, value):
|
||||
try:
|
||||
frame = id3["TCON"]
|
||||
except KeyError:
|
||||
id3.add(mutagen.id3.TCON(encoding=3, text=value))
|
||||
else:
|
||||
frame.encoding = 3
|
||||
frame.genres = value
|
||||
|
||||
|
||||
def genre_delete(id3, key):
|
||||
del(id3["TCON"])
|
||||
|
||||
|
||||
def date_get(id3, key):
|
||||
return [stamp.text for stamp in id3["TDRC"].text]
|
||||
|
||||
|
||||
def date_set(id3, key, value):
|
||||
id3.add(mutagen.id3.TDRC(encoding=3, text=value))
|
||||
|
||||
|
||||
def date_delete(id3, key):
|
||||
del(id3["TDRC"])
|
||||
|
||||
|
||||
def original_date_get(id3, key):
|
||||
return [stamp.text for stamp in id3["TDOR"].text]
|
||||
|
||||
|
||||
def original_date_set(id3, key, value):
|
||||
id3.add(mutagen.id3.TDOR(encoding=3, text=value))
|
||||
|
||||
|
||||
def original_date_delete(id3, key):
|
||||
del(id3["TDOR"])
|
||||
|
||||
|
||||
def performer_get(id3, key):
|
||||
people = []
|
||||
wanted_role = key.split(":", 1)[1]
|
||||
try:
|
||||
mcl = id3["TMCL"]
|
||||
except KeyError:
|
||||
raise KeyError(key)
|
||||
for role, person in mcl.people:
|
||||
if role == wanted_role:
|
||||
people.append(person)
|
||||
if people:
|
||||
return people
|
||||
else:
|
||||
raise KeyError(key)
|
||||
|
||||
|
||||
def performer_set(id3, key, value):
|
||||
wanted_role = key.split(":", 1)[1]
|
||||
try:
|
||||
mcl = id3["TMCL"]
|
||||
except KeyError:
|
||||
mcl = mutagen.id3.TMCL(encoding=3, people=[])
|
||||
id3.add(mcl)
|
||||
mcl.encoding = 3
|
||||
people = [p for p in mcl.people if p[0] != wanted_role]
|
||||
for v in value:
|
||||
people.append((wanted_role, v))
|
||||
mcl.people = people
|
||||
|
||||
|
||||
def performer_delete(id3, key):
|
||||
wanted_role = key.split(":", 1)[1]
|
||||
try:
|
||||
mcl = id3["TMCL"]
|
||||
except KeyError:
|
||||
raise KeyError(key)
|
||||
people = [p for p in mcl.people if p[0] != wanted_role]
|
||||
if people == mcl.people:
|
||||
raise KeyError(key)
|
||||
elif people:
|
||||
mcl.people = people
|
||||
else:
|
||||
del(id3["TMCL"])
|
||||
|
||||
|
||||
def performer_list(id3, key):
|
||||
try:
|
||||
mcl = id3["TMCL"]
|
||||
except KeyError:
|
||||
return []
|
||||
else:
|
||||
return list(set("performer:" + p[0] for p in mcl.people))
|
||||
|
||||
|
||||
def musicbrainz_trackid_get(id3, key):
|
||||
return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')]
|
||||
|
||||
|
||||
def musicbrainz_trackid_set(id3, key, value):
|
||||
if len(value) != 1:
|
||||
raise ValueError("only one track ID may be set per song")
|
||||
value = value[0].encode('ascii')
|
||||
try:
|
||||
frame = id3["UFID:http://musicbrainz.org"]
|
||||
except KeyError:
|
||||
frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value)
|
||||
id3.add(frame)
|
||||
else:
|
||||
frame.data = value
|
||||
|
||||
|
||||
def musicbrainz_trackid_delete(id3, key):
|
||||
del(id3["UFID:http://musicbrainz.org"])
|
||||
|
||||
|
||||
def website_get(id3, key):
|
||||
urls = [frame.url for frame in id3.getall("WOAR")]
|
||||
if urls:
|
||||
return urls
|
||||
else:
|
||||
raise EasyID3KeyError(key)
|
||||
|
||||
|
||||
def website_set(id3, key, value):
|
||||
id3.delall("WOAR")
|
||||
for v in value:
|
||||
id3.add(mutagen.id3.WOAR(url=v))
|
||||
|
||||
|
||||
def website_delete(id3, key):
|
||||
id3.delall("WOAR")
|
||||
|
||||
|
||||
def gain_get(id3, key):
|
||||
try:
|
||||
frame = id3["RVA2:" + key[11:-5]]
|
||||
except KeyError:
|
||||
raise EasyID3KeyError(key)
|
||||
else:
|
||||
return [u"%+f dB" % frame.gain]
|
||||
|
||||
|
||||
def gain_set(id3, key, value):
|
||||
if len(value) != 1:
|
||||
raise ValueError(
|
||||
"there must be exactly one gain value, not %r.", value)
|
||||
gain = float(value[0].split()[0])
|
||||
try:
|
||||
frame = id3["RVA2:" + key[11:-5]]
|
||||
except KeyError:
|
||||
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
|
||||
id3.add(frame)
|
||||
frame.gain = gain
|
||||
|

def gain_delete(id3, key):
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        pass
    else:
        if frame.peak:
            frame.gain = 0.0
        else:
            del(id3["RVA2:" + key[11:-5]])


def peak_get(id3, key):
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        raise EasyID3KeyError(key)
    else:
        return [u"%f" % frame.peak]


def peak_set(id3, key, value):
    if len(value) != 1:
        raise ValueError(
            "there must be exactly one peak value, not %r.", value)
    peak = float(value[0])
    if peak >= 2 or peak < 0:
        raise ValueError("peak must be => 0 and < 2.")
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
        id3.add(frame)
    frame.peak = peak


def peak_delete(id3, key):
    try:
        frame = id3["RVA2:" + key[11:-5]]
    except KeyError:
        pass
    else:
        if frame.gain:
            frame.peak = 0.0
        else:
            del(id3["RVA2:" + key[11:-5]])


def peakgain_list(id3, key):
    keys = []
    for frame in id3.getall("RVA2"):
        keys.append("replaygain_%s_gain" % frame.desc)
        keys.append("replaygain_%s_peak" % frame.desc)
    return keys


for frameid, key in iteritems({
    "TALB": "album",
    "TBPM": "bpm",
    "TCMP": "compilation",  # iTunes extension
    "TCOM": "composer",
    "TCOP": "copyright",
    "TENC": "encodedby",
    "TEXT": "lyricist",
    "TLEN": "length",
    "TMED": "media",
    "TMOO": "mood",
    "TIT2": "title",
    "TIT3": "version",
    "TPE1": "artist",
    "TPE2": "performer",
    "TPE3": "conductor",
    "TPE4": "arranger",
    "TPOS": "discnumber",
    "TPUB": "organization",
    "TRCK": "tracknumber",
    "TOLY": "author",
    "TSO2": "albumartistsort",  # iTunes extension
    "TSOA": "albumsort",
    "TSOC": "composersort",  # iTunes extension
    "TSOP": "artistsort",
    "TSOT": "titlesort",
    "TSRC": "isrc",
    "TSST": "discsubtitle",
    "TLAN": "language",
}):
    EasyID3.RegisterTextKey(key, frameid)

EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
EasyID3.RegisterKey("date", date_get, date_set, date_delete)
EasyID3.RegisterKey("originaldate", original_date_get, original_date_set,
                    original_date_delete)
EasyID3.RegisterKey(
    "performer:*", performer_get, performer_set, performer_delete,
    performer_list)
EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get,
                    musicbrainz_trackid_set, musicbrainz_trackid_delete)
EasyID3.RegisterKey("website", website_get, website_set, website_delete)
EasyID3.RegisterKey(
    "replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list)
EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)

# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in iteritems({
    u"MusicBrainz Artist Id": "musicbrainz_artistid",
    u"MusicBrainz Album Id": "musicbrainz_albumid",
    u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
    u"MusicBrainz TRM Id": "musicbrainz_trmid",
    u"MusicIP PUID": "musicip_puid",
    u"MusicMagic Fingerprint": "musicip_fingerprint",
    u"MusicBrainz Album Status": "musicbrainz_albumstatus",
    u"MusicBrainz Album Type": "musicbrainz_albumtype",
    u"MusicBrainz Album Release Country": "releasecountry",
    u"MusicBrainz Disc Id": "musicbrainz_discid",
    u"ASIN": "asin",
    u"ALBUMARTISTSORT": "albumartistsort",
    u"BARCODE": "barcode",
    u"CATALOGNUMBER": "catalognumber",
    u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
    u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
    u"MusicBrainz Work Id": "musicbrainz_workid",
    u"Acoustid Fingerprint": "acoustid_fingerprint",
    u"Acoustid Id": "acoustid_id",
}):
    EasyID3.RegisterTXXXKey(key, desc)


class EasyID3FileType(ID3FileType):
    """Like ID3FileType, but uses EasyID3 for tags."""
    ID3 = EasyID3
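
The gain/peak handlers above round out the replaygain_*_gain and replaygain_*_peak mappings onto RVA2 frames. A minimal usage sketch through the EasyID3 dict interface follows; the file path is hypothetical, and this assumes the vendored mutagen behaves like upstream:

from mutagen.easyid3 import EasyID3

tags = EasyID3("song.mp3")  # hypothetical path to an MP3 with an ID3 tag

# The wildcard key "replaygain_*_gain" resolves to an RVA2 frame whose
# desc is the middle part of the key ("track" here).
tags["replaygain_track_gain"] = [u"-6.0"]  # gain in dB
tags["replaygain_track_peak"] = [u"0.95"]  # peak must be >= 0 and < 2
tags.save()

# peakgain_list() backs keys(), so both derived keys are listed:
print([k for k in tags.keys() if k.startswith("replaygain_")])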
@@ -1,285 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2009 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

from mutagen import Metadata
from mutagen._util import DictMixin, dict_match
from mutagen.mp4 import MP4, MP4Tags, error, delete
from ._compat import PY2, text_type, PY3


__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]


class EasyMP4KeyError(error, KeyError, ValueError):
    pass


class EasyMP4Tags(DictMixin, Metadata):
    """A file with MPEG-4 iTunes metadata.

    Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
    strings, and values are a list of Unicode strings (and these lists
    are always of length 0 or 1).

    If you need access to the full MP4 metadata feature set, you should use
    MP4, not EasyMP4.
    """

    Set = {}
    Get = {}
    Delete = {}
    List = {}

    def __init__(self, *args, **kwargs):
        self.__mp4 = MP4Tags(*args, **kwargs)
        self.load = self.__mp4.load
        self.save = self.__mp4.save
        self.delete = self.__mp4.delete
        self._padding = self.__mp4._padding

    filename = property(lambda s: s.__mp4.filename,
                        lambda s, fn: setattr(s.__mp4, 'filename', fn))

    @classmethod
    def RegisterKey(cls, key,
                    getter=None, setter=None, deleter=None, lister=None):
        """Register a new key mapping.

        A key mapping is four functions, a getter, setter, deleter,
        and lister. The key may be either a string or a glob pattern.

        The getter, deleter, and lister receive an MP4Tags instance
        and the requested key name. The setter also receives the
        desired value, which will be a list of strings.

        The getter, setter, and deleter are used to implement __getitem__,
        __setitem__, and __delitem__.

        The lister is used to implement keys(). It should return a
        list of keys that are actually in the MP4 instance, provided
        by its associated getter.
        """
        key = key.lower()
        if getter is not None:
            cls.Get[key] = getter
        if setter is not None:
            cls.Set[key] = setter
        if deleter is not None:
            cls.Delete[key] = deleter
        if lister is not None:
            cls.List[key] = lister

    @classmethod
    def RegisterTextKey(cls, key, atomid):
        """Register a text key.

        If the key you need to register is a simple one-to-one mapping
        of MP4 atom name to EasyMP4Tags key, then you can use this
        function::

            EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
        """
        def getter(tags, key):
            return tags[atomid]

        def setter(tags, key, value):
            tags[atomid] = value

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
        """Register a scalar integer key.
        """

        def getter(tags, key):
            return list(map(text_type, tags[atomid]))

        def setter(tags, key, value):
            clamp = lambda x: int(min(max(min_value, x), max_value))
            tags[atomid] = [clamp(v) for v in map(int, value)]

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterIntPairKey(cls, key, atomid, min_value=0,
                           max_value=(2 ** 16) - 1):
        def getter(tags, key):
            ret = []
            for (track, total) in tags[atomid]:
                if total:
                    ret.append(u"%d/%d" % (track, total))
                else:
                    ret.append(text_type(track))
            return ret

        def setter(tags, key, value):
            clamp = lambda x: int(min(max(min_value, x), max_value))
            data = []
            for v in value:
                try:
                    tracks, total = v.split("/")
                    tracks = clamp(int(tracks))
                    total = clamp(int(total))
                except (ValueError, TypeError):
                    tracks = clamp(int(v))
                    total = min_value
                data.append((tracks, total))
            tags[atomid] = data

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    @classmethod
    def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"):
        """Register a text key.

        If the key you need to register is a simple one-to-one mapping
        of MP4 freeform atom (----) and name to EasyMP4Tags key, then
        you can use this function::

            EasyMP4Tags.RegisterFreeformKey(
                "musicbrainz_artistid", "MusicBrainz Artist Id")
        """
        atomid = "----:" + mean + ":" + name

        def getter(tags, key):
            return [s.decode("utf-8", "replace") for s in tags[atomid]]

        def setter(tags, key, value):
            encoded = []
            for v in value:
                if not isinstance(v, text_type):
                    if PY3:
                        raise TypeError("%r not str" % v)
                    v = v.decode("utf-8")
                encoded.append(v.encode("utf-8"))
            tags[atomid] = encoded

        def deleter(tags, key):
            del(tags[atomid])

        cls.RegisterKey(key, getter, setter, deleter)

    def __getitem__(self, key):
        key = key.lower()
        func = dict_match(self.Get, key)
        if func is not None:
            return func(self.__mp4, key)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def __setitem__(self, key, value):
        key = key.lower()

        if PY2:
            if isinstance(value, basestring):
                value = [value]
        else:
            if isinstance(value, text_type):
                value = [value]

        func = dict_match(self.Set, key)
        if func is not None:
            return func(self.__mp4, key, value)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def __delitem__(self, key):
        key = key.lower()
        func = dict_match(self.Delete, key)
        if func is not None:
            return func(self.__mp4, key)
        else:
            raise EasyMP4KeyError("%r is not a valid key" % key)

    def keys(self):
        keys = []
        for key in self.Get.keys():
            if key in self.List:
                keys.extend(self.List[key](self.__mp4, key))
            elif key in self:
                keys.append(key)
        return keys

    def pprint(self):
        """Print tag key=value pairs."""
        strings = []
        for key in sorted(self.keys()):
            values = self[key]
            for value in values:
                strings.append("%s=%s" % (key, value))
        return "\n".join(strings)


for atomid, key in {
    '\xa9nam': 'title',
    '\xa9alb': 'album',
    '\xa9ART': 'artist',
    'aART': 'albumartist',
    '\xa9day': 'date',
    '\xa9cmt': 'comment',
    'desc': 'description',
    '\xa9grp': 'grouping',
    '\xa9gen': 'genre',
    'cprt': 'copyright',
    'soal': 'albumsort',
    'soaa': 'albumartistsort',
    'soar': 'artistsort',
    'sonm': 'titlesort',
    'soco': 'composersort',
}.items():
    EasyMP4Tags.RegisterTextKey(key, atomid)

for name, key in {
    'MusicBrainz Artist Id': 'musicbrainz_artistid',
    'MusicBrainz Track Id': 'musicbrainz_trackid',
    'MusicBrainz Album Id': 'musicbrainz_albumid',
    'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
    'MusicIP PUID': 'musicip_puid',
    'MusicBrainz Album Status': 'musicbrainz_albumstatus',
    'MusicBrainz Album Type': 'musicbrainz_albumtype',
    'MusicBrainz Release Country': 'releasecountry',
}.items():
    EasyMP4Tags.RegisterFreeformKey(key, name)

for name, key in {
    "tmpo": "bpm",
}.items():
    EasyMP4Tags.RegisterIntKey(key, name)

for name, key in {
    "trkn": "tracknumber",
    "disk": "discnumber",
}.items():
    EasyMP4Tags.RegisterIntPairKey(key, name)


class EasyMP4(MP4):
    """Like :class:`MP4 <mutagen.mp4.MP4>`,
    but uses :class:`EasyMP4Tags` for tags.

    :ivar info: :class:`MP4Info <mutagen.mp4.MP4Info>`
    :ivar tags: :class:`EasyMP4Tags`
    """

    MP4Tags = EasyMP4Tags

    Get = EasyMP4Tags.Get
    Set = EasyMP4Tags.Set
    Delete = EasyMP4Tags.Delete
    List = EasyMP4Tags.List
    RegisterTextKey = EasyMP4Tags.RegisterTextKey
    RegisterKey = EasyMP4Tags.RegisterKey
@@ -1,876 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2005 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

"""Read and write FLAC Vorbis comments and stream information.

Read more about FLAC at http://flac.sourceforge.net.

FLAC supports arbitrary metadata blocks. The two most interesting ones
are the FLAC stream information block, and the Vorbis comment block;
these are also the only ones Mutagen can currently read.

This module does not handle Ogg FLAC files.

Based off documentation available at
http://flac.sourceforge.net/format.html
"""

__all__ = ["FLAC", "Open", "delete"]

import struct
from ._vorbis import VCommentDict
import mutagen

from ._compat import cBytesIO, endswith, chr_, xrange
from mutagen._util import resize_bytes, MutagenError, get_size
from mutagen._tags import PaddingInfo
from mutagen.id3 import BitPaddedInt
from functools import reduce


class error(IOError, MutagenError):
    pass


class FLACNoHeaderError(error):
    pass


class FLACVorbisError(ValueError, error):
    pass


def to_int_be(data):
    """Convert an arbitrarily-long string to a long using big-endian
    byte order."""
    return reduce(lambda a, b: (a << 8) + b, bytearray(data), 0)


class StrictFileObject(object):
    """Wraps a file-like object and raises an exception if the requested
    amount of data to read isn't returned."""

    def __init__(self, fileobj):
        self._fileobj = fileobj
        for m in ["close", "tell", "seek", "write", "name"]:
            if hasattr(fileobj, m):
                setattr(self, m, getattr(fileobj, m))

    def read(self, size=-1):
        data = self._fileobj.read(size)
        if size >= 0 and len(data) != size:
            raise error("file said %d bytes, read %d bytes" % (
                size, len(data)))
        return data

    def tryread(self, *args):
        return self._fileobj.read(*args)


class MetadataBlock(object):
    """A generic block of FLAC metadata.

    This class is used as an ancestor for more specific blocks,
    and also as a container for data blobs of unknown blocks.

    Attributes:

    * data -- raw binary data for this block
    """

    _distrust_size = False
    """For block types setting this, we don't trust the size field and
    use the size of the content instead."""

    _invalid_overflow_size = -1
    """In case the real size was bigger than what is representable by the
    24 bit size field, we save the wrong specified size here. This can
    only be set if _distrust_size is True"""

    _MAX_SIZE = 2 ** 24 - 1

    def __init__(self, data):
        """Parse the given data string or file-like as a metadata block.
        The metadata header should not be included."""
        if data is not None:
            if not isinstance(data, StrictFileObject):
                if isinstance(data, bytes):
                    data = cBytesIO(data)
                elif not hasattr(data, 'read'):
                    raise TypeError(
                        "StreamInfo requires string data or a file-like")
                data = StrictFileObject(data)
            self.load(data)

    def load(self, data):
        self.data = data.read()

    def write(self):
        return self.data

    @classmethod
    def _writeblock(cls, block, is_last=False):
        """Returns the block content + header.

        Raises error.
        """

        data = bytearray()
        code = (block.code | 128) if is_last else block.code
        datum = block.write()
        size = len(datum)
        if size > cls._MAX_SIZE:
            if block._distrust_size and block._invalid_overflow_size != -1:
                # The original size of this block was (1) wrong and (2)
                # the real size doesn't allow us to save the file
                # according to the spec (too big for 24 bit uint). Instead
                # simply write back the original wrong size.. at least
                # we don't make the file more "broken" as it is.
                size = block._invalid_overflow_size
            else:
                raise error("block is too long to write")
        assert not size > cls._MAX_SIZE
        length = struct.pack(">I", size)[-3:]
        data.append(code)
        data += length
        data += datum
        return data

    @classmethod
    def _writeblocks(cls, blocks, available, cont_size, padding_func):
        """Render metadata block as a byte string."""

        # write everything except padding
        data = bytearray()
        for block in blocks:
            if isinstance(block, Padding):
                continue
            data += cls._writeblock(block)
        blockssize = len(data)

        # take the padding overhead into account. we always add one
        # to make things simple.
        padding_block = Padding()
        blockssize += len(cls._writeblock(padding_block))

        # finally add a padding block
        info = PaddingInfo(available - blockssize, cont_size)
        padding_block.length = min(info._get_padding(padding_func),
                                   cls._MAX_SIZE)
        data += cls._writeblock(padding_block, is_last=True)

        return data


class StreamInfo(MetadataBlock, mutagen.StreamInfo):
    """FLAC stream information.

    This contains information about the audio data in the FLAC file.
    Unlike most stream information objects in Mutagen, changes to this
    one will be rewritten to the file when it is saved. Unless you are
    actually changing the audio stream itself, don't change any
    attributes of this block.

    Attributes:

    * min_blocksize -- minimum audio block size
    * max_blocksize -- maximum audio block size
    * sample_rate -- audio sample rate in Hz
    * channels -- audio channels (1 for mono, 2 for stereo)
    * bits_per_sample -- bits per sample
    * total_samples -- total samples in file
    * length -- audio length in seconds
    """

    code = 0

    def __eq__(self, other):
        try:
            return (self.min_blocksize == other.min_blocksize and
                    self.max_blocksize == other.max_blocksize and
                    self.sample_rate == other.sample_rate and
                    self.channels == other.channels and
                    self.bits_per_sample == other.bits_per_sample and
                    self.total_samples == other.total_samples)
        except:
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        self.min_blocksize = int(to_int_be(data.read(2)))
        self.max_blocksize = int(to_int_be(data.read(2)))
        self.min_framesize = int(to_int_be(data.read(3)))
        self.max_framesize = int(to_int_be(data.read(3)))
        # first 16 bits of sample rate
        sample_first = to_int_be(data.read(2))
        # last 4 bits of sample rate, 3 of channels, first 1 of bits/sample
        sample_channels_bps = to_int_be(data.read(1))
        # last 4 of bits/sample, 36 of total samples
        bps_total = to_int_be(data.read(5))

        sample_tail = sample_channels_bps >> 4
        self.sample_rate = int((sample_first << 4) + sample_tail)
        if not self.sample_rate:
            raise error("A sample rate value of 0 is invalid")
        self.channels = int(((sample_channels_bps >> 1) & 7) + 1)
        bps_tail = bps_total >> 36
        bps_head = (sample_channels_bps & 1) << 4
        self.bits_per_sample = int(bps_head + bps_tail + 1)
        self.total_samples = bps_total & 0xFFFFFFFFF
        self.length = self.total_samples / float(self.sample_rate)

        self.md5_signature = to_int_be(data.read(16))

    def write(self):
        f = cBytesIO()
        f.write(struct.pack(">I", self.min_blocksize)[-2:])
        f.write(struct.pack(">I", self.max_blocksize)[-2:])
        f.write(struct.pack(">I", self.min_framesize)[-3:])
        f.write(struct.pack(">I", self.max_framesize)[-3:])

        # first 16 bits of sample rate
        f.write(struct.pack(">I", self.sample_rate >> 4)[-2:])
        # 4 bits sample, 3 channel, 1 bps
        byte = (self.sample_rate & 0xF) << 4
        byte += ((self.channels - 1) & 7) << 1
        byte += ((self.bits_per_sample - 1) >> 4) & 1
        f.write(chr_(byte))
        # 4 bits of bps, 4 of sample count
        byte = ((self.bits_per_sample - 1) & 0xF) << 4
        byte += (self.total_samples >> 32) & 0xF
        f.write(chr_(byte))
        # last 32 of sample count
        f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF))
        # MD5 signature
        sig = self.md5_signature
        f.write(struct.pack(
            ">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF,
            (sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF))
        return f.getvalue()

    def pprint(self):
        return u"FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate)


class SeekPoint(tuple):
    """A single seek point in a FLAC file.

    Placeholder seek points have first_sample of 0xFFFFFFFFFFFFFFFFL,
    and byte_offset and num_samples undefined. Seek points must be
    sorted in ascending order by first_sample number. Seek points must
    be unique by first_sample number, except for placeholder
    points. Placeholder points must occur last in the table and there
    may be any number of them.

    Attributes:

    * first_sample -- sample number of first sample in the target frame
    * byte_offset -- offset from first frame to target frame
    * num_samples -- number of samples in target frame
    """

    def __new__(cls, first_sample, byte_offset, num_samples):
        return super(cls, SeekPoint).__new__(
            cls, (first_sample, byte_offset, num_samples))

    first_sample = property(lambda self: self[0])
    byte_offset = property(lambda self: self[1])
    num_samples = property(lambda self: self[2])


class SeekTable(MetadataBlock):
    """Read and write FLAC seek tables.

    Attributes:

    * seekpoints -- list of SeekPoint objects
    """

    __SEEKPOINT_FORMAT = '>QQH'
    __SEEKPOINT_SIZE = struct.calcsize(__SEEKPOINT_FORMAT)

    code = 3

    def __init__(self, data):
        self.seekpoints = []
        super(SeekTable, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.seekpoints == other.seekpoints)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        self.seekpoints = []
        sp = data.tryread(self.__SEEKPOINT_SIZE)
        while len(sp) == self.__SEEKPOINT_SIZE:
            self.seekpoints.append(SeekPoint(
                *struct.unpack(self.__SEEKPOINT_FORMAT, sp)))
            sp = data.tryread(self.__SEEKPOINT_SIZE)

    def write(self):
        f = cBytesIO()
        for seekpoint in self.seekpoints:
            packed = struct.pack(
                self.__SEEKPOINT_FORMAT,
                seekpoint.first_sample, seekpoint.byte_offset,
                seekpoint.num_samples)
            f.write(packed)
        return f.getvalue()

    def __repr__(self):
        return "<%s seekpoints=%r>" % (type(self).__name__, self.seekpoints)


class VCFLACDict(VCommentDict):
    """Read and write FLAC Vorbis comments.

    FLACs don't use the framing bit at the end of the comment block.
    So this extends VCommentDict to not use the framing bit.
    """

    code = 4
    _distrust_size = True

    def load(self, data, errors='replace', framing=False):
        super(VCFLACDict, self).load(data, errors=errors, framing=framing)

    def write(self, framing=False):
        return super(VCFLACDict, self).write(framing=framing)


class CueSheetTrackIndex(tuple):
    """Index for a track in a cuesheet.

    For CD-DA, an index_number of 0 corresponds to the track
    pre-gap. The first index in a track must have a number of 0 or 1,
    and subsequently, index_numbers must increase by 1. Index_numbers
    must be unique within a track. And index_offset must be evenly
    divisible by 588 samples.

    Attributes:

    * index_number -- index point number
    * index_offset -- offset in samples from track start
    """

    def __new__(cls, index_number, index_offset):
        return super(cls, CueSheetTrackIndex).__new__(
            cls, (index_number, index_offset))

    index_number = property(lambda self: self[0])
    index_offset = property(lambda self: self[1])


class CueSheetTrack(object):
    """A track in a cuesheet.

    For CD-DA, track_numbers must be 1-99, or 170 for the
    lead-out. Track_numbers must be unique within a cue sheet. There
    must be at least one index in every track except the lead-out track
    which must have none.

    Attributes:

    * track_number -- track number
    * start_offset -- track offset in samples from start of FLAC stream
    * isrc -- ISRC code
    * type -- 0 for audio, 1 for digital data
    * pre_emphasis -- true if the track is recorded with pre-emphasis
    * indexes -- list of CueSheetTrackIndex objects
    """

    def __init__(self, track_number, start_offset, isrc='', type_=0,
                 pre_emphasis=False):
        self.track_number = track_number
        self.start_offset = start_offset
        self.isrc = isrc
        self.type = type_
        self.pre_emphasis = pre_emphasis
        self.indexes = []

    def __eq__(self, other):
        try:
            return (self.track_number == other.track_number and
                    self.start_offset == other.start_offset and
                    self.isrc == other.isrc and
                    self.type == other.type and
                    self.pre_emphasis == other.pre_emphasis and
                    self.indexes == other.indexes)
        except (AttributeError, TypeError):
            return False

    __hash__ = object.__hash__

    def __repr__(self):
        return (("<%s number=%r, offset=%d, isrc=%r, type=%r, "
                 "pre_emphasis=%r, indexes=%r)>") %
                (type(self).__name__, self.track_number, self.start_offset,
                 self.isrc, self.type, self.pre_emphasis, self.indexes))


class CueSheet(MetadataBlock):
    """Read and write FLAC embedded cue sheets.

    Number of tracks should be from 1 to 100. There should always be
    exactly one lead-out track and that track must be the last track
    in the cue sheet.

    Attributes:

    * media_catalog_number -- media catalog number in ASCII
    * lead_in_samples -- number of lead-in samples
    * compact_disc -- true if the cuesheet corresponds to a compact disc
    * tracks -- list of CueSheetTrack objects
    * lead_out -- lead-out as CueSheetTrack or None if lead-out was not found
    """

    __CUESHEET_FORMAT = '>128sQB258xB'
    __CUESHEET_SIZE = struct.calcsize(__CUESHEET_FORMAT)
    __CUESHEET_TRACK_FORMAT = '>QB12sB13xB'
    __CUESHEET_TRACK_SIZE = struct.calcsize(__CUESHEET_TRACK_FORMAT)
    __CUESHEET_TRACKINDEX_FORMAT = '>QB3x'
    __CUESHEET_TRACKINDEX_SIZE = struct.calcsize(__CUESHEET_TRACKINDEX_FORMAT)

    code = 5

    media_catalog_number = b''
    lead_in_samples = 88200
    compact_disc = True

    def __init__(self, data):
        self.tracks = []
        super(CueSheet, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.media_catalog_number == other.media_catalog_number and
                    self.lead_in_samples == other.lead_in_samples and
                    self.compact_disc == other.compact_disc and
                    self.tracks == other.tracks)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        header = data.read(self.__CUESHEET_SIZE)
        media_catalog_number, lead_in_samples, flags, num_tracks = \
            struct.unpack(self.__CUESHEET_FORMAT, header)
        self.media_catalog_number = media_catalog_number.rstrip(b'\0')
        self.lead_in_samples = lead_in_samples
        self.compact_disc = bool(flags & 0x80)
        self.tracks = []
        for i in xrange(num_tracks):
            track = data.read(self.__CUESHEET_TRACK_SIZE)
            start_offset, track_number, isrc_padded, flags, num_indexes = \
                struct.unpack(self.__CUESHEET_TRACK_FORMAT, track)
            isrc = isrc_padded.rstrip(b'\0')
            type_ = (flags & 0x80) >> 7
            pre_emphasis = bool(flags & 0x40)
            val = CueSheetTrack(
                track_number, start_offset, isrc, type_, pre_emphasis)
            for j in xrange(num_indexes):
                index = data.read(self.__CUESHEET_TRACKINDEX_SIZE)
                index_offset, index_number = struct.unpack(
                    self.__CUESHEET_TRACKINDEX_FORMAT, index)
                val.indexes.append(
                    CueSheetTrackIndex(index_number, index_offset))
            self.tracks.append(val)

    def write(self):
        f = cBytesIO()
        flags = 0
        if self.compact_disc:
            flags |= 0x80
        packed = struct.pack(
            self.__CUESHEET_FORMAT, self.media_catalog_number,
            self.lead_in_samples, flags, len(self.tracks))
        f.write(packed)
        for track in self.tracks:
            track_flags = 0
            track_flags |= (track.type & 1) << 7
            if track.pre_emphasis:
                track_flags |= 0x40
            track_packed = struct.pack(
                self.__CUESHEET_TRACK_FORMAT, track.start_offset,
                track.track_number, track.isrc, track_flags,
                len(track.indexes))
            f.write(track_packed)
            for index in track.indexes:
                index_packed = struct.pack(
                    self.__CUESHEET_TRACKINDEX_FORMAT,
                    index.index_offset, index.index_number)
                f.write(index_packed)
        return f.getvalue()

    def __repr__(self):
        return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, "
                 "tracks=%r>") %
                (type(self).__name__, self.media_catalog_number,
                 self.lead_in_samples, self.compact_disc, self.tracks))


class Picture(MetadataBlock):
    """Read and write FLAC embedded pictures.

    Attributes:

    * type -- picture type (same as types for ID3 APIC frames)
    * mime -- MIME type of the picture
    * desc -- picture's description
    * width -- width in pixels
    * height -- height in pixels
    * depth -- color depth in bits-per-pixel
    * colors -- number of colors for indexed palettes (like GIF),
      0 for non-indexed
    * data -- picture data

    To create a picture from file (in order to add to a FLAC file),
    instantiate this object without passing anything to the constructor and
    then set the properties manually::

        pic = Picture()

        with open("Folder.jpg", "rb") as f:
            pic.data = f.read()

        pic.type = id3.PictureType.COVER_FRONT
        pic.mime = u"image/jpeg"
        pic.width = 500
        pic.height = 500
        pic.depth = 16  # color depth
    """

    code = 6
    _distrust_size = True

    def __init__(self, data=None):
        self.type = 0
        self.mime = u''
        self.desc = u''
        self.width = 0
        self.height = 0
        self.depth = 0
        self.colors = 0
        self.data = b''
        super(Picture, self).__init__(data)

    def __eq__(self, other):
        try:
            return (self.type == other.type and
                    self.mime == other.mime and
                    self.desc == other.desc and
                    self.width == other.width and
                    self.height == other.height and
                    self.depth == other.depth and
                    self.colors == other.colors and
                    self.data == other.data)
        except (AttributeError, TypeError):
            return False

    __hash__ = MetadataBlock.__hash__

    def load(self, data):
        self.type, length = struct.unpack('>2I', data.read(8))
        self.mime = data.read(length).decode('UTF-8', 'replace')
        length, = struct.unpack('>I', data.read(4))
        self.desc = data.read(length).decode('UTF-8', 'replace')
        (self.width, self.height, self.depth,
         self.colors, length) = struct.unpack('>5I', data.read(20))
        self.data = data.read(length)

    def write(self):
        f = cBytesIO()
        mime = self.mime.encode('UTF-8')
        f.write(struct.pack('>2I', self.type, len(mime)))
        f.write(mime)
        desc = self.desc.encode('UTF-8')
        f.write(struct.pack('>I', len(desc)))
        f.write(desc)
        f.write(struct.pack('>5I', self.width, self.height, self.depth,
                            self.colors, len(self.data)))
        f.write(self.data)
        return f.getvalue()

    def __repr__(self):
        return "<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime,
                                         len(self.data))
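
The Picture docstring above sketches the creation flow; spelled out end to end (paths hypothetical), attaching front-cover art to a FLAC file looks like this:

from mutagen import id3
from mutagen.flac import FLAC, Picture

pic = Picture()
with open("Folder.jpg", "rb") as f:  # hypothetical cover image
    pic.data = f.read()
pic.type = id3.PictureType.COVER_FRONT
pic.mime = u"image/jpeg"
pic.width = 500
pic.height = 500
pic.depth = 24  # bits per pixel

audio = FLAC("song.flac")  # hypothetical path
audio.add_picture(pic)     # appends a code-6 (PICTURE) metadata block
audio.save()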


class Padding(MetadataBlock):
    """Empty padding space for metadata blocks.

    To avoid rewriting the entire FLAC file when editing comments,
    metadata is often padded. Padding should occur at the end, and no
    more than one padding block should be in any FLAC file.
    """

    code = 1

    def __init__(self, data=b""):
        super(Padding, self).__init__(data)

    def load(self, data):
        self.length = len(data.read())

    def write(self):
        try:
            return b"\x00" * self.length
        # On some 64 bit platforms this won't generate a MemoryError
        # or OverflowError since you might have enough RAM, but it
        # still generates a ValueError. On other 64 bit platforms,
        # this will still succeed for extremely large values.
        # Those should never happen in the real world, and if they
        # do, writeblocks will catch it.
        except (OverflowError, ValueError, MemoryError):
            raise error("cannot write %d bytes" % self.length)

    def __eq__(self, other):
        return isinstance(other, Padding) and self.length == other.length

    __hash__ = MetadataBlock.__hash__

    def __repr__(self):
        return "<%s (%d bytes)>" % (type(self).__name__, self.length)


class FLAC(mutagen.FileType):
    """A FLAC audio file.

    Attributes:

    * cuesheet -- CueSheet object, if any
    * seektable -- SeekTable object, if any
    * pictures -- list of embedded pictures
    """

    _mimes = ["audio/x-flac", "application/x-flac"]

    info = None
    """A `StreamInfo`"""

    tags = None
    """A `VCommentDict`"""

    METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict,
                       CueSheet, Picture]
    """Known metadata block types, indexed by ID."""

    @staticmethod
    def score(filename, fileobj, header_data):
        return (header_data.startswith(b"fLaC") +
                endswith(filename.lower(), ".flac") * 3)

    def __read_metadata_block(self, fileobj):
        byte = ord(fileobj.read(1))
        size = to_int_be(fileobj.read(3))
        code = byte & 0x7F
        last_block = bool(byte & 0x80)

        try:
            block_type = self.METADATA_BLOCKS[code] or MetadataBlock
        except IndexError:
            block_type = MetadataBlock

        if block_type._distrust_size:
            # Some jackass is writing broken Metadata block length
            # for Vorbis comment blocks, and the FLAC reference
            # implementation can parse them (mostly by accident),
            # so we have to too. Instead of parsing the size
            # given, parse an actual Vorbis comment, leaving
            # fileobj in the right position.
            # http://code.google.com/p/mutagen/issues/detail?id=52
            # ..same for the Picture block:
            # http://code.google.com/p/mutagen/issues/detail?id=106
            start = fileobj.tell()
            block = block_type(fileobj)
            real_size = fileobj.tell() - start
            if real_size > MetadataBlock._MAX_SIZE:
                block._invalid_overflow_size = size
        else:
            data = fileobj.read(size)
            block = block_type(data)
        block.code = code

        if block.code == VCFLACDict.code:
            if self.tags is None:
                self.tags = block
            else:
                raise FLACVorbisError("> 1 Vorbis comment block found")
        elif block.code == CueSheet.code:
            if self.cuesheet is None:
                self.cuesheet = block
            else:
                raise error("> 1 CueSheet block found")
        elif block.code == SeekTable.code:
            if self.seektable is None:
                self.seektable = block
            else:
                raise error("> 1 SeekTable block found")
        self.metadata_blocks.append(block)
        return not last_block

    def add_tags(self):
        """Add a Vorbis comment block to the file."""
        if self.tags is None:
            self.tags = VCFLACDict()
            self.metadata_blocks.append(self.tags)
        else:
            raise FLACVorbisError("a Vorbis comment already exists")

    add_vorbiscomment = add_tags

    def delete(self, filename=None):
        """Remove Vorbis comments from a file.

        If no filename is given, the one most recently loaded is used.
        """
        if filename is None:
            filename = self.filename

        if self.tags is not None:
            self.metadata_blocks.remove(self.tags)
            self.save(padding=lambda x: 0)
            self.metadata_blocks.append(self.tags)
            self.tags.clear()

    vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.")

    def load(self, filename):
        """Load file information from a filename."""

        self.metadata_blocks = []
        self.tags = None
        self.cuesheet = None
        self.seektable = None
        self.filename = filename
        fileobj = StrictFileObject(open(filename, "rb"))
        try:
            self.__check_header(fileobj)
            while self.__read_metadata_block(fileobj):
                pass
        finally:
            fileobj.close()

        try:
            self.metadata_blocks[0].length
        except (AttributeError, IndexError):
            raise FLACNoHeaderError("Stream info block not found")

    @property
    def info(self):
        return self.metadata_blocks[0]

    def add_picture(self, picture):
        """Add a new picture to the file."""
        self.metadata_blocks.append(picture)

    def clear_pictures(self):
        """Delete all pictures from the file."""

        blocks = [b for b in self.metadata_blocks if b.code != Picture.code]
        self.metadata_blocks = blocks

    @property
    def pictures(self):
        """List of embedded pictures"""

        return [b for b in self.metadata_blocks if b.code == Picture.code]

    def save(self, filename=None, deleteid3=False, padding=None):
        """Save metadata blocks to a file.

        If no filename is given, the one most recently loaded is used.
        """

        if filename is None:
            filename = self.filename

        with open(filename, 'rb+') as f:
            header = self.__check_header(f)
            audio_offset = self.__find_audio_offset(f)
            # "fLaC" and maybe ID3
            available = audio_offset - header

            # Delete ID3v2
            if deleteid3 and header > 4:
                available += header - 4
                header = 4

            content_size = get_size(f) - audio_offset
            assert content_size >= 0
            data = MetadataBlock._writeblocks(
                self.metadata_blocks, available, content_size, padding)
            data_size = len(data)

            resize_bytes(f, available, data_size, header)
            f.seek(header - 4)
            f.write(b"fLaC")
            f.write(data)

            # Delete ID3v1
            if deleteid3:
                try:
                    f.seek(-128, 2)
                except IOError:
                    pass
                else:
                    if f.read(3) == b"TAG":
                        f.seek(-128, 2)
                        f.truncate()

    def __find_audio_offset(self, fileobj):
        byte = 0x00
        while not (byte & 0x80):
            byte = ord(fileobj.read(1))
            size = to_int_be(fileobj.read(3))
            try:
                block_type = self.METADATA_BLOCKS[byte & 0x7F]
            except IndexError:
                block_type = None

            if block_type and block_type._distrust_size:
                # See comments in read_metadata_block; the size can't
                # be trusted for Vorbis comment blocks and Picture block
                block_type(fileobj)
            else:
                fileobj.read(size)
        return fileobj.tell()

    def __check_header(self, fileobj):
        """Returns the offset of the flac block start
        (skipping id3 tags if found). The passed fileobj will be advanced to
        that offset as well.
        """

        size = 4
        header = fileobj.read(4)
        if header != b"fLaC":
            size = None
            if header[:3] == b"ID3":
                size = 14 + BitPaddedInt(fileobj.read(6)[2:])
                fileobj.seek(size - 4)
                if fileobj.read(4) != b"fLaC":
                    size = None
        if size is None:
            raise FLACNoHeaderError(
                "%r is not a valid FLAC file" % fileobj.name)
        return size


Open = FLAC


def delete(filename):
    """Remove tags from a file."""
    FLAC(filename).delete()
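
A short sketch of the FLAC class in use — reading stream info, editing the Vorbis comment, and saving; the path is hypothetical:

from mutagen.flac import FLAC

audio = FLAC("song.flac")  # hypothetical path

# info is metadata_blocks[0], the mandatory STREAMINFO block.
print("%.2f seconds @ %d Hz" % (audio.info.length, audio.info.sample_rate))

# tags is the VCFLACDict parsed from the single Vorbis comment block.
if audio.tags is None:
    audio.add_tags()
audio["title"] = u"An Example"
audio.save()  # rewrites the metadata blocks, re-padding as needed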
@@ -1,635 +0,0 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2005 Michael Urman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

import struct
from struct import unpack, pack

from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \
    xrange
from .._util import total_ordering, decode_terminated, enum, izip
from ._util import BitPaddedInt


@enum
class PictureType(object):
    """Enumeration of image types defined by the ID3 standard for the APIC
    frame, but also reused in WMA/FLAC/VorbisComment.
    """

    OTHER = 0
    """Other"""

    FILE_ICON = 1
    """32x32 pixels 'file icon' (PNG only)"""

    OTHER_FILE_ICON = 2
    """Other file icon"""

    COVER_FRONT = 3
    """Cover (front)"""

    COVER_BACK = 4
    """Cover (back)"""

    LEAFLET_PAGE = 5
    """Leaflet page"""

    MEDIA = 6
    """Media (e.g. label side of CD)"""

    LEAD_ARTIST = 7
    """Lead artist/lead performer/soloist"""

    ARTIST = 8
    """Artist/performer"""

    CONDUCTOR = 9
    """Conductor"""

    BAND = 10
    """Band/Orchestra"""

    COMPOSER = 11
    """Composer"""

    LYRICIST = 12
    """Lyricist/text writer"""

    RECORDING_LOCATION = 13
    """Recording Location"""

    DURING_RECORDING = 14
    """During recording"""

    DURING_PERFORMANCE = 15
    """During performance"""

    SCREEN_CAPTURE = 16
    """Movie/video screen capture"""

    FISH = 17
    """A bright coloured fish"""

    ILLUSTRATION = 18
    """Illustration"""

    BAND_LOGOTYPE = 19
    """Band/artist logotype"""

    PUBLISHER_LOGOTYPE = 20
    """Publisher/Studio logotype"""


class SpecError(Exception):
    pass


class Spec(object):

    def __init__(self, name):
        self.name = name

    def __hash__(self):
        raise TypeError("Spec objects are unhashable")

    def _validate23(self, frame, value, **kwargs):
        """Return a possibly modified value which, if written,
        results in valid id3v2.3 data.
        """

        return value

    def read(self, frame, data):
        """Returns the (value, left_data) or raises SpecError"""

        raise NotImplementedError

    def write(self, frame, value):
        raise NotImplementedError

    def validate(self, frame, value):
        """Returns the validated data or raises ValueError/TypeError"""

        raise NotImplementedError


class ByteSpec(Spec):
    def read(self, frame, data):
        return bytearray(data)[0], data[1:]

    def write(self, frame, value):
        return chr_(value)

    def validate(self, frame, value):
        if value is not None:
            chr_(value)
        return value


class IntegerSpec(Spec):
    def read(self, frame, data):
        return int(BitPaddedInt(data, bits=8)), b''

    def write(self, frame, value):
        return BitPaddedInt.to_str(value, bits=8, width=-1)

    def validate(self, frame, value):
        return value


class SizedIntegerSpec(Spec):
    def __init__(self, name, size):
        self.name, self.__sz = name, size

    def read(self, frame, data):
        return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:]

    def write(self, frame, value):
        return BitPaddedInt.to_str(value, bits=8, width=self.__sz)

    def validate(self, frame, value):
        return value


@enum
class Encoding(object):
    """Text Encoding"""

    LATIN1 = 0
    """ISO-8859-1"""

    UTF16 = 1
    """UTF-16 with BOM"""

    UTF16BE = 2
    """UTF-16BE without BOM"""

    UTF8 = 3
    """UTF-8"""


class EncodingSpec(ByteSpec):

    def read(self, frame, data):
        enc, data = super(EncodingSpec, self).read(frame, data)
        if enc not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE,
                       Encoding.UTF8):
            raise SpecError('Invalid Encoding: %r' % enc)
        return enc, data

    def validate(self, frame, value):
        if value is None:
            return None
        if value not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE,
                         Encoding.UTF8):
            raise ValueError('Invalid Encoding: %r' % value)
        return value

    def _validate23(self, frame, value, **kwargs):
        # only 0, 1 are valid in v2.3, default to utf-16
        if value not in (Encoding.LATIN1, Encoding.UTF16):
            value = Encoding.UTF16
        return value


class StringSpec(Spec):
    """A fixed size ASCII only payload."""

    def __init__(self, name, length):
        super(StringSpec, self).__init__(name)
        self.len = length

    def read(s, frame, data):
        chunk = data[:s.len]
        try:
            ascii = chunk.decode("ascii")
        except UnicodeDecodeError:
            raise SpecError("not ascii")
        else:
            if PY3:
                chunk = ascii

        return chunk, data[s.len:]

    def write(s, frame, value):
        if value is None:
            return b'\x00' * s.len
        else:
            if PY3:
                value = value.encode("ascii")
            return (bytes(value) + b'\x00' * s.len)[:s.len]

    def validate(s, frame, value):
        if value is None:
            return None

        if PY3:
            if not isinstance(value, str):
                raise TypeError("%s has to be str" % s.name)
            value.encode("ascii")
        else:
            if not isinstance(value, bytes):
                value = value.encode("ascii")

        if len(value) == s.len:
            return value

        raise ValueError('Invalid StringSpec[%d] data: %r' % (s.len, value))


class BinaryDataSpec(Spec):
    def read(self, frame, data):
        return data, b''

    def write(self, frame, value):
        if value is None:
            return b""
        if isinstance(value, bytes):
            return value
        value = text_type(value).encode("ascii")
        return value

    def validate(self, frame, value):
        if value is None:
            return None

        if isinstance(value, bytes):
            return value
        elif PY3:
            raise TypeError("%s has to be bytes" % self.name)

        value = text_type(value).encode("ascii")
        return value


class EncodedTextSpec(Spec):

    _encodings = {
        Encoding.LATIN1: ('latin1', b'\x00'),
        Encoding.UTF16: ('utf16', b'\x00\x00'),
        Encoding.UTF16BE: ('utf_16_be', b'\x00\x00'),
        Encoding.UTF8: ('utf8', b'\x00'),
    }

    def read(self, frame, data):
        enc, term = self._encodings[frame.encoding]
        try:
            # allow missing termination
            return decode_terminated(data, enc, strict=False)
        except ValueError:
            # utf-16 termination with missing BOM, or single NULL
            if not data[:len(term)].strip(b"\x00"):
                return u"", data[len(term):]

            # utf-16 data with single NULL, see issue 169
            try:
                return decode_terminated(data + b"\x00", enc)
            except ValueError:
                raise SpecError("Decoding error")

    def write(self, frame, value):
        enc, term = self._encodings[frame.encoding]
        return value.encode(enc) + term

    def validate(self, frame, value):
        return text_type(value)


class MultiSpec(Spec):
    def __init__(self, name, *specs, **kw):
        super(MultiSpec, self).__init__(name)
        self.specs = specs
        self.sep = kw.get('sep')

    def read(self, frame, data):
        values = []
        while data:
            record = []
            for spec in self.specs:
                value, data = spec.read(frame, data)
                record.append(value)
            if len(self.specs) != 1:
                values.append(record)
            else:
                values.append(record[0])
        return values, data

    def write(self, frame, value):
        data = []
        if len(self.specs) == 1:
            for v in value:
                data.append(self.specs[0].write(frame, v))
        else:
            for record in value:
                for v, s in izip(record, self.specs):
                    data.append(s.write(frame, v))
        return b''.join(data)

    def validate(self, frame, value):
        if value is None:
            return []
        if self.sep and isinstance(value, string_types):
            value = value.split(self.sep)
        if isinstance(value, list):
            if len(self.specs) == 1:
                return [self.specs[0].validate(frame, v) for v in value]
            else:
                return [
                    [s.validate(frame, v) for (v, s) in izip(val, self.specs)]
                    for val in value]
        raise ValueError('Invalid MultiSpec data: %r' % value)

    def _validate23(self, frame, value, **kwargs):
        if len(self.specs) != 1:
            return [[s._validate23(frame, v, **kwargs)
                     for (v, s) in izip(val, self.specs)]
                    for val in value]

        spec = self.specs[0]

        # Merge single text spec multispecs only.
        # (TimeStampSpec being the exception, but it's not a valid v2.3 frame)
        if not isinstance(spec, EncodedTextSpec) or \
                isinstance(spec, TimeStampSpec):
            return value

        value = [spec._validate23(frame, v, **kwargs) for v in value]
        if kwargs.get("sep") is not None:
            return [spec.validate(frame, kwargs["sep"].join(value))]
        return value


class EncodedNumericTextSpec(EncodedTextSpec):
    pass


class EncodedNumericPartTextSpec(EncodedTextSpec):
    pass


class Latin1TextSpec(EncodedTextSpec):
    def read(self, frame, data):
        if b'\x00' in data:
            data, ret = data.split(b'\x00', 1)
        else:
            ret = b''
        return data.decode('latin1'), ret

    def write(self, data, value):
        return value.encode('latin1') + b'\x00'

    def validate(self, frame, value):
        return text_type(value)


@swap_to_string
@total_ordering
class ID3TimeStamp(object):
    """A time stamp in ID3v2 format.

    This is a restricted form of the ISO 8601 standard; time stamps
    take the form of:
        YYYY-MM-DD HH:MM:SS
    Or some partial form (YYYY-MM-DD HH, YYYY, etc.).

    The 'text' attribute contains the raw text data of the time stamp.
    """

    import re

    def __init__(self, text):
        if isinstance(text, ID3TimeStamp):
            text = text.text
        elif not isinstance(text, text_type):
            if PY3:
                raise TypeError("not a str")
            text = text.decode("utf-8")

        self.text = text

    __formats = ['%04d'] + ['%02d'] * 5
    __seps = ['-', '-', ' ', ':', ':', 'x']

    def get_text(self):
        parts = [self.year, self.month, self.day,
                 self.hour, self.minute, self.second]
        pieces = []
        for i, part in enumerate(parts):
            if part is None:
                break
            pieces.append(self.__formats[i] % part + self.__seps[i])
        return u''.join(pieces)[:-1]

    def set_text(self, text, splitre=re.compile('[-T:/.]|\s+')):
        year, month, day, hour, minute, second = \
            splitre.split(text + ':::::')[:6]
        for a in 'year month day hour minute second'.split():
            try:
                v = int(locals()[a])
            except ValueError:
                v = None
            setattr(self, a, v)

    text = property(get_text, set_text, doc="ID3v2.4 date and time.")

    def __str__(self):
        return self.text

    def __bytes__(self):
        return self.text.encode("utf-8")

    def __repr__(self):
        return repr(self.text)

    def __eq__(self, other):
        return self.text == other.text

    def __lt__(self, other):
        return self.text < other.text

    __hash__ = object.__hash__

    def encode(self, *args):
        return self.text.encode(*args)


class TimeStampSpec(EncodedTextSpec):
    def read(self, frame, data):
        value, data = super(TimeStampSpec, self).read(frame, data)
        return self.validate(frame, value), data

    def write(self, frame, data):
        return super(TimeStampSpec, self).write(frame,
                                                data.text.replace(' ', 'T'))

    def validate(self, frame, value):
        try:
            return ID3TimeStamp(value)
        except TypeError:
            raise ValueError("Invalid ID3TimeStamp: %r" % value)


class ChannelSpec(ByteSpec):
    (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
     BACKCENTRE, SUBWOOFER) = xrange(9)


class VolumeAdjustmentSpec(Spec):
    def read(self, frame, data):
        value, = unpack('>h', data[0:2])
        return value / 512.0, data[2:]

    def write(self, frame, value):
        number = int(round(value * 512))
        # pack only fails in 2.7, do it manually in 2.6
        if not -32768 <= number <= 32767:
            raise SpecError("not in range")
        return pack('>h', number)

    def validate(self, frame, value):
        if value is not None:
            try:
                self.write(frame, value)
            except SpecError:
                raise ValueError("out of range")
        return value


class VolumePeakSpec(Spec):
    def read(self, frame, data):
        # http://bugs.xmms.org/attachment.cgi?id=113&action=view
        peak = 0
        data_array = bytearray(data)
        bits = data_array[0]
        vol_bytes = min(4, (bits + 7) >> 3)
        # not enough frame data
        if vol_bytes + 1 > len(data):
            raise SpecError("not enough frame data")
        shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8
        for i in xrange(1, vol_bytes + 1):
            peak *= 256
            peak += data_array[i]
        peak *= 2 ** shift
        return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:]

    def write(self, frame, value):
        number = int(round(value * 32768))
        # pack only fails in 2.7, do it manually in 2.6
        if not 0 <= number <= 65535:
            raise SpecError("not in range")
        # always write as 16 bits for sanity.
        return b"\x10" + pack('>H', number)

    def validate(self, frame, value):
        if value is not None:
            try:
                self.write(frame, value)
            except SpecError:
                raise ValueError("out of range")
        return value


class SynchronizedTextSpec(EncodedTextSpec):
    def read(self, frame, data):
        texts = []
        encoding, term = self._encodings[frame.encoding]
        while data:
            try:
                value, data = decode_terminated(data, encoding)
            except ValueError:
                raise SpecError("decoding error")

            if len(data) < 4:
                raise SpecError("not enough data")
            time, = struct.unpack(">I", data[:4])

            texts.append((value, time))
            data = data[4:]
        return texts, b""

    def write(self, frame, value):
        data = []
        encoding, term = self._encodings[frame.encoding]
        for text, time in value:
            text = text.encode(encoding) + term
            data.append(text + struct.pack(">I", time))
        return b"".join(data)

    def validate(self, frame, value):
        return value


class KeyEventSpec(Spec):
    def read(self, frame, data):
        events = []
        while len(data) >= 5:
            events.append(struct.unpack(">bI", data[:5]))
            data = data[5:]
        return events, data

    def write(self, frame, value):
        return b"".join(struct.pack(">bI", *event) for event in value)

    def validate(self, frame, value):
        return value


class VolumeAdjustmentsSpec(Spec):
    # Not to be confused with VolumeAdjustmentSpec.
    def read(self, frame, data):
        adjustments = {}
        while len(data) >= 4:
            freq, adj = struct.unpack(">Hh", data[:4])
            data = data[4:]
            freq /= 2.0
            adj /= 512.0
            adjustments[freq] = adj
        adjustments = sorted(adjustments.items())
        return adjustments, data

    def write(self, frame, value):
        value.sort()
        return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512))
                        for (freq, adj) in value)

    def validate(self, frame, value):
        return value


class ASPIIndexSpec(Spec):
    def read(self, frame, data):
        if frame.b == 16:
            format = "H"
            size = 2
        elif frame.b == 8:
            format = "B"
            size = 1
        else:
            raise SpecError("invalid bit count in ASPI (%d)" % frame.b)

        indexes = data[:frame.N * size]
        data = data[frame.N * size:]
        try:
            return list(struct.unpack(">" + format * frame.N, indexes)), data
        except struct.error as e:
            raise SpecError(e)

    def write(self, frame, values):
        if frame.b == 16:
            format = "H"
        elif frame.b == 8:
            format = "B"
        else:
            raise SpecError("frame.b must be 8 or 16")
        try:
            return struct.pack(">" + format * frame.N, *values)
        except struct.error as e:
            raise SpecError(e)

    def validate(self, frame, values):
        return values