diff --git a/.gitignore b/.gitignore
index 70182ec2..959fc2ad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,5 @@
 *.pyo
+__local__/
 machine_guid
 /resources/media/Thumbs.db
 
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a220718d..7938f5fc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,7 +2,7 @@
 
 Thanks you for contributing to Emby for Kodi!
 
-* Make pull requests towards the **master** branch;
 * Keep the maximum line length shorter than 100 characters to keep things clean and readable;
 * Follow pep8 style as closely as possible: https://www.python.org/dev/peps/pep-0008/
 * Add comments if necessary.
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index d6a93266..00000000
--- a/LICENSE
+++ /dev/null
@@ -1,340 +0,0 @@
-GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc., <http://fsf.org/>
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.) You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
- To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have. You must make sure that they, too, receive or can get the
-source code. And you must show them these terms so they know their
-rights.
-
- We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
- Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
- Finally, any free program is threatened constantly by software
-patents. We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary. To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- GNU GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License. The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language. (Hereinafter, translation is included without limitation in
-the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
- 1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
- 2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
- a) You must cause the modified files to carry prominent notices
- stating that you changed the files and the date of any change.
-
- b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any
- part thereof, to be licensed as a whole at no charge to all third
- parties under the terms of this License.
-
- c) If the modified program normally reads commands interactively
- when run, you must cause it, when started running for such
- interactive use in the most ordinary way, to print or display an
- announcement including an appropriate copyright notice and a
- notice that there is no warranty (or else, saying that you provide
- a warranty) and that users may redistribute the program under
- these conditions, and telling the user how to view a copy of this
- License. (Exception: if the Program itself is interactive but
- does not normally print such an announcement, your work based on
- the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
- 3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
- a) Accompany it with the complete corresponding machine-readable
- source code, which must be distributed under the terms of Sections
- 1 and 2 above on a medium customarily used for software interchange; or,
-
- b) Accompany it with a written offer, valid for at least three
- years, to give any third party, for a charge no more than your
- cost of physically performing source distribution, a complete
- machine-readable copy of the corresponding source code, to be
- distributed under the terms of Sections 1 and 2 above on a medium
- customarily used for software interchange; or,
-
- c) Accompany it with the information you received as to the offer
- to distribute corresponding source code. (This alternative is
- allowed only for noncommercial distribution and only if you
- received the program in object code or executable form with such
- an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable. However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
- 5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
- 6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
- 7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all. For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded. In such case, this License incorporates
-the limitation as if written in the body of this License.
-
- 9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation. If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
- 10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission. For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this. Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
- 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
- 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- {description}
- Copyright (C) {year} {fullname}
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- {signature of Ty Coon}, 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
-
diff --git a/LICENSE.txt b/LICENSE.txt
index 1c9b0bde..e72bfdda 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,283 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
- GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.
 
- Preamble
+ Preamble
 
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.) You can apply it to
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
 your programs, too.
 
  When we speak of free software, we are referring to freedom, not
 price. Our General Public Licenses are designed to make sure that you
 have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
 
- To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
 
  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have. You must make sure that they, too, receive or can get the
-source code. And you must show them these terms so they know their
-rights.
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
 
- We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
 
- Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
 
- Finally, any free program is threatened constantly by software
-patents. We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary. To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
 
  The precise terms and conditions for copying, distribution and
 modification follow.
-
- GNU GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
- 0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License. The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language. (Hereinafter, translation is included without limitation in
-the term "modification".) Each licensee is addressed as "you".
+ TERMS AND CONDITIONS
 
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
+ 0. Definitions.
 
- 1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
+ "This License" refers to version 3 of the GNU General Public License.
 
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
 
- 2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
 
- a) You must cause the modified files to carry prominent notices
- stating that you changed the files and the date of any change.
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
 
- b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any
- part thereof, to be licensed as a whole at no charge to all third
- parties under the terms of this License.
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
 
- c) If the modified program normally reads commands interactively
- when run, you must cause it, when started running for such
- interactive use in the most ordinary way, to print or display an
- announcement including an appropriate copyright notice and a
- notice that there is no warranty (or else, saying that you provide
- a warranty) and that users may redistribute the program under
- these conditions, and telling the user how to view a copy of this
- License. (Exception: if the Program itself is interactive but
- does not normally print such an announcement, your work based on
- the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
 
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
 
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
 
- 3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
+ 1. Source Code.
 
- a) Accompany it with the complete corresponding machine-readable
- source code, which must be distributed under the terms of Sections
- 1 and 2 above on a medium customarily used for software interchange; or,
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
 
- b) Accompany it with a written offer, valid for at least three
- years, to give any third party, for a charge no more than your
- cost of physically performing source distribution, a complete
- machine-readable copy of the corresponding source code, to be
- distributed under the terms of Sections 1 and 2 above on a medium
- customarily used for software interchange; or,
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
 
- c) Accompany it with the information you received as to the offer
- to distribute corresponding source code. (This alternative is
- allowed only for noncommercial distribution and only if you
- received the program in object code or executable form with such
- an offer, in accord with Subsection b above.)
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
 
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable. However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
 
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
 
- 5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
+ The Corresponding Source for a work in source code form is that
+same work.
 
- 6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
 this License.
 
- 7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
 otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all. For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
 
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
+ 13. Use with the GNU Affero General Public License.
 
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
 
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded. In such case, this License incorporates
-the limitation as if written in the body of this License.
+ 14. Revised Versions of this License.
 
- 9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time. Such new versions will
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
 be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.
 
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation. If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
 
- 10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission. For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this. Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
 
- NO WARRANTY
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
 
- 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
+ 15. Disclaimer of Warranty.
 
- 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - END OF TERMS AND CONDITIONS -------------------------------------------------------------------------- -------------------------------------------------------------------------- + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
\ No newline at end of file
diff --git a/README.md b/README.md
index 13f89483..52258167 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@
 [](https://github.com/MediaBrowser/plugin.video.emby/wiki)
 [](https://emby.media/community/index.php?/forum/99-kodi/)
 [](https://ko-fi.com/A5354BI)
-[](https://emby.media/)
+[](https://emby.media/)
 ___
 **A whole new way to manage and view your media library.**
@@ -25,7 +25,6 @@ The add-on supports a hybrid approach. You can decide which Emby libraries to sy
 - Other features supported:
   + Simple Live TV presentation
   + Home Videos & photos
-  + Audiobooks
   + Playlists
   + Theme media
 - Direct play and transcode
@@ -36,19 +35,8 @@ The add-on supports a hybrid approach. You can decide which Emby libraries to sy
 - Backup your emby kodi profile. See the [Emby backup option](https://github.com/MediaBrowser/plugin.video.emby/wiki/Create-and-restore-from-backup)
 - and more...
 
-### Download and installation
-**Important notes**
-- To achieve direct play, you will need to ensure your Emby library paths point to network paths (e.g: "\\\\server\Media\Movies"). See the [Emby wiki](https://github.com/MediaBrowser/Wiki/wiki/Path%20Substitution) for additional information.
-- **The addon is not (and will not be) compatible with the MySQL database replacement in Kodi.** In fact, Emby takes over the point of having a MySQL database because it acts as a "man in the middle" for your entire media library.
-- Emby for Kodi is not currently compatible with Kodi's Video Extras addon unless native playback mode is used. **Deactivate Video Extras if content start randomly playing.**
-
-View this short [Youtube video](https://youtu.be/IaecDPcXI3I?t=119) to give you a better idea of the general process.
-
-1. Install the Emby for Kodi repository, from the repo install the Emby addon.
-2. Within a few seconds you should be prompted for your server-details.
-3. Once you're succesfully authenticated with your Emby server, the initial sync will start.
-4. The first sync of the Emby server to the local Kodi database may take some time depending on your device and library size.
-5. Once the full sync is done, you can browse your media in Kodi, and syncs will be done automatically in the background.
+### Install Emby for Kodi
+Get started with the [wiki guide](https://github.com/MediaBrowser/plugin.video.emby/wiki)
 
 ### Known limitations
 - Chapter images are missing unless native playback mode is used.
diff --git a/addon.xml b/addon.xml
index f04cd610..d355868e 100644
--- a/addon.xml
+++ b/addon.xml
@@ -1,13 +1,13 @@
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <addon id="plugin.video.emby"
        name="Emby"
-       version="3.1.35a"
+       version="4.0.3"
        provider-name="angelblue05">
     <requires>
         <import addon="xbmc.python" version="2.25.0"/>
-        <import addon="plugin.video.emby.movies" version="0.13" />
-        <import addon="plugin.video.emby.tvshows" version="0.13" />
-        <import addon="plugin.video.emby.musicvideos" version="0.13" />
+        <import addon="plugin.video.emby.movies" version="0.14" />
+        <import addon="plugin.video.emby.tvshows" version="0.14" />
+        <import addon="plugin.video.emby.musicvideos" version="0.14" />
     </requires>
 
     <extension point="xbmc.python.pluginsource" library="default.py">
@@ -37,7 +37,12 @@
         <summary lang="en"></summary>
         <description lang="en">Welcome to Emby for Kodi A whole new way to manage and view your media library. The Emby addon for Kodi combines the best of Kodi - ultra smooth navigation, beautiful UIs and playback of any file under the sun, and Emby - the most powerful fully open source multi-client media metadata indexer and server. Emby for Kodi is the absolute best way to enjoy the incredible Kodi playback engine combined with the power of Emby's centralized database. Features: Direct integration with the Kodi library for native Kodi speed Instant synchronization with the Emby server Full support for Movie, TV and Music collections Emby Server direct stream and transcoding support - use Kodi when you are away from home!</description>
         <news>
-            - Many fixes, however still chasing the slowness issue.
+            New stable release
+            The wiki has been updated, PLEASE READ: https://github.com/MediaBrowser/plugin.video.emby/wiki
+            Fix playback for Kodi Leia
+            Fix masterlock
+            Home videos and pictures now show under videos and picture add-ons
+            Dependencies were updated to 0.14!
         </news>
     </extension>
 </addon>
diff --git a/changelog.txt b/changelog.txt
deleted file mode 100644
index ddac6c06..00000000
--- a/changelog.txt
+++ /dev/null
@@ -1,781 +0,0 @@
-version 2.3.57
-- Close dialog after syncing collections
-- Update Leia support
-- Fix trakt/watched status issues caused by tvshows being assigned their imdb id instead of tvdb. You'll need to reset your database to fix this (or a repair might do the trick too)
-- Update photod (well now home videos and photos) library to properly display.
-- Few other fixes.
-
-version 2.3.56
-- Fixed connection not falling back to external when local is unavailable
-- Support server address without a port number
-- Add refresh boxsets (launch the emby add-on > Refresh Boxsets). It should not be necessary, but just in case.
-
-version 2.3.51
-- Temporarily disabled series pooling for non-admin users. A future server update + a manual sync will correct this. I'll make a quick announcement.
-
-version 2.3.50
-- Fixed Series pooling (requires a reset of database + a manual sync to apply)
-- Add PlayPause server event
-
-version 2.3.49
-- Update the websocket url for reverse proxy. wss://{server}/embywebsocket?api..
-- Fix music support for multiple Emby music libraries -- Fix music direct stream to use the original file container rather than defaulting to mp3 -- Fix subtitles to use original file container rather than defaulting to srt -- Fix network credentials using backslash for domain in their username -- Fix playback report to your Emby dashboard -- Add Polish translation - -version 2.3.47 - - Fix encoding error in context menu for non-ascii characters - - Fix playback reporting monitor for Kodi 17 - -version 2.3.35 - - DB connection management revamped - - Emby video nodes now have their own parent node - - Kodi 17 Ratings - - Thread pool system for Emby data loading - - A lot of small bug fixes - - Some code refactoring - -version 2.3.25 -- Fix on-wake sync - -version 2.3.24 -- Fix bug in the manual sync -- Add Italian translation - -version 2.3.23 -- Adjustments to the download throttle. See github wiki docs. -- Fix the fast sync -- Fix the emby backup -- other minor fixes - -version 2.3.12 -- Fix virtual episodes being processed -- Return offline items so they don't get removed in kodi -- other minor fixes - -version 2.3.8 -- Fix database connection -- other minor fixes - -version 2.3.6 -- Update French translation -- Fix screensaver bug - -version 2.3.4 -- add throttling for error event logging - -version 2.3.3 -- minor fix to exception handling - -version 2.3.1 -- minor fixes - -version 2.3.0 -- New stable version - -version 2.2.57 -- Fix for external subtitles while using HTTP playback -- Minor bug fixes - -version 2.2.52-54 -- Fix bugs that slipped in the few previous builds - -version 2.2.51 -- Rework manual sync -- Review OS -- Review view removal - -version 2.2.50 -- Add Kodi version to logging - -version 2.2.49 -- Add OS, Resolution and Lang to logging - -version 2.2.47 -- Small fix for logging - -version 2.2.46 -- Error logging improvments - -version 2.2.43 -- Review music -- Clean up syncing code - -version 2.2.41 -- Emergency update - -version 2.2.40 -- Review throttle -- Fix user migration for the new login method added after 2.2.19 (stable) -- Clean up code - -version 2.2.39 -- Update German translation -- Fix issue with throttle -- Fix server detection - -version 2.2.38 -- Fix series pooling - -version 2.2.37 -- Support favorite episodes -- Update Dutch translation -- Fix specials -1 bug -- minor fixes - -version 2.2.34 -- Repair sync can now be filtered by content type -- Automatically download external subs with language tag -- Fix broken music artwork -- Fix to music direct stream - -version 2.2.33 -- Fix manual sync crashing -- Update Portuguese translation -- Add support for German MPAA rating. You will need to reset your local database to apply the change. -- Add a backup option. Find out more: https://github.com/MediaBrowser/plugin.video.emby/wiki/Create-and-restore-from-backup - -version 2.2.32 -- Update the emby context menu -- Add option to disable the context menu in the add-on settings > extras tab - -version 2.2.31 -- Support series pooling. Will require to reset local database. Once content is resynced, proceed with a manual sync to apply the series pooling. 
-- Fix emby connect websocket issue, will also require updating server to beta 3.1.150 or higher -- Fix initial sync artwork dialog for Isengard - -version 2.2.30 -- Update German translation -- minor fixes - -version 2.2.28 -- Fix user selection when all users are hidden - -version 2.2.24 -- Filter music from fast sync response if music is disabled in the Kodi profile -- Fix restart server behavior in the add-on to fix post capabilities -- Fix ubuntu importerror crash -- Update Russian translation - -version 2.2.23 -- NEW! Emby connect integration. Find out more on the emby.media forums -- Sync season name for Jarvis and higher -- Expand video quality selection (25/30/35 Mpbs) -- Move deviceId to a permanent location to outlive reinstalls of the add-on -- Fix virtual episodes crashing sync -- Fix plugin listing such as home videos not loading -- Fix platform detection - -version 2.2.21 -- Fix new external subtitles from preventing playback in the event the subtitles had no language tag - -version 2.2.20 -- NEW: Default to HTTP playback when using add-on playback mode out of the box. -- Add string translation (German, French and Portuguese) -- Add option to download external subtitles when playing from HTTP (add-on settings > playback) -- Fix navigation not waking up display -- Fix fast sync being used to save update times when plugin is not installed -- Fix tv show detection when verifying if file exists. - -version 2.2.19 -- Fix transcode (logging error) - -version 2.2.18 -- Fix logging - -version 2.2.17 -- Fix crash when device wakes up -- Add option to disable external subs for direct stream - under add-on settings > playback - -version 2.2.16 -- Fix strptime error -- Temporary fix for database being locked -- Fix watched status failing to update if offer delete after playing is enabled but skipped - -version 2.2.14 -- Progress dialog always shows for full sync -- Add (if item count greater than) to options for inc sync progress dialog display -- Limit artwork loading to 25 threads by default -- Fix delete option -- Fix music log error -- Add string translation (Spanish) - -version 2.2.12 -- Preparation for Emby connect -- Add string translation (Dutch, Russian and Swedish) -- Various bug fixes - -version 2.2.11 -- Preparation for feature requests -- Add option to refresh Emby items via context menu -- Minor fixes - -version 2.2.10 -- Add keymap action for delete content: RunPlugin(plugin://plugin.video.emby?mode=delete) -- Fix various bugs - -version 2.2.9 -- Fix extrafanart - -version 2.2.8 -- Fix to photos not displaying directories without picutres. -- Fix to grouped views causing crash - -version 2.2.7 -- Prevent Kodi screensaver during the initial sync - -version 2.2.6 -- Fix unicode error -- Fix grouped folders error - -version 2.2.5 -- Add generate a new device Id option, found in the add-on settings > advanced. -- Offer to delete cached thumbnails upon database reset. -- Breaking fix for views. You will notice duplicates in your video nodes. When you have a moment to spare, run the refresh playlists/nodes action found by launching the emby add-on (this is not reversible). Your homescreen shortcuts actions will need to be redirected to the new playlists/nodes. -- Fix pictures, the shortcut should now appear under photo add-ons > emby. -- Fix view shortcuts to follow emby ordering. This changes the Emby.nodes.X ordering (automatically created shortcuts and via launching the emby add-on). This does not change the video nodes ordering. 
-- Fix ssl client certificate verification -- Fix resume -- Prevent artwork deletion from crashing the add-on -- Fix to import virtual season artwork - -version 2.2.4 -- Fix external subs being appended to direct play (via add-on playback) -- First attempt at keeping Kodi awake during the initial sync - -version 2.2.3 -- Fix resume - -version 2.2.2 -- Fix dialog crash in the manual sync -- Fix view duplicate views appearing via launching the emby add-on, when grouping views in emby - -version 2.2.1 -- Fix artist/album link for music videos -- Fix progress dialog when the manual sync runs at start up -- Fix encoding error for special characters in emby username -- Offer delete dialog after playback now times out after 2 mins - -version 2.1.4 -- Removed Emby delete via the Kodi context menu. It is exclusively offered via the extended emby context menu which is available for Isengard or higher. This change was necessary, because there was a risk of wiping the entire library if Kodi decides to run a clean database task and paths were set as plugin paths. - -version 2.1.3 -- Fix Live TV to terminate ffmpeg processes. - -version 2.1.2 -- Fix to repair entries if they are deleted by Kodi, but still exists in the Emby database. - -version 2.1.1 -- Update setting - skip emby delete confirmation, it is now under the extras tab. -- Update setting - new content notification, it's now disables the notification if the time is set to 0. -- Prevent manual sync from running if the add-on is not yet connected to the emby server. - -version 2.1.0 -- Add a throttle (automatically adjust the number of items requested at once) to prevent crashing during the initial sync -- Do not update the video library when there's a music-only update - -version 2.0.3 -- Add new retention time option that the latest server Sync plugin uses to help determine if full sync or inc sync should be used. -- Add control over new content pop up display time. You will find the settings under Extras > Enable new content notification -- Change to the transcode H265 setting. You will need to re-select the proper resolution, if you had the setting enabled. -- Change to the paths added to sources.xml -- Fix to the manual sync for the music library -- Fix resume when launching playback via the web client - -version 1.1.81 -- Fix missing deviceId -- Fix to newly added album/songs (if you experienced the bug, you will need to reset to fix it. Know that moving forward, it is corrected.) - -version 1.1.80 -- Add refresh for video nodes -- Fix for home videos (being unable to back out of the menu). Running refresh playlists/nodes will fix this. -- Fix to music, causing sync to crash - -version 1.1.76 -- Add music rating system -- Add home videos as a dynamic plugin entry (requires a reset) -- Add photo library -- Add/Fix force transcode setting for 720p-1080p/HEVC-H265 formats -- Fix to incremental sync, caused by the server restarting -- Fix for image caching during the initial sync on rpi devices -- Fix to audio/subtitles tracks (requires a repair, or reset) - -version 1.1.72 -- Fix to extrafanart -- Fix for artists deletion -- Fix for views - -version 1.1.70 -- Include AirsAfterSeason for special episodes ordering -- Cover art settings - label adjusted. A reset or repair will be required if you change the settings value. 
-- Fix duplicate views being created (reset will be required) -- Fix albums merge when they had the same name (reset will be required) -- Minor fix to songs - -version 1.1.69 -- Fix unicode error for video nodes -- Fix special episode ordering (repair sync can be run) -- Fix deletion via context menu -- Fix music add/update breaking incremental sync - -version 1.1.68 -- Minor fixes - -version 1.1.67 -- Add option to limit items requested at once from server -- Fix artwork cache -- Fix dialog crash - -version 1.1.66 -- Add manual refresh for playlists -- Fix fanart -- Fix H265 transcode -- Fix boxsets -- Fix people -- First attempt to fix next episode - -version 1.1.65 -- Fix aspect ratio error - -version 1.1.64 -- Fix trailer causing initial sync to crash - -version 1.1.63 -- Code refactoring of the add-on - -version 1.1.62 -- Fix connection to database staying open -- Fix artwork cache delete -- Add option to force transcode 1080P/H265 - -version 1.1.57 -- Fix for music videos directors - -version 1.1.55 -- Fix to incremental sync - database locked error -- Fix to music multi disc tracks ordering -- Fix to prioritize album artists and fall back to song artists if missing. - -version 1.1.53 -- Add visual warning when kodi version is incompatible -- Add ask to play trailers option -- Fix music singles -- Fix direct path not working during the initial setup -- Fix music videos missing artist link - -version 1.1.52 -- Report playback for music -- Support Emby tags for music videos -- Fix studio icon for movies -- FORCE RESET LOCAL DATABASE IN PLACE - -version 1.1.50 -- Ignore channels from syncing process -- Date added can now be updated -- Disable ssl warning -- Fix playlist when play command is issued outside of Kodi. - -version 1.1.48 -- Support Emby tags -- Respect emby "My views" settings -- Rework artwork api -- Rework playback (trailers, dvds) -- Fix mark as watched being reported twice (affected Trakt) -- Fix offer deletion -- Add direct path option to install wizard - -version 1.1.44 -- Play strm files regardless of playback method. -- Stack method for multi part, including trailers. -- Revise transcoding properties -- Fix channels name to display properly in Kodi. -- Fix DTS-HD MA display -- Fix profiles that were leaving threads running after loading a new profile in Kodi. - -version 1.1.43 -- Fix loop that was happening if you had intros and play next automatically enabled. - -version 1.1.42 -- Fix for cinema mode playback -- New skinhelper properties - -version 1.1.35 -- Added option to direct stream music library - useful for out of network playback. -- Fix error in reporting to the server, when playing music. -- Added external subtitles as selectable tracks for direct play and direct stream. -- Added extra setup dialog for the new music option. - -version 1.1.34 -- Fix for userdata causing the incremental sync to hang when user doesn't have music enabled. - -version 1.1.33 -- Implemented userdata update only -- Added progress dialog for incremental sync. -- Added the option to start Kodi session with permanent additional users. - -version 1.1.31 -- Added user image home property. - -version 1.1.30 -- Fix aspect ratio. Take into account metadata aspect ratio. 
-- Fix flag for NR -- Improve logging for transcoding playback - -version 1.1.29 -- Fix playback error -- added full changelog -- added description to the addon - -version 1.1.28 -- Fix playback for widgets - -version 1.1.27 -- Fix for nextup episodes -- Fix for transcoding not properly ending the ffmpeg process -- Added webclient remote control command to select the audio stream and subtitles -- You can now pre-select the audio and subtitles track when transcoding - -version 1.1.26 -- Moved the Date string from the path to a param for the Get Change list API endpoint. -- Season fanart is added -- Last date added is fixed for albums (will require a resync to correct your current listing) -- Server restarting message has been added. Enable it under the advanced tab of the add-on settings. - -version 1.1.25 -This contains a fix for music. -More info: -- We are now processing your music in batch of 200 items. You might see the scan get stuck at one point, it's very normal since the add-on is pulling all the data from your server for the next section to process. For music, we process the sections in the following order after boxsets: Artists, Albums, Songs. -For example, the scan gets stuck at 98% for Albums, it is currently pulling all your songs from your server. If you have a large amount of items for the songs section, it will take longer and might give the impression the scan is stuck, when it's not. :) Hopefully we will be able to improve the visual logging the reflect this in the future. -Please, let us know if you still see mentions of read timeout in your logs when scanning your music library in or if the scan still doesn't complete - -version 1.1.24 -- Sorry for the many updates in a row! Fix for video nodes not showing up. - -version 1.1.23 -- Fix a url encoding issue with time stamps of the new changes endpoint. - -version 1.1.22 -- Fixed a bug that was introduced in the last build. - -version 1.1.21 -- Fixed video node and source creation when using the new fast startup sync - -version 1.1.20 -- Added new fast startup sync feature. check out http://emby.media/community/index.php?/topic/23971-fast-startup-sync-server-plugin/ - -version 1.1.19 -- This version fixes the missing duration from home widgets - -version 1.1.18 -- We should be back on track with this build. The initial sync should complete once again. To make sure everything goes smoothly, you will need to reset and resync your library after updating to 1.1.18. Music also received a much needed update, thank you @marcelveldt. - -version 1.1.17 -- This version has support for Kodi 16 (aka Jarvis) - full rebuild needed. Allows video themes to be excluded when syncing theme media, contains a fix for non persistent settings and for local media flags - -version 1.1.16 -- Fix for the issue causing the initial sync to fail with 1.1.15. - -version 1.1.15 -Features: -- Added the option to delete movies after playback -- Resume jumpback (in seconds) -Fixes: -- Precise resume points -- Theme media - direct stream syncing -- Cast order should be the same as Emby's for Kodi Helix and Isengard -- Genres - Clean up genres if modified, as well as correctly display Genres for TV shows. -- Masterlock - We now create sources during the first initial sync. This means your library should now display when using Masterlock. -- Error during login - due to Emby "disable access user preferences" setting. -- Plugin paths now support mediaflags (bluray, 3D, etc) and should properly reflect this when navigating your Kodi library (MQ cases, etc.). 
-- Kodi audio and subtitles track should now be remembered after being changed. -- Make report progress to Emby more accurate -- Remote command should now be accepted during the first minute of playback. -Changes: -- The library syncing process was moved to it's on thread. In general, the add-on should perform faster and be more responsive. Especially at startup. -- Improved information logging (1) so you are able to see the exact content being processed. - -version 1.1.14 -- This version contains a fix for the cast order presented on the video info screens. It is restricted to Kodi 15 and onwards. -- A complete resync of the database will be needed to pick up the changes - -version 1.1.13 -Fix: -- If you were unable to launch scripts from the Emby add-on launch menu, this is now resolved. -Features: -- Cover art - Fix for cut off cover art -- Local/remote access from same Kodi profile ** This is not Emby connect ** -Cover art: -- Add-on settings > Extras > Force CoverArt ratio -If your poster CoverArt is cut off, you can enable this option to force the artwork to fit the standard Kodi image aspect ratio (image will be slightly distorted). Since images are cached in Kodi, you will need to reset/resync your library to see your artwork change. ** You should still request to fix the aspect ratio issue by posting in the appropriate thread to contact the skin creator. Using the Confluence skin, for example, displays Cover art correctly, without distorting the image. ** -Local/remote access: -- Add-on settings > Emby > Use alternate address / Secondary Server Address -This feature is useful in the event you are on the move. You simply enable the option and enter your external server address, restart Kodi and it should now load your profile using your external address instead. This way you can enable Play from HTTP and stream/transcode while away from home! To recap, the secondary server address is only to reach the same server as the primary server address - - - -version 1.1.0 -- stable release -- Note that the secondary IP address support in this release is still a BETA feature. -- Note that the beta is likely to go more unstable for a bit - so if you are using Beta - you might disable auto update, or switch to stable. - - -version 1.0.15 -- If having the auto caching images setting enabled was causing an issue while syncing, this should now be resolved. Other improvements have been made regarding widgets refresh rate for new content, theme media and direct paths. - -version 1.0.14 -- The last version assured a show/movie would have either a theme video or theme music as this was what the nfo format supported (which tvtunes reads and we create). However this was restrictive so a change has been made to include all theme media and with the help of the upcoming tvtunes version (5.0.2 onwards) it has options to prefer theme videos etc. - -version 1.0.13 -- The only difference to the previous version is a possible fix for theme music import with strange characters in the filename - -version 1.0.12 -- This version adds theme videos to the theme media sync, for people using theme videos you will need to run the option to "sync emby theme media to kodi" again - -version 1.0.10 -Important: -- We reverted the database detection method from previous version (1.0.09) back to a static method. This means we currently support Helix 14.2 (MyVideos90.db) and Isengard Beta 2 (MyVideos93.db). - Fix: -- Illegal characters in file name for Theme music (should now be finally fixed!) 
- Optimization: -- Season posters should now reflect instantly when changed in Emby -- Fanart backdrops should now reflect correctly when changed in Emby - give several seconds to see the new backdrop appear. -- Added Series poster as All Season poster. - you will need to reset - resync library for them to show for already established Series. -- Watched status should now be instant. Please test and report any issues. - -version 1.0.09 -- Important database detection change: We are trying something new. The famous error "can't find table id Path" or similar, when starting the sync happens in the event the add-on is unable to locate your Kodi database file. We are trying a more dynamic way of getting this information in order to eradicate this error. So keep your eyes peeled in the event the database is not found and report ASAP with logs as we are unsure of this change. It is really appreciated! -TV Tunes, Theme music: -- Fix for invalid characters in filename **Still needs work** -- Fix for special characters. -- Automatically set the custom path in your TV Tunes settings, so you don't have to (this allows TV Tunes to find your Emby themes). This means your themes will start working instantly. Make sure TV Tunes is enabled for the skin you are using! -- The feature should now be compatible with OpenElec. -- Video backdrops are coming soon! -Youtube trailers: -- Youtube Trailers should now work and be launched using the youtube add-on. However, it will require a reset - resync of your database. - -version 1.0.08 -- Attempt at supporting Isengard Beta 2 -- Caused error with special chars -- Disable audio-subs pref -- increase logging verbosity for WebSocket message errors -- with direct paths make sure a path was returned - -version 1.0.07 -- added stream language and subtitle language to stream details -- prevent errors on empty results -- Re-add connection message -- Support multiple theme songs - -version 1.0.06 -- bug fix - -version 1.0.05 -Fix: -- Error that was preventing the initial sync from running the first time around on fresh installs. -Highlight: -- As you know, using plugin paths, the Kodi's custom video settings were not sticking (audio and subtitles). We've implement an internal logic that will pick the correct setting automatically according to your Emby user preferences. It works with every type of playback: Direct Play, Direct Stream, Transcoding and Direct Paths. You will also find an additional option under the Playback tab in settings to "Always enable subtitles". With this change, there's no more need for Kodi's custom video settings - -version 1.0.04 -- Texture Cache now included in the addon. You can set an option to scan the images to the texture cache on setup or when new data is added. The Cinema Mode and User preferences mentioned in 1.0.02 are back. -- Please note that Cinema mode is not currently compatible with Kodi's option: Play next video automatically. - -version 1.0.02 -- This version now supports Cinema Mode/Intros and has the start of support for some user preferences. Currently the only setting is cinema mode, but we will extend this to support the audio language and maybe other settings in the future. - -version 1.0.1 -- In this version the netflix style next up functionality has been removed from the addon and placed into a separate addon. 
If you would still like to use this functionality install the nextup service from the beta repo -- also stable release - -version 1.0.0 -- Stable release - -version 0.1.94 -- Fix for TV episode info not showing when using direct paths on a password protected network - - -version 0.1.93 -- Bug fix for NON-direct path mode. Sorry - made a copy/paste error :( This will have caused missing music videos/TV shows, and bad syncing of states for music videos/TV Shows - -version 0.1.92 -- New option to point directly to files instead of going through the addon for playback. This will speed up playback on low end devices, and allow addons like TV Tunes to work - but at the cost of losing remote playback, transcoding support and parental control. Once turned on, you must reset your DB to use -- Fix for slow syncing after wakeup. However if using OpenElec I suggest a restart on wake as described here: http://openelec.tv/f...start=15#101953 (using kodi instead of xbmc though) -- Option to ignore specials in NextUp -- Added votes and taglines for movies -- Fix for 'Start from beginning' -- Getting REALLY close to our 1.0 releas - -version 0.1.9/91 -- Temporary removal of the service monitor, until it can be implemented without interfering with other functions. This means if you had trouble playing content because of it, it should be working again. -- Fix the delay in marking watched after content has just been watched in Kodi. This should now be reflected instantly. - -version 0.1.89 -- Alright guys, we are nearing a stable version. It is important you let us know if something is not working correctly for you! The best thing you can do to help us is to start from scratch and let us know if you experience any issues during the process, from start to finish! If everything goes well for everyone, we will release our first Stable version! -- If you were unable to get Direct playback when content had special character in the name, this should now be fixed (for real this time). -- We have been reviewing the playcount/watch status situation and tweak certain Kodi behaviors to allow for a perfect sync of your playstates between Emby and Kodi. This is a follow up to the previous update. The correct Emby playcount should display now. -- As usual, if something doesn't work as intended, please start a new thread and provide a log. Hopefully, you will not. *fingers crossed!* - -version 0.1.86 -- Clean up empty TV Shows when last episode deleted by web socket -- Add option to suppress successful connection message -- Fix 'offer delete' bug accidental introduced in previous release - -version 0.1.83/85 -- The playcount situation should be resolved. If you saw an item be marked as watched before the 90% this should be fixed. As well as rewatching an item, it should now stop marking it as unwatched. -- We now support Webclient remote control! It's so much fun - -version 0.1.82 -- The addon now respects Parental control for access schedules -- Adding a new series (that was never imported to Kodi during initial sync) and changing boxsets should be picked up instantly on event - -version 0.1.81 -- Officially fixes playback for files containing special characters. - -version 0.1.8 -We now support: -- split videos - if you do have them, please let us know if it works correctly. 
-- You can now send messages from the web client to Kodi -- Final touches for music support -- Added Genres and sets to video nodes and sublevels -Fixes: -- TV Shows: Recently added, in progress nodes are now filtered by the parent folder to show the appropriate content -- Fix for path containing special characters failing to playback -- Speed improvement when using Kodi Isengard -- EDIT: Emby for Kodi should behave if you also have tvtunes and videoextras addon enabled. - -version 0.1.6 -- fixes an error with playback - -version 0.1.5 -- Adds dialog to delete episodes for realz when hitting the delete key -- Adds option to offer delete when playback is >80% -- Fixes widget playback - -version 0.1.4 -New feature: -- Thanks to @marcelveldt we are now able to support your Emby music library. It is an optional feature. Please try it out and report back as it is experimental at this stage. You can find the option in your addon settings under Sync Options. After you have enabled the option, you will need to restart Kodi. -Noteworthy: -- New content is now added during playback instead of at the end of it. We thought "almost instant" was not good enough, we opted for instant instead! :) -- The playback went through a major rework. Local paths are now supported (this means you are no longer forced to use UNC path to view content available on the same device). We made the playback smart. Now, instead of failing on one method (let's say Direct play), it will try to play via Direct Stream and then Transcoding before giving you an error. It will still let you know if it failed to launch via direct play, so you are not in the dark. -Fix: -- video nodes not being created when you were switching Kodi profiles -- "Year" format for seasons are now supported -- Missing seasons (hopefully?...let us know!) -- Online server check offline even if server was online -- We forgot to mention that boxsets should be fixed, since the previous version. This should cover add/remove content from boxsets and displaying the correct boxset cover. -- Other minor fixes.... - -version 0.1.2/3 -- Playcount fix for unwatched count -- Video Nodes now working for multi-profiles -- Convenient listing for everything Emby when you launch the Emby for Kodi add-on (like it is with the Mediabrowser add-on). -- Add and Remove users from the viewing session (read below for everything you know about this feature!) - -version 0.1.0 -- Uses a new approach to interact with Kodi DB (Direct Access) You will need to do a Kodi DB reset for this new update -- In the Emby Addon settings under Advanced select "Reset Local Kodi DB" -- 10x+ speed improvement -- Sync reliability improvements -- Works fine on RaspPi 1 now -- Seamless switching from SMB to HTTP -- Box set art fixed -- Sync after resume fixed -- Disable coverart option added -- Transcoding options added -- Video nodes to mimic Emby nodes added -- Date created fix -- Native support for 'extra fanart' -- Dashboard viewing bug fixes -- 3D stream support -- New eTag server feature for stale data implemented -- 'Watched' fixes - -In short - if you tried this addon and thought it was slow/inaccurate, please give it another go. - -Reminder: You need to reset your DB for this release - but it is MUCH faster on initial sync, and follow up syncs are almost instant - -version 0.0.33 -- Playcount fix -- Playback report/resume fix. - - -version 0.0.31/32 -- Emby for Kodi can now support HTTPS fully. This works the same as it would in a browser. 
You can enable/disable Host certificate verification and if you use a custom ssl certificate for your Emby server, you also have the option to add a client-side ssl certificate(.pem). By default, only enabling the HTTPS option should work for most HTTPS connection (self-signed certificates). -- Minor fixes that affects the initial sync and sending a remote stream to Kodi. - - -version 0.0.30 -- New download system implemented - hopefully this will make the syncing process less lengthy. -- Special character fix for Emby username. - - -version 0.0.29 - - Fixed a lockup issue with sync when syncing episodes with no season - -version 0.0.28 -- Fix updates during playback -- Fix issues with deletes -- Images fixes - -version 0.0.27 -- Fix for images not showing up -- Fix for "NEW" Tv Shows being added with event driven triggers fixed -- Fix for episodes that have their season or episode index number change not being updated -- Force / in paths to fix a local path comparison issue -- Fix episode specials showing up in all seasons - -version 0.0.25 -- Fixed the option to reset your database, it should now work on every platform. You will be offered with the option to delete your database, followed by the option to erase your saved user information. Yay! -- HTTPS is now fully implemented. (custom certificate not yet accepted - server self signed should work) -- Custom settings for transcoding are now added in the add-on settings. -- Fixed Kodi "hanging" when shutting down. - - -version 0.0.24 -- This version has a major change: at startup, a full sync is performed, however after that ALL syncing is done using websocket messages from the server. This results in much faster updating, and way less CPU use -- ALSO - there is a big change to the "plot" support - so the first time you run it is going to take a while to get the plots all fixed up. - -version 0.0.23 -- changed the AutoPlay function a little bit. What it now does is popup in the last 20 seconds of play and gives the option to cancel viewing the next episode. This repeats at the end of each episode rather than how it was before where it added the full list of remaining episodes to play -- This version also adds back the 'play from HTTP' option. In a clean build it will ask you if you want to play from HTTP. Otherwise change it in settings and your database will slowly change over to point to the HTTP path - -version 0.0.22 -- added support for Kodi Isengard -- fixed ratings for all media types -- fixed episode thumbnails -- some code cleanup and small typos fixed -- Multiple profiles/users now working. You can setup each Kodi profile with a different Emby user. - -version 0.0.21 -- Now can use the "Easy Pin" option on local networks for signing in thanks to @Angelblue05 -- For those who like to binge watch series I added an option to "AutoPlay" the remaining episodes in a Season. This works with the "Ongoing Episodes/Next UP" widgets found on many home screens and attempts to sort of emulate the way it is done with Netflix. If you have enabled the option in the settings after playback has ended a dialog will popup asking if you want to carry on watching the remaining episodes...this dialog will auto time out after 10 seconds and if nothing is done proceeds to add the remaining episodes to a playlist which is then played - -version 0.0.16 -- More speedups! -- Fixed TV tags -- If you don't specify the type of a collection, we now assume it is movies (but you should specify..) 
-
-version 0.0.15
-- New version has much, much, much faster initial import - plus support for TV collections
-
-version 0.0.14
-- some HUGE speed improvements with the initial sync time
-
-version 0.0.13
-- reduces the initial sync time for new installs but provides no additional changes for current users
-
-version 0.0.12
-- alter reset so that addon data dir is always removed when video db
-- box set sync no longer an option
-- solve encoding issue with playurl
-
-version 0.0.10
-- For those where the reset fails you now only need to delete the video database manually ""userdata/Database/MyVideo90.db"" (note version may differ if not running a Helix version of Kodi)
-
-version 0.0.9
-- This version changes a lot under the hood and needs a clean start to proceed. We have provided a reset option in the addon under advanced settings for this
-
-version 0.0.1
-- initital alpha version
\ No newline at end of file
diff --git a/context.py b/context.py
index 15256491..3a89a817 100644
--- a/context.py
+++ b/context.py
@@ -13,11 +13,13 @@ import xbmcaddon
 
 __addon__ = xbmcaddon.Addon(id='plugin.video.emby')
 __base__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', 'lib')).decode('utf-8')
+__libraries__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'libraries')).decode('utf-8')
 __pcache__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('profile'), 'emby')).decode('utf-8')
 __cache__ = xbmc.translatePath('special://temp/emby').decode('utf-8')
 
 sys.path.insert(0, __cache__)
 sys.path.insert(0, __pcache__)
+sys.path.insert(0, __libraries__)
 sys.path.append(__base__)
 
 #################################################################################################
diff --git a/context_play.py b/context_play.py
index 660fb468..58ee93fb 100644
--- a/context_play.py
+++ b/context_play.py
@@ -13,11 +13,13 @@ import xbmcaddon
 
 __addon__ = xbmcaddon.Addon(id='plugin.video.emby')
 __base__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', 'lib')).decode('utf-8')
+__libraries__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'libraries')).decode('utf-8')
 __pcache__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('profile'), 'emby')).decode('utf-8')
 __cache__ = xbmc.translatePath('special://temp/emby').decode('utf-8')
 
 sys.path.insert(0, __cache__)
 sys.path.insert(0, __pcache__)
+sys.path.insert(0, __libraries__)
 sys.path.append(__base__)
 
 #################################################################################################
diff --git a/default.py b/default.py
index 0305ee81..6f1b05a0 100644
--- a/default.py
+++ b/default.py
@@ -13,11 +13,13 @@ import xbmcaddon
 
 __addon__ = xbmcaddon.Addon(id='plugin.video.emby')
 __base__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', 'lib')).decode('utf-8')
+__libraries__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'libraries')).decode('utf-8')
 __pcache__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('profile'), 'emby')).decode('utf-8')
 __cache__ = xbmc.translatePath('special://temp/emby').decode('utf-8')
 
 sys.path.insert(0, __cache__)
 sys.path.insert(0, __pcache__)
+sys.path.insert(0, __libraries__)
 sys.path.append(__base__)
 
 #################################################################################################
diff --git a/resources/lib/libraries/requests/packages/urllib3/contrib/__init__.py b/libraries/__init__.py
similarity index 100%
rename from resources/lib/libraries/requests/packages/urllib3/contrib/__init__.py
rename to libraries/__init__.py
diff --git a/libraries/dateutil/LICENSE b/libraries/dateutil/LICENSE
new file mode 100644
index 00000000..1e65815c
--- /dev/null
+++ b/libraries/dateutil/LICENSE
@@ -0,0 +1,54 @@
+Copyright 2017- Paul Ganssle <paul@ganssle.io>
+Copyright 2017- dateutil contributors (see AUTHORS file)
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+The above license applies to all contributions after 2017-12-01, as well as
+all contributions that have been re-licensed (see AUTHORS file for the list of
+contributors who have re-licensed their code).
+--------------------------------------------------------------------------------
+dateutil - Extensions to the standard Python datetime module.
+
+Copyright (c) 2003-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
+Copyright (c) 2012-2014 - Tomi Pieviläinen <tomi.pievilainen@iki.fi>
+Copyright (c) 2014-2016 - Yaron de Leeuw <me@jarondl.net>
+Copyright (c) 2015-     - Paul Ganssle <paul@ganssle.io>
+Copyright (c) 2015-     - dateutil contributors (see AUTHORS file)
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+    * Neither the name of the copyright holder nor the names of its
+      contributors may be used to endorse or promote products derived from
+      this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The above BSD License Applies to all code, even that also covered by Apache 2.0.
\ No newline at end of file
diff --git a/libraries/dateutil/NEWS b/libraries/dateutil/NEWS
new file mode 100644
index 00000000..a30cdaab
--- /dev/null
+++ b/libraries/dateutil/NEWS
@@ -0,0 +1,701 @@
+Version 2.7.3 (2018-05-09)
+==========================
+
+Data updates
+------------
+
+- Update tzdata to 2018e.
(gh pr #710)
+
+
+Bugfixes
+--------
+
+- Fixed an issue where passing ``NaN`` or an infinite value to parser.parse
+  would raise decimal.Decimal-specific errors rather than ``ValueError``.
+  Reported and fixed by @amureki (gh issue #662, gh pr #679).
+- Fixed a ValueError being thrown if a tzinfos call explicitly returns
+  ``None``. Reported by @pganssle (gh issue #661). Fixed by @parsethis
+  (gh pr #681)
+- Fixed incorrect parsing of certain dates earlier than 100 AD when
+  represented in the form "%B.%Y.%d", e.g. "December.0031.30".
+  (gh issue #687, pr #700)
+- Fixed a bug where automatically generated DTSTART was naive even if a
+  specified UNTIL had a time zone. Automatically generated DTSTART will now
+  take on the timezone of an UNTIL date, if provided. Reported by @href (gh
+  issue #652). Fixed by @absreim (gh pr #693).
+
+
+Documentation changes
+---------------------
+
+- Corrected link syntax and updated URL to https for ISO year week number
+  notation in relativedelta examples. (gh issue #670, pr #711)
+- Added doctest examples to tzfile documentation. Done by @weatherpattern and
+  @pganssle (gh pr #671)
+- Updated the documentation for relativedelta. Removed references to tuple
+  arguments for weekday, explained the effect of weekday(_, 1) and better
+  explained the order of operations that relativedelta applies. Fixed by
+  @kvn219, @huangy22 and @ElliotJH (gh pr #673)
+- Added changelog to documentation. (gh issue #692, gh pr #707)
+- Changed order of keywords in rrule docstring. Reported and fixed by
+  @rmahajan14 (gh issue #686, gh pr #695).
+- Added documentation for ``dateutil.tz.gettz``. Reported by @pganssle (gh
+  issue #647). Fixed by @weatherpattern (gh pr #704)
+- Cleaned up malformed RST in the ``tz`` documentation. (gh issue #702, gh pr
+  #706)
+- Changed the default theme to sphinx_rtd_theme, and changed the sphinx
+  configuration to go along with that. (gh pr #707)
+- Reorganized ``dateutil.tz`` documentation and fixed an issue with the
+  ``dateutil.tz`` docstring. (gh pr #714)
+
+
+Misc
+----
+
+- GH #674, GH #688, GH #699
+
+
+Version 2.7.2 (2018-03-26)
+==========================
+
+Bugfixes
+--------
+
+- Fixed an issue with the setup script running in a non-UTF-8 environment.
+  Reported and fixed by @gergondet (gh pr #651)
+
+
+Misc
+----
+
+- GH #655
+
+
+Version 2.7.1 (2018-03-24)
+==========================
+
+Data updates
+------------
+
+- Updated tzdata version to 2018d.
+
+
+Bugfixes
+--------
+
+- Fixed issue where parser.parse would occasionally raise
+  decimal.Decimal-specific error types rather than ValueError. Reported by
+  @amureki (gh issue #632). Fixed by @pganssle (gh pr #636).
+- Improved the error message when rrule's dtstart and until are not both
+  naive or both aware. Reported and fixed by @ryanpetrello (gh issue #633,
+  gh pr #634)
+
+
+Misc
+----
+
+- GH #644, GH #648
+
+
+Version 2.7.0
+=============
+- Dropped support for Python 2.6 (gh pr #362 by @jdufresne)
+- Dropped support for Python 3.2 (gh pr #626)
+- Updated zoneinfo file to 2018c (gh pr #616)
+- Changed licensing scheme so all new contributions are dual licensed under
+  Apache 2.0 and BSD. (gh pr #542, issue #496)
+- Added __all__ variable to the root package. Reported by @tebriel
+  (gh issue #406), fixed by @mariocj89 (gh pr #494)
+- Added python_requires to setup.py so that pip will distribute the right
+  version of dateutil. Fixed by @jakec-github (gh issue #537, pr #552)
+- Added the utils submodule, for miscellaneous utilities.
+- Added within_delta function to utils - added by @justanr (gh issue #432,
+  gh pr #437)
+- Added today function to utils (gh pr #474)
+- Added default_tzinfo function to utils (gh pr #475), solving an issue
+  reported by @nealmcb (gh issue #94)
+- Added dedicated ISO 8601 parsing function isoparse (gh issue #424).
+  Initial implementation by @pganssle in gh pr #489 and #622, with a
+  pre-release fix by @kirit93 (gh issue #546, gh pr #573).
+- Moved parser module into parser/_parser.py and officially deprecated the use
+  of several private functions and classes from that module. (gh pr #501, #515)
+- Tweaked parser error message to include the rejected string format, added by
+  @pbiering (gh pr #300)
+- Added support for parsing bytearray input, reported by @uckelman (gh issue
+  #417) and fixed by @uckelman and @pganssle (gh pr #514)
+- Started raising a warning when the parser finds a timezone string that it
+  cannot construct a tzinfo instance for (rather than succeeding with no
+  indication of an error). Reported and fixed by @jbrockmendel (gh pr #540)
+- Dropped the use of assert in the parser. Fixed by @jbrockmendel (gh pr #502)
+- Fixed the assertion logic in the parser to support dates like '2015-15-May',
+  reported and fixed by @jbrockmendel (gh pr #409)
+- Fixed IndexError in parser on dates with trailing colons, reported and fixed
+  by @jbrockmendel (gh pr #420)
+- Fixed bug where hours were not validated, leading to an improper parse.
+  Reported by @heappro (gh pr #353), fixed by @jbrockmendel (gh pr #482)
+- Fixed problem parsing strings in the %b-%Y-%d format. Reported and fixed by
+  @jbrockmendel (gh pr #481)
+- Fixed problem parsing strings in the %d%B%y format. Reported by @asishm
+  (gh issue #360), fixed by @jbrockmendel (gh pr #483)
+- Fixed problem parsing certain unambiguous strings when year <99 (gh pr #510).
+  Reported by @alexwlchan (gh issue #293).
+- Fixed issue with parsing an unambiguous string representation of an ambiguous
+  datetime such that if possible the correct value for fold is set. Fixes
+  issue reported by @JordonPhillips and @pganssle (gh issue #318, #320,
+  gh pr #517)
+- Fixed issue with improper rounding of fractional components. Reported by
+  @dddmello (gh issue #427), fixed by @m-dz (gh pr #570)
+- Performance improvement to parser from removing certain min() calls.
+  Reported and fixed by @jbrockmendel (gh pr #589)
+- Significantly refactored parser code by @jbrockmendel (gh prs #419, #436,
+  #490, #498, #539) and @pganssle (gh prs #435, #468)
+- Implemented __hash__ for relativedelta and weekday, reported and fixed
+  by @mrigor (gh pr #389)
+- Implemented __abs__ for relativedelta. Reported by @binnisb and @pferreir
+  (gh issue #350, pr #472)
+- Fixed relativedelta.weeks property getter and setter to work for both
+  negative and positive values. Reported and fixed by @souliane (gh issue #459,
+  pr #460)
+- Fixed issue where passing whole number floats to the months or years
+  arguments of the relativedelta constructor would lead to errors during
+  addition. Reported by @arouanet (gh pr #411), fixed by @lkollar (gh pr #553)
+- Added a pre-built tz.UTC object representing UTC (gh pr #497)
+- Added a cache to tz.gettz so that by default it will return the same object
+  for identical inputs. This will change the semantics of certain operations
+  between datetimes constructed with tzinfo=tz.gettz(...).
(gh pr #628)
+- Changed the behavior of tz.tzutc to return a singleton (gh pr #497, #504)
+- Changed the behavior of tz.tzoffset to return the same object when passed
+  the same inputs, with a corresponding performance improvement (gh pr #504)
+- Changed the behavior of tz.tzstr to return the same object when passed the
+  same inputs. (gh pr #628)
+- Added .instance alternate constructors for tz.tzoffset and tz.tzstr, to
+  allow the construction of a new instance if desired. (gh pr #628)
+- Added the tz.gettz.nocache function to allow explicit retrieval of a new
+  instance of the relevant tzinfo. (gh pr #628)
+- Expanded the definition of tz.tzlocal equality so that the local zone is
+  allowed equality with tzoffset and tzutc. (gh pr #598)
+- Deprecated the idiosyncratic tzstr format mentioned in several examples but
+  evidently designed exclusively for dateutil, and very likely not used by
+  any current users. (gh issue #595, gh pr #606)
+- Added the tz.resolve_imaginary function, which generates a real date from
+  an imaginary one, if necessary. Implemented by @Cheukting (gh issue #339,
+  gh pr #607)
+- Fixed issue where the tz.tzstr constructor would erroneously succeed if
+  passed an invalid value for tzstr. Fixed by @pablogsal (gh issue #259,
+  gh pr #581)
+- Fixed issue with tz.gettz for TZ variables that start with a colon. Reported
+  and fixed by @lapointexavier (gh pr #601)
+- Added a lock to tz.tzical's cache. Reported and fixed by @Unrud (gh pr #430)
+- Fixed an issue with fold support on certain Python 3 implementations that
+  used the pre-3.6 pure Python implementation of datetime.replace, most
+  notably pypy3 (gh pr #446).
+- Added support for VALUE=DATE-TIME for DTSTART in rrulestr. Reported by @potuz
+  (gh issue #401) and fixed by @Unrud (gh pr #429)
+- Started enforcing that within VTIMEZONE, the VALUE parameter can only be
+  omitted or DATE-TIME, per RFC 5545. Reported by @Unrud (gh pr #439)
+- Added support for the TZID parameter for DTSTART in rrulestr. Reported and
+  fixed by @ryanpetrello (gh issue #614, gh pr #624)
+- Added 'RRULE:' prefix to rrule strings generated by rrule.__str__, in
+  compliance with the RFC. Reported by @AndrewPashkin (gh issue #86), fixed by
+  @jarondl and @mlorant (gh pr #450)
+- Switched to setuptools_scm for version management, automatically calculating
+  a version number from the git metadata. Reported by @jreback (gh issue #511),
+  implemented by @Sulley38 (gh pr #564)
+- Switched setup.py to use find_packages, and started testing against pip
+  installed versions of dateutil in CI. Fixed issue with parser import
+  discovered by @jreback in pandas-dev/pandas#18141. (gh issue #507, pr #509)
+- Switched test suite to using pytest (gh pr #495)
+- Switched CI over to use tox. Fixed by @gaborbernat (gh pr #549)
+- Added a test-only dependency on freezegun. (gh pr #474)
+- Reduced number of CI builds on Appveyor. Fixed by @kirit93 (gh issue #529,
+  gh pr #579)
+- Made xfails strict by default, so that an xpass is a failure. (gh pr #567)
+- Added a documentation generation stage to tox and CI. (gh pr #568)
+- Added an explicit warning when running python setup.py explaining how to run
+  the test suites with pytest. Fixed by @lkollar. (gh issue #544, gh pr #548)
+- Added requirements-dev.txt for test dependency management (gh pr #499, #516)
+- Fixed code coverage metrics to account for Windows builds (gh pr #526)
+- Fixed code coverage metrics to NOT count xfails.
Fixed by @gaborbernat
+  (gh issue #519, gh pr #563)
+- Style improvement to zoneinfo.tzfile that was confusing to static type
+  checkers. Reported and fixed by @quodlibetor (gh pr #485)
+- Several unused imports were removed by @jdufresne. (gh pr #486)
+- Switched ``isinstance(*, collections.Callable)`` to callable, which is
+  available on all supported Python versions. Implemented by @jdufresne
+  (gh pr #612)
+- Added CONTRIBUTING.md (gh pr #533)
+- Added AUTHORS.md (gh pr #542)
+- Corrected setup.py metadata to reflect author vs. maintainer (gh issue #477,
+  gh pr #538)
+- Corrected README to reflect that tests are now run in pytest. Reported and
+  fixed by @m-dz (gh issue #556, gh pr #557)
+- Updated all references to RFC 2445 (iCalendar) to point to RFC 5545. Fixed
+  by @mariocj89 (gh issue #543, gh pr #555)
+- Corrected parse documentation to reflect proper integer offset units,
+  reported and fixed by @abrugh (gh pr #458)
+- Fixed dangling parenthesis in tzoffset documentation (gh pr #461)
+- Started including the license file in wheels. Reported and fixed by
+  @jdufresne (gh pr #476)
+- Indentation fixes to parser docstring by @jbrockmendel (gh pr #492)
+- Moved many examples from the "examples" documentation into their appropriate
+  module documentation pages. Fixed by @Tomasz-Kluczkowski and @jakec-github
+  (gh pr #558, #561)
+- Fixed documentation so that the parser.isoparse documentation displays.
+  Fixed by @alexchamberlain (gh issue #545, gh pr #560)
+- Refactored build and release sections and added setup instructions to
+  CONTRIBUTING. Reported and fixed by @kynan (gh pr #562)
+- Cleaned up various dead links in the documentation. (gh pr #602, #608, #618)
+
+Version 2.6.1
+=============
+- Updated zoneinfo file to 2017b. (gh pr #395)
+- Added Python 3.6 to CI testing (gh pr #365)
+- Removed a duplicate test name that was preventing a test from being run.
+  Reported and fixed by @jdufresne (gh pr #371)
+- Fixed testing of folds and gaps, particularly on Windows (gh pr #392)
+- Fixed deprecated escape characters in regular expressions. Reported by
+  @nascheme and @thierryba (gh issue #361), fixed by @thierryba (gh pr #358)
+- Many PEP8 style violations and other code smells were fixed by @jdufresne
+  (gh prs #358, #363, #364, #366, #367, #368, #372, #374, #379, #380, #398)
+- Improved performance of tzutc and tzoffset objects. (gh pr #391)
+- Fixed issue with several time zone classes around DST transitions in any
+  zones with +0 standard offset (e.g. Europe/London) (gh issue #321, pr #390)
+- Fixed issue with fuzzy parsing where tokens similar to AM/PM that end up
+  being skipped were dropped from the fuzzy_with_tokens list. Reported and
+  fixed by @jbrockmendel (gh pr #332).
+- Fixed issue with parsing dates of the form X m YY. Reported by
+  @jbrockmendel. (gh issue #333, pr #393)
+- Added support for parser weekdays with fewer than 3 characters. Reported by
+  @arcadefoam (gh issue #343), fixed by @jonemo (gh pr #382)
+- Fixed issue with the addition and subtraction of certain relativedeltas.
+  Reported and fixed by @kootenpv (gh issue #346, pr #347)
+- Fixed issue where the COUNT parameter of rrules was ignored if 0. Fixed by
+  @mshenfield (gh pr #330), reported by @vaultah (gh issue #329).
+- Updated documentation to include the new tz methods. (gh pr #324)
+- Updated documentation to reflect that the parser can raise TypeError,
+  reported and fixed by @tomchuk (gh issue #336, pr #337)
+- Fixed an incorrect year in a parser doctest.
Fixed by @xlotlu (gh pr #357)
+- Moved version information into _version.py and set up the versions more
+  granularly.
+
+Version 2.6.0
+=============
+- Added PEP-495-compatible methods to address ambiguous and imaginary dates in
+  time zones in a backwards-compatible way. Ambiguous dates and times can now
+  be safely represented by all dateutil time zones. Many thanks to Alexander
+  Belopolski (@abalkin) and Tim Peters (@tim-one) for their inputs on how to
+  address this. Original issues reported by Yupeng and @zed (lp: 1390262,
+  gh issues #57, #112, #249, #284, #286, prs #127, #225, #248, #264, #302).
+- Added new methods for working with ambiguous and imaginary dates to the tz
+  module. datetime_ambiguous() determines if a datetime is ambiguous for a
+  given zone and datetime_exists() determines if a datetime exists in a given
+  zone. This works for all fold-aware datetimes, not just those provided by
+  dateutil. (gh issue #253, gh pr #302)
+- Fixed an issue where dst() in Portugal in 1996 was returning the wrong value
+  in tz.tzfile objects. Reported by @abalkin (gh issue #128, pr #225)
+- Fixed an issue where zoneinfo.ZoneInfoFile errors were not being properly
+  deep-copied. (gh issue #226, pr #225)
+- Refactored tzwin and tzrange as subclasses of a common class, tzrangebase,
+  as there was substantial overlapping functionality. As part of this change,
+  tzrange and tzstr now expose a transitions() function, which returns the
+  DST on and off transitions for a given year. (gh issue #260, pr #302)
+- Deprecated zoneinfo.gettz() due to confusion with tz.gettz(), in favor of
+  the get() method of zoneinfo.ZoneInfoFile objects. (gh issue #11, pr #310)
+- For non-character, non-stream arguments, parser.parse now raises TypeError
+  instead of AttributeError. (gh issues #171, #269, pr #247)
+- Fixed an issue where tzfile objects were not properly handling dst() and
+  tzname() when attached to datetime.time objects. Reported by @ovacephaloid.
+  (gh issue #292, pr #309)
+- /usr/share/lib/zoneinfo was added to TZPATHS for compatibility with Solaris
+  systems. Reported by @dhduvall (gh issue #276, pr #307)
+- tzoffset and tzrange objects now accept either a number of seconds or a
+  datetime.timedelta() object wherever previously only a number of seconds was
+  allowed. (gh pr #264, #277)
+- datetime.timedelta objects can now be added to relativedelta objects.
+  Reported and added by Alec Nikolas Reiter (@justanr) (gh issue #282, pr #283)
+- Refactored relativedelta.weekday and rrule.weekday into a common base class
+  to reduce code duplication. (gh issue #140, pr #311)
+- An issue where the WKST parameter was improperly rendering in str(rrule) was
+  reported and fixed by Daniel LePage (@dplepage). (gh issue #262, pr #263)
+- A replace() method has been added to rrule objects by @jendas1, which
+  creates a new rrule with modified attributes, analogous to datetime.replace
+  (gh pr #167)
+- Made some significant performance improvements to rrule objects in Python 2.x
+  (gh pr #245)
+- All classes defining equality functions now return NotImplemented when
+  compared to unsupported classes, rather than raising TypeError, to allow
+  other classes to provide fallback support. (gh pr #236)
+- Several classes have been marked as explicitly unhashable to maintain
+  identical behavior between Python 2 and 3. Submitted by Roy Williams
+  (@rowillia) (gh pr #296)
+- Trailing whitespace in easter.py has been removed.
Submitted by @OmgImAlexis
+  (gh pr #299)
+- Windows-only batch files in build scripts had line endings switched to CRLF.
+  (gh pr #237)
+- @adamchainz updated the documentation links to reflect that the canonical
+  location for readthedocs links is now at .io, not .org. (gh pr #272)
+- Made some changes to the CI and codecov to test against newer versions of
+  Python and pypy, and to adjust the code coverage requirements. For the
+  moment, full pypy3 compatibility is not supported until a new release is
+  available, due to upstream bugs in the old version affecting PEP-495
+  support. (gh prs #265, #266, #304, #308)
+- The full PGP signing key fingerprint was added to the README.md in favor of
+  the previously used long-id. Reported by @valholl (gh issue #287, pr #304)
+- Updated zoneinfo to 2016i. (gh issue #298, gh pr #306)
+
+
+Version 2.5.3
+=============
+- Updated zoneinfo to 2016d
+- Fixed parser bug where unambiguous datetimes failed to parse when dayfirst
+  was set to true. (gh issue #233, pr #234)
+- A bug in the zoneinfo file on platforms such as Google App Engine, which do
+  not allow importing of subprocess.check_call, was reported and fixed by
+  @savraj (gh issue #239, gh pr #240)
+- Fixed incorrect version in documentation (gh issue #235, pr #243)
+
+Version 2.5.2
+=============
+- Updated zoneinfo to 2016c
+- Fixed parser bug where yearfirst and dayfirst parameters were not being
+  respected when no separator was present. (gh issue #81 and #217, pr #229)
+
+Version 2.5.1
+=============
+- Updated zoneinfo to 2016b
+- Changed MANIFEST.in to explicitly include the test suite in source
+  distributions, with help from @koobs (gh issue #193, pr #194, #201, #221)
+- Explicitly set all line-endings to LF, except for the NEWS file, on a
+  per-repository basis (gh pr #218)
+- Fixed an issue with improper caching behavior in rruleset objects (gh issue
+  #104, pr #207)
+- Changed to an explicit error when rrulestr strings contain a missing BYDAY
+  (gh issue #162, pr #211)
+- tzfile now correctly handles files containing leapcnt (although the leapcnt
+  information is not actually used). Contributed by @hjoukl (gh issue #146, pr
+  #147)
+- Fixed recursive import issue with tz module (gh pr #204)
+- Added compatibility between tzwin objects and datetime.time objects (gh issue
+  #216, gh pr #219)
+- Refactored monolithic test suite by module (gh issue #61, pr #200 and #206)
+- Improved test coverage in the relativedelta module (gh pr #215)
+- Adjusted documentation to reflect possibly counter-intuitive properties of
+  RFC-5545-compliant rrules, and other documentation improvements in the rrule
+  module (gh issue #105, gh issue #149 - pointer to the solution by @phep,
+  pr #213).
+
+
+Version 2.5.0
+=============
+- Updated zoneinfo to 2016a
+- zoneinfo_metadata file version increased to 2.0 - the updated updatezinfo.py
+  script will work with older zoneinfo_metadata.json files, but new metadata
+  files will not work with older updatezinfo.py versions. Additionally, we
+  have started hosting our own mirror of the Olson databases on a github pages
+  site (https://dateutil.github.io/tzdata/) (gh pr #183)
+- dateutil zoneinfo tarballs now contain the full zoneinfo_metadata file used
+  to generate them. (gh issue #27, gh pr #85)
+- relativedelta can now be safely subclassed without derived objects reverting
+  to base relativedelta objects as a result of arithmetic operations.
(lp:1010199, gh issue #44, pr #49)
+- relativedelta 'weeks' parameter can now be set and retrieved as a property
+  of relativedelta instances. (lp: 727525, gh issue #45, pr #49)
+- relativedelta now explicitly supports fractional relative weeks, days,
+  hours, minutes and seconds. Fractional values in absolute parameters (year,
+  day, etc) are now deprecated. (gh issue #40, pr #190)
+- relativedelta objects previously did not use microseconds to determine
+  whether two relativedelta objects were equal. This oversight has been
+  corrected. Contributed by @elprans (gh pr #113)
+- rrule now has an xafter() method for retrieving multiple recurrences after a
+  specified date. (gh pr #38)
+- str(rrule) now returns an RFC2445-compliant rrule string, contributed by
+  @schinckel and @armicron (lp:1406305, gh issue #47, prs #50, #62 and #160)
+- rrule performance under certain conditions has been significantly improved
+  thanks to a patch contributed by @dekoza, based on an article by Brian Beck
+  (@exogen) (gh pr #136)
+- The use of both the 'until' and 'count' parameters is now deprecated as
+  inconsistent with RFC2445 (gh pr #62, #185)
+- Parsing an empty string will now raise a ValueError, rather than returning
+  the datetime passed to the 'default' parameter. (gh issue #78, pr #187)
+- tzwinlocal objects now have a meaningful repr() and str() implementation
+  (gh issue #148, prs #184 and #186)
+- Added equality logic for tzwin and tzwinlocal objects. (gh issue #151,
+  pr #180, #184)
+- Added some flexibility in subclassing timelex, and switched the default
+  behavior over to using string methods rather than comparing against a fixed
+  list. (gh pr #122, #139)
+- An issue causing tzstr() to crash on Python 2.x was fixed. (lp: 1331576,
+  gh issue #51, pr #55)
+- An issue with string encoding causing exceptions under certain circumstances
+  when tzname() is called was fixed. (gh issue #60, #74, pr #75)
+- Fixed a parser issue where calling parse() on a date with no day specified
+  failed when the day of the month in the default datetime (which is "today"
+  if unspecified) was greater than the number of days in the parsed month
+  (this issue tended to crop up between the 29th and 31st of the month, for
+  obvious reasons). (canonical gh issue #25, pr #30, #191)
+- Fixed parser issue causing fuzzy_with_tokens to raise an unexpected
+  exception in certain circumstances. Contributed by @MichaelAquilina
+  (gh pr #91)
+- Fixed parser issue where years > 100 AD were incorrectly parsed. Contributed
+  by @Bachmann1234 (gh pr #130)
+- Fixed parser issue where commas were not a valid separator between seconds
+  and microseconds, preventing parsing of ISO 8601 dates. Contributed by
+  @ryanss (gh issue #28, pr #106)
+- Fixed issue with tzwin encoding in locales with non-Latin alphabets
+  (gh issue #92, pr #98)
+- Fixed an issue where tzwin was not being properly imported on Windows.
+  Contributed by @labrys. (gh pr #134)
+- Fixed a problem causing issues importing zoneinfo in certain circumstances.
+  Issue and solution contributed by @alexxv (gh issue #97, pr #99)
+- Fixed an issue where dateutil timezones were not compatible with basic time
+  objects. One of many, many timezone related issues contributed and tested by
+  @labrys. (gh issue #132, pr #181)
+- Fixed issue where tzwinlocal had an invalid utcoffset. (gh issue #135,
+  pr #141, #142)
+- Fixed issue with tzwin and tzwinlocal where DST transitions were incorrectly
+  parsed from the registry.
(gh issue #143, pr #178) +- updatezinfo.py no longer suppresses certain OSErrors. Contributed by @bjamesv + (gh pr #164) +- An issue that arose when timezone locale changes during runtime has been + fixed by @carlosxl and @mjschultz (gh issue #100, prs #107, #109) +- Python 3.5 was added to the supported platforms in the metadata (@tacaswell + gh pr #159) and the test suites (@moreati gh pr #117). +- An issue with tox failing without unittest2 installed in Python 2.6 was fixed + by @moreati (gh pr #115) +- Several deprecated functions were replaced in the tests by @moreati + (gh pr #116) +- Improved the logic in Travis and Appveyor to alleviate issues where builds + were failing due to connection issues when downloading the IANA timezone + files. In addition to adding our own mirror for the files (gh pr #183), the + download is now retried a number of times (with a delay) (gh pr #177) +- Many failing doctests were fixed by @moreati. (gh pr #120) +- Many fixes to the documentation (gh pr #103, gh pr #87 from @radarhere, + gh pr #154 from @gpoesia, gh pr #156 from @awsum, gh pr #168 from @ja8zyjits) +- Added a code coverage tool to the CI to help improve the library. (gh pr #182) +- We now have a mailing list - dateutil@python.org, graciously hosted by + Python.org. + + +Version 2.4.2 +============= +- Updated zoneinfo to 2015b. +- Fixed issue with parsing of tzstr on Python 2.7.x; tzstr will now be decoded + if not a unicode type. gh #51 (lp:1331576), gh pr #55. +- Fix a parser issue where AM and PM tokens were showing up in fuzzy date + stamps, triggering inappropriate errors. gh #56 (lp: 1428895), gh pr #63. +- Missing function "setcachesize" removed from zoneinfo __all__ list by @ryanss, + fixing an issue with wildcard imports of dateutil.zoneinfo. (gh pr #66). +- (PyPI only) Fix an issue with source distributions not including the test + suite. + + +Version 2.4.1 +============= + +- Added explicit check for valid hours if AM/PM is specified in parser. + (gh pr #22, issue #21) +- Fix bug in rrule introduced in 2.4.0 where byweekday parameter was not + handled properly. (gh pr #35, issue #34) +- Fix error where parser allowed some invalid dates, overwriting existing hours + with the last 2-digit number in the string. (gh pr #32, issue #31) +- Fix and add test for Python 2.x compatibility with boolean checking of + relativedelta objects. Implemented by @nimasmi (gh pr #43) and Cédric Krier + (lp: 1035038) +- Replaced parse() calls with explicit datetime objects in unit tests unrelated + to parser. (gh pr #36) +- Changed private _byxxx from sets to sorted tuples and fixed one currently + unreachable bug in _construct_byset. (gh pr #54) +- Additional documentation for parser (gh pr #29, #33, #41) and rrule. +- Formatting fixes to documentation of rrule and README.rst. +- Updated zoneinfo to 2015a. + +Version 2.4.0 +============= + +- Fix an issue with relativedelta and freezegun (lp:1374022) +- Fix tzinfo in windows for timezones without dst (lp:1010050, gh #2) +- Ignore missing timezones in windows like in POSIX +- Fix minimal version requirement for six (gh #6) +- Many rrule changes and fixes by @pganssle (gh pull requests #13 #14 #17), + including defusing some infinite loops (gh #4) + +Version 2.3 +=========== + +- Cleanup directory structure, moved test.py to dateutil/tests/test.py + +- Changed many aspects of dealing with the zone info file. Instead of a cache, + all the zones are loaded to memory, but symbolic links are loaded only once, + so not much memory is used. 
+
+- The package is now zip-safe, and universal-wheelable, thanks to changes in
+  the handling of the zoneinfo file.
+
+- Fixed tzwin being silently not imported on Windows Python 2.
+
+- New maintainer, together with new hosting: GitHub, Travis, Read-The-Docs
+
+Version 2.2
+===========
+
+- Updated zoneinfo to 2013h
+
+- fuzzy_with_tokens parse addon from Christopher Corley
+
+- Bug with LANG=C fixed by Mike Gilbert
+
+Version 2.1
+===========
+
+- New maintainer
+
+- Dateutil now works on Python 2.6, 2.7 and 3.2 from the same codebase
+  (with six)
+
+- #704047: Ismael Carnales' patch for a new time format
+
+- Small bug fixes; thanks to the reporters!
+
+
+Version 2.0
+===========
+
+- Ported to Python 3, by Brian Jones. If you need dateutil for Python 2.X,
+  please continue using the 1.X series.
+
+- There's no such thing as a "PSF License". This source code is now
+  made available under the Simplified BSD license. See LICENSE for
+  details.
+
+Version 1.5
+===========
+
+- As reported by Mathieu Bridon, rrules were matching the bysecond rules
+  incorrectly against byminute in some circumstances when the SECONDLY
+  frequency was in use, due to a copy & paste bug. The problem has been
+  unittested and corrected.
+
+- Adam Ryan reported a problem in the relativedelta implementation which
+  affected the yearday parameter in the month of January specifically.
+  This has been unittested and fixed.
+
+- Updated timezone information.
+
+
+Version 1.4.1
+=============
+
+- Updated timezone information.
+
+
+Version 1.4
+===========
+
+- Fixed another parser precision problem on conversion of decimal seconds
+  to microseconds, as reported by Erik Brown. Now these issues are gone
+  for real since it's not using floating point arithmetic anymore.
+
+- Fixed case where tzrange.utcoffset and tzrange.dst() might fail due
+  to a date being used where a datetime was expected (reported and fixed
+  by Lennart Regebro).
+
+- Prevent tzstr from introducing daylight timings in strings that didn't
+  specify them (reported by Lennart Regebro).
+
+- Calls like gettz("GMT+3") and gettz("UTC-2") will now return the
+  expected values, instead of the TZ variable behavior.
+
+- Fixed DST signal handling in zoneinfo files. Reported by
+  Nicholas F. Fabry and John-Mark Gurney.
+
+
+Version 1.3
+===========
+
+- Fixed precision problem on conversion of decimal seconds to
+  microseconds, as reported by Skip Montanaro.
+
+- Fixed bug in constructor of parser, and converted parser classes to
+  new-style classes. Original report and patch by Michael Elsdörfer.
+
+- Initialize tzid and comps in tz.py, to prevent the code from ever
+  raising a NameError (even with broken files). Johan Dahlin suggested
+  the fix after a pyflakes run.
+
+- Version is now published in dateutil.__version__, as requested
+  by Darren Dale.
+
+- All code is compatible with new-style division.
+
+
+Version 1.2
+===========
+
+- Now tzfile will round timezones to full-minutes if necessary,
+  since Python's datetime doesn't support sub-minute offsets.
+  Thanks to Ilpo Nyyssönen for reporting the issue.
+
+- Removed bare string exceptions, as reported and fixed by
+  Wilfredo Sánchez Vega.
+
+- Fix bug in leap count parsing (reported and fixed by Eugene Oden).
+
+
+Version 1.1
+===========
+
+- Fixed rrule byyearday handling. Abramo Bagnara pointed out that
+  RFC2445 allows negative numbers.
+
+- Fixed --prefix handling in setup.py (by Sidnei da Silva).
+
+- Now tz.gettz() returns a tzlocal instance when not given any
+  arguments and no other timezone information is found.
+
+- Updated timezone information to version 2005q.
+
+
+Version 1.0
+===========
+
+- Fixed parsing of XXhXXm formatted time after day/month/year
+  has been parsed.
+
+- Added patch by Jeffrey Harris optimizing rrule.__contains__.
+
+
+Version 0.9
+===========
+
+- Fixed pickling of timezone types, as reported by
+  Andreas Köhler.
+
+- Implemented internal timezone information with binary
+  timezone files. The dateutil.tz.gettz() function will now
+  try to use the system timezone files, and fall back to
+  the internal versions. It's also possible to ask for
+  the internal versions directly by using
+  dateutil.zoneinfo.gettz().
+
+- New tzwin timezone type, allowing access to Windows
+  internal timezones (contributed by Jeffrey Harris).
+
+- Fixed parsing of unicode date strings.
+
+- Accept parserinfo instances as the parser constructor
+  parameter, besides parserinfo (sub)classes.
+
+- Changed weekday to spell the not-set n value as None
+  instead of 0.
+
+- Fixed other reported bugs.
+
+
+Version 0.5
+===========
+
+- Removed ``FREQ_`` prefix from rrule frequency constants.
+  WARNING: this breaks compatibility with previous versions.
+
+- Fixed rrule.between() for cases where "after" is achieved
+  before even starting, as reported by Andreas Köhler.
+
+- Fixed two-digit zero-year parsing (such as 31-Dec-00), as
+  reported by Jim Abramson, and included a test case for this.
+
+- Sort exdate and rdate before iterating over them, so that
+  it's not necessary to sort them before adding to the rruleset,
+  as reported by Nicholas Piper.
diff --git a/libraries/dateutil/README.rst b/libraries/dateutil/README.rst
new file mode 100644
index 00000000..7a37552e
--- /dev/null
+++ b/libraries/dateutil/README.rst
@@ -0,0 +1,158 @@
+dateutil - powerful extensions to datetime
+==========================================
+
+|pypi| |support| |licence|
+
+|gitter| |readthedocs|
+
+|travis| |appveyor| |coverage|
+
+.. |pypi| image:: https://img.shields.io/pypi/v/python-dateutil.svg?style=flat-square
+    :target: https://pypi.org/project/python-dateutil/
+    :alt: pypi version
+
+.. |support| image:: https://img.shields.io/pypi/pyversions/python-dateutil.svg?style=flat-square
+    :target: https://pypi.org/project/python-dateutil/
+    :alt: supported Python version
+
+.. |travis| image:: https://img.shields.io/travis/dateutil/dateutil/master.svg?style=flat-square&label=Travis%20Build
+    :target: https://travis-ci.org/dateutil/dateutil
+    :alt: travis build status
+
+.. |appveyor| image:: https://img.shields.io/appveyor/ci/dateutil/dateutil/master.svg?style=flat-square&logo=appveyor
+    :target: https://ci.appveyor.com/project/dateutil/dateutil
+    :alt: appveyor build status
+
+.. |coverage| image:: https://codecov.io/github/dateutil/dateutil/coverage.svg?branch=master
+    :target: https://codecov.io/github/dateutil/dateutil?branch=master
+    :alt: Code coverage
+
+.. |gitter| image:: https://badges.gitter.im/dateutil/dateutil.svg
+    :alt: Join the chat at https://gitter.im/dateutil/dateutil
+    :target: https://gitter.im/dateutil/dateutil
+
+.. |licence| image:: https://img.shields.io/pypi/l/python-dateutil.svg?style=flat-square
+    :target: https://pypi.org/project/python-dateutil/
+    :alt: licence
+
+.. 
|readthedocs| image:: https://img.shields.io/readthedocs/dateutil/latest.svg?style=flat-square&label=Read%20the%20Docs + :alt: Read the documentation at https://dateutil.readthedocs.io/en/latest/ + :target: https://dateutil.readthedocs.io/en/latest/ + +The `dateutil` module provides powerful extensions to +the standard `datetime` module, available in Python. + + +Download +======== +dateutil is available on PyPI +https://pypi.org/project/python-dateutil/ + +The documentation is hosted at: +https://dateutil.readthedocs.io/en/stable/ + +Code +==== +The code and issue tracker are hosted on Github: +https://github.com/dateutil/dateutil/ + +Features +======== + +* Computing of relative deltas (next month, next year, + next monday, last week of month, etc); +* Computing of relative deltas between two given + date and/or datetime objects; +* Computing of dates based on very flexible recurrence rules, + using a superset of the `iCalendar <https://www.ietf.org/rfc/rfc2445.txt>`_ + specification. Parsing of RFC strings is supported as well. +* Generic parsing of dates in almost any string format; +* Timezone (tzinfo) implementations for tzfile(5) format + files (/etc/localtime, /usr/share/zoneinfo, etc), TZ + environment string (in all known formats), iCalendar + format files, given ranges (with help from relative deltas), + local machine timezone, fixed offset timezone, UTC timezone, + and Windows registry-based time zones. +* Internal up-to-date world timezone information based on + Olson's database. +* Computing of Easter Sunday dates for any given year, + using Western, Orthodox or Julian algorithms; +* A comprehensive test suite. + +Quick example +============= +Here's a snapshot, just to give an idea about the power of the +package. For more examples, look at the documentation. + +Suppose you want to know how much time is left, in +years/months/days/etc, before the next easter happening on a +year with a Friday 13th in August, and you want to get today's +date out of the "date" unix system command. Here is the code: + +.. doctest:: readmeexample + + >>> from dateutil.relativedelta import * + >>> from dateutil.easter import * + >>> from dateutil.rrule import * + >>> from dateutil.parser import * + >>> from datetime import * + >>> now = parse("Sat Oct 11 17:13:46 UTC 2003") + >>> today = now.date() + >>> year = rrule(YEARLY,dtstart=now,bymonth=8,bymonthday=13,byweekday=FR)[0].year + >>> rdelta = relativedelta(easter(year), today) + >>> print("Today is: %s" % today) + Today is: 2003-10-11 + >>> print("Year with next Aug 13th on a Friday is: %s" % year) + Year with next Aug 13th on a Friday is: 2004 + >>> print("How far is the Easter of that year: %s" % rdelta) + How far is the Easter of that year: relativedelta(months=+6) + >>> print("And the Easter of that year is: %s" % (today+rdelta)) + And the Easter of that year is: 2004-04-11 + +Being exactly 6 months ahead was **really** a coincidence :) + +Contributing +============ + +We welcome many types of contributions - bug reports, pull requests (code, infrastructure or documentation fixes). For more information about how to contribute to the project, see the ``CONTRIBUTING.md`` file in the repository. + + +Author +====== +The dateutil module was written by Gustavo Niemeyer <gustavo@niemeyer.net> +in 2003. 
+
+It is maintained by:
+
+* Gustavo Niemeyer <gustavo@niemeyer.net> 2003-2011
+* Tomi Pieviläinen <tomi.pievilainen@iki.fi> 2012-2014
+* Yaron de Leeuw <me@jarondl.net> 2014-2016
+* Paul Ganssle <paul@ganssle.io> 2015-
+
+Starting with version 2.4.1, all source and binary distributions will be
+signed by a PGP key that has, at the very least, been signed by the key which
+made the previous release. A table of release signing keys can be found below:
+
+=========== ==================================================================
+Releases    Signing key fingerprint
+=========== ==================================================================
+2.4.1-      `6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB`_ (|pgp_mirror|_)
+=========== ==================================================================
+
+
+Contact
+=======
+Our mailing list is available at `dateutil@python.org
+<https://mail.python.org/mailman/listinfo/dateutil>`_. As it is hosted by the
+PSF, it is subject to the `PSF code of conduct
+<https://www.python.org/psf/codeofconduct/>`_.
+
+License
+=======
+
+All contributions after December 1, 2017 are released under a dual license -
+either the `Apache 2.0 License <https://www.apache.org/licenses/LICENSE-2.0>`_
+or the `BSD 3-Clause License <https://opensource.org/licenses/BSD-3-Clause>`_.
+Contributions before December 1, 2017 - except those explicitly relicensed -
+are released only under the BSD 3-Clause License.
+
+
+.. _6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB:
+   https://pgp.mit.edu/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB
+
+.. |pgp_mirror| replace:: mirror
+.. _pgp_mirror: https://sks-keyservers.net/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB
diff --git a/libraries/dateutil/__init__.py b/libraries/dateutil/__init__.py
new file mode 100644
index 00000000..a29ffaa9
--- /dev/null
+++ b/libraries/dateutil/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+try:
+    from _version import version as __version__
+except ImportError:
+    __version__ = 'unknown'
+
+__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
+           'utils', 'zoneinfo', 'six']
diff --git a/libraries/dateutil/_common.py b/libraries/dateutil/_common.py
new file mode 100644
index 00000000..4eb2659b
--- /dev/null
+++ b/libraries/dateutil/_common.py
@@ -0,0 +1,43 @@
+"""
+Common code used in multiple modules.
+"""
+
+
+class weekday(object):
+    __slots__ = ["weekday", "n"]
+
+    def __init__(self, weekday, n=None):
+        self.weekday = weekday
+        self.n = n
+
+    def __call__(self, n):
+        if n == self.n:
+            return self
+        else:
+            return self.__class__(self.weekday, n)
+
+    def __eq__(self, other):
+        try:
+            if self.weekday != other.weekday or self.n != other.n:
+                return False
+        except AttributeError:
+            return False
+        return True
+
+    def __hash__(self):
+        return hash((
+            self.weekday,
+            self.n,
+        ))
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __repr__(self):
+        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+        if not self.n:
+            return s
+        else:
+            return "%s(%+d)" % (s, self.n)
+
+# vim:ts=4:sw=4:et
diff --git a/libraries/dateutil/easter.py b/libraries/dateutil/easter.py
new file mode 100644
index 00000000..53b7c789
--- /dev/null
+++ b/libraries/dateutil/easter.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic easter computing method for any given year, using
+Western, Orthodox or Julian algorithms.
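+
+A quick doctest-style sketch (an illustrative addition by the editor, not part
+of upstream dateutil; the expected value assumes the default Western method,
+under which Easter 2018 fell on April 1):
+
+    >>> from dateutil.easter import easter
+    >>> easter(2018)
+    datetime.date(2018, 4, 1)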
+""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. + + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_ + + and + + `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/libraries/dateutil/parser/__init__.py b/libraries/dateutil/parser/__init__.py new file mode 100644 index 00000000..2d20777c --- /dev/null +++ b/libraries/dateutil/parser/__init__.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from _parser import parse, parser, parserinfo +from _parser import DEFAULTPARSER, DEFAULTTZPARSER +from _parser import UnknownTimezoneWarning + +from _parser import __doc__ + +from isoparser import isoparser, isoparse + +__all__ = ['parse', 'parser', 'parserinfo', + 'isoparse', 'isoparser', + 'UnknownTimezoneWarning'] + + +### +# Deprecate portions of the private interface so that downstream code that +# is improperly relying on it is given *some* notice. 
+ + +def __deprecated_private_func(f): + from functools import wraps + import warnings + + msg = ('{name} is a private function and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=f.__name__) + + @wraps(f) + def deprecated_func(*args, **kwargs): + warnings.warn(msg, DeprecationWarning) + return f(*args, **kwargs) + + return deprecated_func + +def __deprecate_private_class(c): + import warnings + + msg = ('{name} is a private class and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=c.__name__) + + class private_class(c): + __doc__ = c.__doc__ + + def __init__(self, *args, **kwargs): + warnings.warn(msg, DeprecationWarning) + super(private_class, self).__init__(*args, **kwargs) + + private_class.__name__ = c.__name__ + + return private_class + + +from ._parser import _timelex, _resultbase +from ._parser import _tzparser, _parsetz + +_timelex = __deprecate_private_class(_timelex) +_tzparser = __deprecate_private_class(_tzparser) +_resultbase = __deprecate_private_class(_resultbase) +_parsetz = __deprecated_private_func(_parsetz) diff --git a/libraries/dateutil/parser/_parser.py b/libraries/dateutil/parser/_parser.py new file mode 100644 index 00000000..66940802 --- /dev/null +++ b/libraries/dateutil/parser/_parser.py @@ -0,0 +1,1578 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. + +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: + +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + <http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_ +- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_ +- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_ +- `CPAN ParseDate module + <http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_ +- `Java SimpleDateFormat Class + <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_ +""" +from __future__ import unicode_literals + +import datetime +import re +import string +import time +import warnings + +from calendar import monthrange +from io import StringIO + +import six +from six import binary_type, integer_types, text_type + +from decimal import Decimal + +from warnings import warn + +from dateutil import relativedelta +from dateutil import tz + +__all__ = ["parse", "parserinfo"] + + +# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth +# making public and/or figuring out if there is something we can +# take off their plate. 
+class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([.,])") + + def __init__(self, instream): + if six.PY2: + # In Python 2, we can't duck type properly because unicode has + # a 'decode' function, and we'd be double-decoding + if isinstance(instream, (binary_type, bytearray)): + instream = instream.decode() + else: + if getattr(instream, 'decode', None) is not None: + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + elif getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. 
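+
+    A minimal sketch of the effect of these flags (illustrative only, shown
+    via the module-level ``parse`` entry point, which accepts the same
+    keyword arguments):
+
+        >>> from dateutil.parser import parse
+        >>> parse("01/05/09", dayfirst=True)
+        datetime.datetime(2009, 5, 1, 0, 0)
+        >>> parse("01/05/09", yearfirst=True)
+        datetime.datetime(2001, 5, 9, 0, 0)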
+ """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), # TODO: "Tues" + ("Wed", "Wednesday"), + ("Thu", "Thursday"), # TODO: "Thurs" + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), # TODO: "Febr" + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", + # "Anno Domini", "Year of Our Lord"] + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + """ + Converts two-digit years to year within [-50, 49] + range of self._year (current local time) + """ + + # Function contract is that the year is always positive + assert year >= 0 + + if year < 100 and not century_specified: + # assume current century to start + year += self._century + + if year >= self._year + 50: # if too far in future + year -= 100 + elif year < self._year - 50: # if too far in past + year += 100 + + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.dstridx = None + self.mstridx = None + self.ystridx = None + + @property + def has_year(self): + return self.ystridx is not None + + @property + def has_month(self): + return self.mstridx is not None + + 
@property
+    def has_day(self):
+        return self.dstridx is not None
+
+    def could_be_day(self, value):
+        if self.has_day:
+            return False
+        elif not self.has_month:
+            return 1 <= value <= 31
+        elif not self.has_year:
+            # Be permissive, assume leap year
+            month = self[self.mstridx]
+            return 1 <= value <= monthrange(2000, month)[1]
+        else:
+            month = self[self.mstridx]
+            year = self[self.ystridx]
+            return 1 <= value <= monthrange(year, month)[1]
+
+    def append(self, val, label=None):
+        if hasattr(val, '__len__'):
+            if val.isdigit() and len(val) > 2:
+                self.century_specified = True
+                if label not in [None, 'Y']:  # pragma: no cover
+                    raise ValueError(label)
+                label = 'Y'
+        elif val > 100:
+            self.century_specified = True
+            if label not in [None, 'Y']:  # pragma: no cover
+                raise ValueError(label)
+            label = 'Y'
+
+        super(self.__class__, self).append(int(val))
+
+        if label == 'M':
+            if self.has_month:
+                raise ValueError('Month is already set')
+            self.mstridx = len(self) - 1
+        elif label == 'D':
+            if self.has_day:
+                raise ValueError('Day is already set')
+            self.dstridx = len(self) - 1
+        elif label == 'Y':
+            if self.has_year:
+                raise ValueError('Year is already set')
+            self.ystridx = len(self) - 1
+
+    def _resolve_from_stridxs(self, strids):
+        """
+        Try to resolve the identities of year/month/day elements using
+        ystridx, mstridx, and dstridx, if enough of these are specified.
+        """
+        if len(self) == 3 and len(strids) == 2:
+            # we can back out the remaining stridx value
+            missing = [x for x in range(3) if x not in strids.values()]
+            key = [x for x in ['y', 'm', 'd'] if x not in strids]
+            assert len(missing) == len(key) == 1
+            key = key[0]
+            val = missing[0]
+            strids[key] = val
+
+        assert len(self) == len(strids)  # otherwise this should not be called
+        out = {key: self[strids[key]] for key in strids}
+        return (out.get('y'), out.get('m'), out.get('d'))
+
+    def resolve_ymd(self, yearfirst, dayfirst):
+        len_ymd = len(self)
+        year, month, day = (None, None, None)
+
+        strids = (('y', self.ystridx),
+                  ('m', self.mstridx),
+                  ('d', self.dstridx))
+
+        strids = {key: val for key, val in strids if val is not None}
+        if (len(self) == len(strids) > 0 or
+                (len(self) == 3 and len(strids) == 2)):
+            return self._resolve_from_stridxs(strids)
+
+        mstridx = self.mstridx
+
+        if len_ymd > 3:
+            raise ValueError("More than three YMD values")
+        elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
+            # One member, or two members with a month string
+            if mstridx is not None:
+                month = self[mstridx]
+                # since mstridx is 0 or 1, self[mstridx-1] always
+                # looks up the other element
+                other = self[mstridx - 1]
+            else:
+                other = self[0]
+
+            if len_ymd > 1 or mstridx is None:
+                if other > 31:
+                    year = other
+                else:
+                    day = other
+
+        elif len_ymd == 2:
+            # Two members with numbers
+            if self[0] > 31:
+                # 99-01
+                year, month = self
+            elif self[1] > 31:
+                # 01-99
+                month, year = self
+            elif dayfirst and self[1] <= 12:
+                # 13-01
+                day, month = self
+            else:
+                # 01-13
+                month, day = self
+
+        elif len_ymd == 3:
+            # Three members
+            if mstridx == 0:
+                if self[1] > 31:
+                    # Apr-2003-25
+                    month, year, day = self
+                else:
+                    month, day, year = self
+            elif mstridx == 1:
+                if self[0] > 31 or (yearfirst and self[2] <= 31):
+                    # 99-Jan-01
+                    year, month, day = self
+                else:
+                    # 01-Jan-01
+                    # Give precedence to day-first, since
+                    # two-digit years are usually hand-written.
+                    day, month, year = self
+
+            elif mstridx == 2:
+                # Month string in the last position -- unusual, but possible
+ if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if (self[0] > 31 or + self.ystridx == 0 or + (yearfirst and self[1] <= 12 and self[2] <= 31)): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param \\*\\*kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
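+
+        For example, fields missing from the string are taken from
+        ``default`` (illustrative):
+
+        .. doctest::
+
+            >>> import datetime
+            >>> from dateutil.parser import parser
+            >>> parser().parse("10:36", default=datetime.datetime(2003, 9, 25))
+            datetime.datetime(2003, 9, 25, 10, 36)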
+ """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format:", timestr) + + if len(res) == 0: + raise ValueError("String does not contain a date:", timestr) + + ret = self._build_naive(res, default) + + if not ignoretz: + ret = self._build_tzaware(ret, res, tzinfos) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm","any_unused_tokens"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. + + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. 
doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + skipped_idxs = [] + + # year/month/day list + ymd = _ymd() + + len_l = len(l) + i = 0 + try: + while i < len_l: + + # Check if it's a number + value_repr = l[i] + try: + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Numeric token + i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) + + # Check weekday + elif info.weekday(l[i]) is not None: + value = info.weekday(l[i]) + res.weekday = value + + # Check month name + elif info.month(l[i]) is not None: + value = info.month(l[i]) + ymd.append(value, 'M') + + if i + 1 < len_l: + if l[i + 1] in ('-', '/'): + # Jan-01[-99] + sep = l[i + 1] + ymd.append(l[i + 2]) + + if i + 3 < len_l and l[i + 3] == sep: + # Jan-01-99 + ymd.append(l[i + 4]) + i += 2 + + i += 2 + + elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and + info.pertain(l[i + 2])): + # Jan of 01 + # In this case, 01 is clearly year + if l[i + 4].isdigit(): + # Convert it here to become unambiguous + value = int(l[i + 4]) + year = str(info.convertyear(value)) + ymd.append(year, 'Y') + else: + # Wrong guess + pass + # TODO: not hit in tests + i += 4 + + # Check am/pm + elif info.ampm(l[i]) is not None: + value = info.ampm(l[i]) + val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy) + + if val_is_ampm: + res.hour = self._adjust_ampm(res.hour, value) + res.ampm = value + + elif fuzzy: + skipped_idxs.append(i) + + # Check for a timezone name + elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i + 1 < len_l and l[i + 1] in ('+', '-'): + l[i + 1] = ('+', '-')[l[i + 1] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + # Check for a numbered timezone + elif res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + len_li = len(l[i + 1]) + + # TODO: check that l[i + 1] is integer? + if len_li == 4: + # -0300 + hour_offset = int(l[i + 1][:2]) + min_offset = int(l[i + 1][2:]) + elif i + 2 < len_l and l[i + 2] == ':': + # -03:00 + hour_offset = int(l[i + 1]) + min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
+ i += 2 + elif len_li <= 2: + # -[0]3 + hour_offset = int(l[i + 1][:2]) + min_offset = 0 + else: + raise ValueError(timestr) + + res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) + + # Look for a timezone name between parenthesis + if (i + 5 < len_l and + info.jump(l[i + 2]) and l[i + 3] == '(' and + l[i + 5] == ')' and + 3 <= len(l[i + 4]) and + self._could_be_tzname(res.hour, res.tzname, + None, l[i + 4])): + # -0300 (BRST) + res.tzname = l[i + 4] + i += 4 + + i += 1 + + # Check jumps + elif not (info.jump(l[i]) or fuzzy): + raise ValueError(timestr) + + else: + skipped_idxs.append(i) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) + + res.century_specified = ymd.century_specified + res.year = year + res.month = month + res.day = day + + except (IndexError, ValueError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + skipped_tokens = self._recombine_skipped(l, skipped_idxs) + return res, tuple(skipped_tokens) + else: + return res, None + + def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): + # Token is a number + value_repr = tokens[idx] + try: + value = self._to_decimal(value_repr) + except Exception as e: + six.raise_from(ValueError('Unknown numeric token'), e) + + len_li = len(value_repr) + + len_l = len(tokens) + + if (len(ymd) == 3 and len_li in (2, 4) and + res.hour is None and + (idx + 1 >= len_l or + (tokens[idx + 1] != ':' and + info.hms(tokens[idx + 1]) is None))): + # 19990101T23[59] + s = tokens[idx] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = tokens[idx] + + if not ymd and '.' not in tokens[idx]: + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + + # TODO: Check if res attributes already set. + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = self._parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = tokens[idx] + ymd.append(s[:4], 'Y') + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) + (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) + if hms is not None: + # TODO: checking that hour/minute/second are not + # already set? + self._assign_hms(res, value_repr, hms) + + elif idx + 2 < len_l and tokens[idx + 1] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? 
+            (res.minute, res.second) = self._parse_min_sec(value)
+
+            if idx + 4 < len_l and tokens[idx + 3] == ':':
+                res.second, res.microsecond = self._parsems(tokens[idx + 4])
+
+                idx += 2
+
+            idx += 2
+
+        elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
+            sep = tokens[idx + 1]
+            ymd.append(value_repr)
+
+            if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
+                if tokens[idx + 2].isdigit():
+                    # 01-01[-01]
+                    ymd.append(tokens[idx + 2])
+                else:
+                    # 01-Jan[-01]
+                    value = info.month(tokens[idx + 2])
+
+                    if value is not None:
+                        ymd.append(value, 'M')
+                    else:
+                        raise ValueError()
+
+                if idx + 3 < len_l and tokens[idx + 3] == sep:
+                    # We have three members
+                    value = info.month(tokens[idx + 4])
+
+                    if value is not None:
+                        ymd.append(value, 'M')
+                    else:
+                        ymd.append(tokens[idx + 4])
+                    idx += 2
+
+                idx += 1
+            idx += 1
+
+        elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
+            if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
+                # 12 am
+                hour = int(value)
+                res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
+                idx += 1
+            else:
+                # Year, month or day
+                ymd.append(value)
+            idx += 1
+
+        elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
+            # 12am
+            hour = int(value)
+            res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
+            idx += 1
+
+        elif ymd.could_be_day(value):
+            ymd.append(value)
+
+        elif not fuzzy:
+            raise ValueError()
+
+        return idx
+
+    def _find_hms_idx(self, idx, tokens, info, allow_jump):
+        len_l = len(tokens)
+
+        if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
+            # There is an "h", "m", or "s" label following this token. We
+            # assign the upcoming label to the current token,
+            # e.g. the "12" in "12h"
+            hms_idx = idx + 1
+
+        elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
+              info.hms(tokens[idx+2]) is not None):
+            # There is a space and then an "h", "m", or "s" label.
+            # e.g. the "12" in "12 h"
+            hms_idx = idx + 2
+
+        elif idx > 0 and info.hms(tokens[idx-1]) is not None:
+            # There is an "h", "m", or "s" preceding this token. Since neither
+            # of the previous cases was hit, there is no label following this
+            # token, so we use the previous label.
+            # e.g. the "04" in "12h04"
+            hms_idx = idx-1
+
+        elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
+              info.hms(tokens[idx-2]) is not None):
+            # If we are looking at the final token, we allow for a
+            # backward-looking check to skip over a space.
+            # TODO: Are we sure this is the right condition here?
+            hms_idx = idx - 2
+
+        else:
+            hms_idx = None
+
+        return hms_idx
+
+    def _assign_hms(self, res, value_repr, hms):
+        # See GH issue #427, fixing float rounding
+        value = self._to_decimal(value_repr)
+
+        if hms == 0:
+            # Hour
+            res.hour = int(value)
+            if value % 1:
+                res.minute = int(60*(value % 1))
+
+        elif hms == 1:
+            (res.minute, res.second) = self._parse_min_sec(value)
+
+        elif hms == 2:
+            (res.second, res.microsecond) = self._parsems(value_repr)
+
+    def _could_be_tzname(self, hour, tzname, tzoffset, token):
+        return (hour is not None and
+                tzname is None and
+                tzoffset is None and
+                len(token) <= 5 and
+                all(x in string.ascii_uppercase for x in token))
+
+    def _ampm_valid(self, hour, ampm, fuzzy):
+        """
+        For fuzzy parsing, 'a' or 'am' (both valid English words)
+        may erroneously trigger the AM/PM flag. Deal with that
+        here.
+        """
+        val_is_ampm = True
+
+        # If there's already an AM/PM flag, this one isn't one.
+        if fuzzy and ampm is not None:
+            val_is_ampm = False
+
+        # If AM/PM is found and hour is not, raise a ValueError
+        if hour is None:
+            if fuzzy:
+                val_is_ampm = False
+            else:
+                raise ValueError('No hour specified with AM or PM flag.')
+        elif not 0 <= hour <= 12:
+            # If AM/PM is found, it's a 12 hour clock, so raise
+            # an error for invalid range
+            if fuzzy:
+                val_is_ampm = False
+            else:
+                raise ValueError('Invalid hour specified for 12-hour clock.')
+
+        return val_is_ampm
+
+    def _adjust_ampm(self, hour, ampm):
+        if hour < 12 and ampm == 1:
+            hour += 12
+        elif hour == 12 and ampm == 0:
+            hour = 0
+        return hour
+
+    def _parse_min_sec(self, value):
+        # TODO: Every usage of this function sets res.second to the return
+        # value. Are there any cases where second will be returned as None and
+        # we *don't* want to set res.second = None?
+        minute = int(value)
+        second = None
+
+        sec_remainder = value % 1
+        if sec_remainder:
+            second = int(60 * sec_remainder)
+        return (minute, second)
+
+    def _parsems(self, value):
+        """Parse an I[.F] seconds value into (seconds, microseconds)."""
+        if "." not in value:
+            return int(value), 0
+        else:
+            i, f = value.split(".")
+            return int(i), int(f.ljust(6, "0")[:6])
+
+    def _parse_hms(self, idx, tokens, info, hms_idx):
+        # TODO: Is this going to admit a lot of false-positives for when we
+        # just happen to have digits and "h", "m" or "s" characters in non-date
+        # text? I guess hex hashes won't have that problem, but there's plenty
+        # of random junk out there.
+        if hms_idx is None:
+            hms = None
+            new_idx = idx
+        elif hms_idx > idx:
+            hms = info.hms(tokens[hms_idx])
+            new_idx = hms_idx
+        else:
+            # Looking backwards, increment one.
+            hms = info.hms(tokens[hms_idx]) + 1
+            new_idx = idx
+
+        return (new_idx, hms)
+
+    def _recombine_skipped(self, tokens, skipped_idxs):
+        """
+        >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
+        >>> skipped_idxs = [0, 1, 2, 5]
+        >>> _recombine_skipped(tokens, skipped_idxs)
+        ["foo bar", "baz"]
+        """
+        skipped_tokens = []
+        for i, idx in enumerate(sorted(skipped_idxs)):
+            if i > 0 and idx - 1 == skipped_idxs[i - 1]:
+                skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
+            else:
+                skipped_tokens.append(tokens[idx])
+
+        return skipped_tokens
+
+    def _build_tzinfo(self, tzinfos, tzname, tzoffset):
+        if callable(tzinfos):
+            tzdata = tzinfos(tzname, tzoffset)
+        else:
+            tzdata = tzinfos.get(tzname)
+        # handle the case where a tzinfos entry is passed a value of None,
+        # e.g. tzinfos = {'BRST': None}
+        if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
+            tzinfo = tzdata
+        elif isinstance(tzdata, text_type):
+            tzinfo = tz.tzstr(tzdata)
+        elif isinstance(tzdata, integer_types):
+            tzinfo = tz.tzoffset(tzname, tzdata)
+        return tzinfo
+
+    def _build_tzaware(self, naive, res, tzinfos):
+        if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
+            tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
+            aware = naive.replace(tzinfo=tzinfo)
+            aware = self._assign_tzname(aware, res.tzname)
+
+        elif res.tzname and res.tzname in time.tzname:
+            aware = naive.replace(tzinfo=tz.tzlocal())
+
+            # Handle ambiguous local datetime
+            aware = self._assign_tzname(aware, res.tzname)
+
+            # This is mostly relevant for winter GMT zones parsed in the UK
+            if (aware.tzname() != res.tzname and
+                    res.tzname in self.info.UTCZONE):
+                aware = aware.replace(tzinfo=tz.tzutc())
+
+        elif res.tzoffset == 0:
+            aware = naive.replace(tzinfo=tz.tzutc())
+
+        elif res.tzoffset:
+            aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
+
+        elif not
res.tzname and not res.tzoffset: + # i.e. no timezone information was found. + aware = naive + + elif res.tzname: + # tz-like string was parsed but we don't know what to do + # with it + warnings.warn("tzname {tzname} identified but not understood. " + "Pass `tzinfos` argument in order to correctly " + "return a timezone-aware datetime. In a future " + "version, this will raise an " + "exception.".format(tzname=res.tzname), + category=UnknownTimezoneWarning) + aware = naive + + return aware + + def _build_naive(self, res, default): + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back + # to the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + naive = default.replace(**repl) + + if res.weekday is not None and not res.day: + naive = naive + relativedelta.relativedelta(weekday=res.weekday) + + return naive + + def _assign_tzname(self, dt, tzname): + if dt.tzname() != tzname: + new_dt = tz.enfold(dt, fold=1) + if new_dt.tzname() == tzname: + return new_dt + + return dt + + def _to_decimal(self, val): + try: + decimal_value = Decimal(val) + # See GH 662, edge case, infinite value should not be converted via `_to_decimal` + if not decimal_value.is_finite(): + raise ValueError("Converted decimal value is infinite or NaN") + except Exception as e: + msg = "Could not convert %s to decimal" % val + six.raise_from(ValueError(msg), e) + else: + return decimal_value + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. + + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
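+
+    For example, ``dayfirst`` and ``yearfirst`` control how ambiguous,
+    all-numeric dates are resolved (illustrative):
+
+    .. doctest::
+
+        >>> from dateutil.parser import parse
+        >>> parse("10-09-2003")
+        datetime.datetime(2003, 10, 9, 0, 0)
+        >>> parse("10-09-2003", dayfirst=True)
+        datetime.datetime(2003, 9, 10, 0, 0)
+        >>> parse("10-09-03", yearfirst=True)
+        datetime.datetime(2010, 9, 3, 0, 0)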
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] + used_idxs = list() + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + + for ii in range(j): + used_idxs.append(ii) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. + signal = (1, -1)[l[i] == '+'] + used_idxs.append(i) + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) * signal) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i]) * 3600 + + int(l[i + 2]) * 60) * signal) + used_idxs.append(i) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2]) * 3600 * signal) + else: + return None + used_idxs.append(i) + i += 1 + if res.dstabbr: + break + else: + break + + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789+-"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + used_idxs.append(i) + i += 2 + if l[i] == '-': + value = int(l[i + 1]) * -1 + used_idxs.append(i) + i += 1 + else: + value = int(l[i]) + used_idxs.append(i) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i]) - 1) % 7 + else: + x.day = int(l[i]) + used_idxs.append(i) + i += 2 + x.time = int(l[i]) + used_idxs.append(i) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + used_idxs.append(i) + i += 1 + else: + signal = 1 + used_idxs.append(i) + res.dstoffset = (res.stdoffset + int(l[i]) * signal) + + # This was a made-up format that is not in normal use + warn(('Parsed time zone "%s"' % tzstr) + + 'is in a non-standard dateutil-specific format, which ' + + 'is now deprecated; support for parsing this format ' + + 'will be removed in future versions. 
It is recommended ' + + 'that you switch to a standard format like the GNU ' + + 'TZ variable format.', tz.DeprecatedTzFormatWarning) + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + used_idxs.append(i) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + used_idxs.append(i) + i += 1 + x.month = int(l[i]) + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.weekday = (int(l[i]) - 1) % 7 + else: + # year day (zero based) + x.yday = int(l[i]) + 1 + + used_idxs.append(i) + i += 1 + + if i < len_l and l[i] == '/': + used_idxs.append(i) + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 + used_idxs.append(i) + i += 2 + if i + 1 < len_l and l[i + 1] == ':': + used_idxs.append(i) + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2]) * 3600) + else: + return None + used_idxs.append(i) + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + unused_idxs = set(range(len_l)).difference(used_idxs) + res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + +class UnknownTimezoneWarning(RuntimeWarning): + """Raised when the parser finds a timezone it cannot parse into a tzinfo""" +# vim:ts=4:sw=4:et diff --git a/libraries/dateutil/parser/isoparser.py b/libraries/dateutil/parser/isoparser.py new file mode 100644 index 00000000..cd27f93d --- /dev/null +++ b/libraries/dateutil/parser/isoparser.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +""" +This module offers a parser for ISO-8601 strings + +It is intended to support all valid date, time and datetime formats per the +ISO-8601 specification. + +..versionadded:: 2.7.0 +""" +from datetime import datetime, timedelta, time, date +import calendar +from dateutil import tz + +from functools import wraps + +import re +import six + +__all__ = ["isoparse", "isoparser"] + + +def _takes_ascii(f): + @wraps(f) + def func(self, str_in, *args, **kwargs): + # If it's a stream, read the whole thing + str_in = getattr(str_in, 'read', lambda: str_in)() + + # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII + if isinstance(str_in, six.text_type): + # ASCII is the same in UTF-8 + try: + str_in = str_in.encode('ascii') + except UnicodeEncodeError as e: + msg = 'ISO-8601 strings should contain only ASCII characters' + six.raise_from(ValueError(msg), e) + + return f(self, str_in, *args, **kwargs) + + return func + + +class isoparser(object): + def __init__(self, sep=None): + """ + :param sep: + A single character that separates date and time portions. If + ``None``, the parser will accept any single character. + For strict ISO-8601 adherence, pass ``'T'``. 
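+
+        For example (illustrative), the default accepts any single-character
+        separator, while ``sep='T'`` accepts only the standard form:
+
+        .. doctest::
+
+            >>> from dateutil.parser import isoparser
+            >>> isoparser().isoparse('2003-09-25 10:49:41')
+            datetime.datetime(2003, 9, 25, 10, 49, 41)
+            >>> isoparser(sep='T').isoparse('2003-09-25T10:49:41')
+            datetime.datetime(2003, 9, 25, 10, 49, 41)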
+ """ + if sep is not None: + if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): + raise ValueError('Separator must be a single, non-numeric ' + + 'ASCII character') + + sep = sep.encode('ascii') + + self._sep = sep + + @_takes_ascii + def isoparse(self, dt_str): + """ + Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. + + An ISO-8601 datetime string consists of a date portion, followed + optionally by a time portion - the date and time portions are separated + by a single character separator, which is ``T`` in the official + standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be + combined with a time portion. + + Supported date formats are: + + Common: + + - ``YYYY`` + - ``YYYY-MM`` or ``YYYYMM`` + - ``YYYY-MM-DD`` or ``YYYYMMDD`` + + Uncommon: + + - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) + - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day + + The ISO week and day numbering follows the same logic as + :func:`datetime.date.isocalendar`. + + Supported time formats are: + + - ``hh`` + - ``hh:mm`` or ``hhmm`` + - ``hh:mm:ss`` or ``hhmmss`` + - ``hh:mm:ss.sss`` or ``hh:mm:ss.ssssss`` (3-6 sub-second digits) + + Midnight is a special case for `hh`, as the standard supports both + 00:00 and 24:00 as a representation. + + .. caution:: + + Support for fractional components other than seconds is part of the + ISO-8601 standard, but is not currently implemented in this parser. + + Supported time zone offset formats are: + + - `Z` (UTC) + - `±HH:MM` + - `±HHMM` + - `±HH` + + Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, + with the exception of UTC, which will be represented as + :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such + as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. + + :param dt_str: + A string or stream containing only an ISO-8601 datetime string + + :return: + Returns a :class:`datetime.datetime` representing the string. + Unspecified components default to their lowest value. + + .. warning:: + + As of version 2.7.0, the strictness of the parser should not be + considered a stable part of the contract. Any valid ISO-8601 string + that parses correctly with the default settings will continue to + parse correctly in future versions, but invalid strings that + currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not + guaranteed to continue failing in future versions if they encode + a valid date. + + .. versionadded:: 2.7.0 + """ + components, pos = self._parse_isodate(dt_str) + + if len(dt_str) > pos: + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) + else: + raise ValueError('String contains unknown ISO components') + + return datetime(*components) + + @_takes_ascii + def parse_isodate(self, datestr): + """ + Parse the date portion of an ISO string. + + :param datestr: + The string portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.date` object + """ + components, pos = self._parse_isodate(datestr) + if pos < len(datestr): + raise ValueError('String contains unknown ISO ' + + 'components: {}'.format(datestr)) + return date(*components) + + @_takes_ascii + def parse_isotime(self, timestr): + """ + Parse the time portion of an ISO string. 
+ + :param timestr: + The time portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.time` object + """ + return time(*self._parse_isotime(timestr)) + + @_takes_ascii + def parse_tzstr(self, tzstr, zero_as_utc=True): + """ + Parse a valid ISO time zone string. + + See :func:`isoparser.isoparse` for details on supported formats. + + :param tzstr: + A string representing an ISO time zone offset + + :param zero_as_utc: + Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones + + :return: + Returns :class:`dateutil.tz.tzoffset` for offsets and + :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is + specified) offsets equivalent to UTC. + """ + return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) + + # Constants + _MICROSECOND_END_REGEX = re.compile(b'[-+Z]+') + _DATE_SEP = b'-' + _TIME_SEP = b':' + _MICRO_SEP = b'.' + + def _parse_isodate(self, dt_str): + try: + return self._parse_isodate_common(dt_str) + except ValueError: + return self._parse_isodate_uncommon(dt_str) + + def _parse_isodate_common(self, dt_str): + len_str = len(dt_str) + components = [1, 1, 1] + + if len_str < 4: + raise ValueError('ISO string too short') + + # Year + components[0] = int(dt_str[0:4]) + pos = 4 + if pos >= len_str: + return components, pos + + has_sep = dt_str[pos:pos + 1] == self._DATE_SEP + if has_sep: + pos += 1 + + # Month + if len_str - pos < 2: + raise ValueError('Invalid common month') + + components[1] = int(dt_str[pos:pos + 2]) + pos += 2 + + if pos >= len_str: + if has_sep: + return components, pos + else: + raise ValueError('Invalid ISO format') + + if has_sep: + if dt_str[pos:pos + 1] != self._DATE_SEP: + raise ValueError('Invalid separator in ISO string') + pos += 1 + + # Day + if len_str - pos < 2: + raise ValueError('Invalid common day') + components[2] = int(dt_str[pos:pos + 2]) + return components, pos + 2 + + def _parse_isodate_uncommon(self, dt_str): + if len(dt_str) < 4: + raise ValueError('ISO string too short') + + # All ISO formats start with the year + year = int(dt_str[0:4]) + + has_sep = dt_str[4:5] == self._DATE_SEP + + pos = 4 + has_sep # Skip '-' if it's there + if dt_str[pos:pos + 1] == b'W': + # YYYY-?Www-?D? + pos += 1 + weekno = int(dt_str[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dt_str) > pos: + if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: + raise ValueError('Inconsistent use of dash separator') + + pos += has_sep + + dayno = int(dt_str[pos:pos + 1]) + pos += 1 + + base_date = self._calculate_weekdate(year, weekno, dayno) + else: + # YYYYDDD or YYYY-DDD + if len(dt_str) - pos < 3: + raise ValueError('Invalid ordinal day') + + ordinal_day = int(dt_str[pos:pos + 3]) + pos += 3 + + if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): + raise ValueError('Invalid ordinal day' + + ' {} for year {}'.format(ordinal_day, year)) + + base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) + + components = [base_date.year, base_date.month, base_date.day] + return components, pos + + def _calculate_weekdate(self, year, week, day): + """ + Calculate the day of corresponding to the ISO year-week-day calendar. + + This function is effectively the inverse of + :func:`datetime.date.isocalendar`. 
+ + :param year: + The year in the ISO calendar + + :param week: + The week in the ISO calendar - range is [1, 53] + + :param day: + The day in the ISO calendar - range is [1 (MON), 7 (SUN)] + + :return: + Returns a :class:`datetime.date` + """ + if not 0 < week < 54: + raise ValueError('Invalid week: {}'.format(week)) + + if not 0 < day < 8: # Range is 1-7 + raise ValueError('Invalid weekday: {}'.format(day)) + + # Get week 1 for the specific year: + jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it + week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) + + # Now add the specific number of weeks and days to get what we want + week_offset = (week - 1) * 7 + (day - 1) + return week_1 + timedelta(days=week_offset) + + def _parse_isotime(self, timestr): + len_str = len(timestr) + components = [0, 0, 0, 0, None] + pos = 0 + comp = -1 + + if len(timestr) < 2: + raise ValueError('ISO time too short') + + has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP + + while pos < len_str and comp < 5: + comp += 1 + + if timestr[pos:pos + 1] in b'-+Z': + # Detect time zone boundary + components[-1] = self._parse_tzstr(timestr[pos:]) + pos = len_str + break + + if comp < 3: + # Hour, minute, second + components[comp] = int(timestr[pos:pos + 2]) + pos += 2 + if (has_sep and pos < len_str and + timestr[pos:pos + 1] == self._TIME_SEP): + pos += 1 + + if comp == 3: + # Microsecond + if timestr[pos:pos + 1] != self._MICRO_SEP: + continue + + pos += 1 + us_str = self._MICROSECOND_END_REGEX.split(timestr[pos:pos + 6], + 1)[0] + + components[comp] = int(us_str) * 10**(6 - len(us_str)) + pos += len(us_str) + + if pos < len_str: + raise ValueError('Unused components in ISO string') + + if components[0] == 24: + # Standard supports 00:00 and 24:00 as representations of midnight + if any(component != 0 for component in components[1:4]): + raise ValueError('Hour may only be 24 at 24:00:00.000') + components[0] = 0 + + return components + + def _parse_tzstr(self, tzstr, zero_as_utc=True): + if tzstr == b'Z': + return tz.tzutc() + + if len(tzstr) not in {3, 5, 6}: + raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') + + if tzstr[0:1] == b'-': + mult = -1 + elif tzstr[0:1] == b'+': + mult = 1 + else: + raise ValueError('Time zone offset requires sign') + + hours = int(tzstr[1:3]) + if len(tzstr) == 3: + minutes = 0 + else: + minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) + + if zero_as_utc and hours == 0 and minutes == 0: + return tz.tzutc() + else: + if minutes > 59: + raise ValueError('Invalid minutes in time zone offset') + + if hours > 23: + raise ValueError('Invalid hours in time zone offset') + + return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) + + +DEFAULT_ISOPARSER = isoparser() +isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/libraries/dateutil/relativedelta.py b/libraries/dateutil/relativedelta.py new file mode 100644 index 00000000..1e0d6165 --- /dev/null +++ b/libraries/dateutil/relativedelta.py @@ -0,0 +1,590 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is based on the specification of the excellent + work done by M.-A. 
Lemburg in his
+    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
+    However, notice that this type does *NOT* implement the same algorithm as
+    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+    There are two different ways to build a relativedelta instance. The
+    first one is passing it two date/datetime classes::
+
+        relativedelta(datetime1, datetime2)
+
+    The second one is passing it any number of the following keyword arguments::
+
+        relativedelta(arg1=x, arg2=y, arg3=z...)
+
+    year, month, day, hour, minute, second, microsecond:
+        Absolute information (argument is singular); adding or subtracting a
+        relativedelta with absolute information does not perform an arithmetic
+        operation, but rather REPLACES the corresponding value in the
+        original datetime with the value(s) in relativedelta.
+
+    years, months, weeks, days, hours, minutes, seconds, microseconds:
+        Relative information, may be negative (argument is plural); adding
+        or subtracting a relativedelta with relative information performs
+        the corresponding arithmetic operation on the original datetime value
+        with the information in the relativedelta.
+
+    weekday:
+        One of the weekday instances (MO, TU, etc). These
+        instances may receive a parameter N, specifying the Nth
+        weekday, which could be positive or negative (like MO(+1)
+        or MO(-2)). Not specifying it is the same as specifying
+        +1. You can also use an integer, where 0=MO. Notice that
+        if the calculated date is already Monday, for example,
+        using MO(1) or MO(-1) won't change the day.
+
+    leapdays:
+        Will add given days to the date found, if year is a leap
+        year, and the date found is after the 28th of February.
+
+    yearday, nlyearday:
+        Set the yearday or the non-leap year day (jump leap days).
+        These are converted to day/month/leapdays information.
+
+    There are relative and absolute forms of the keyword
+    arguments. The plural is relative, and the singular is
+    absolute. For each argument in the order below, the absolute form
+    is applied first (by setting each attribute to that value) and
+    then the relative form (by adding the value to the attribute).
+
+    The order of attributes considered when this relativedelta is
+    added to a datetime is:
+
+    1. Year
+    2. Month
+    3. Day
+    4. Hours
+    5. Minutes
+    6. Seconds
+    7. Microseconds
+
+    Finally, weekday is applied, using the rule described above.
+
+    For example:
+
+    >>> from datetime import datetime
+    >>> dt = datetime(2018, 4, 9, 13, 37, 0)
+    >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
+    >>> dt + delta
+    datetime.datetime(2018, 4, 2, 14, 37)
+
+    First, the day is set to 1 (the first of the month), then 25 hours
+    are added, to get to the 2nd day and 14th hour, finally the
+    weekday is applied, but since the 2nd is already a Monday there is
+    no effect.
+
+    """
+
+    def __init__(self, dt1=None, dt2=None,
+                 years=0, months=0, days=0, leapdays=0, weeks=0,
+                 hours=0, minutes=0, seconds=0, microseconds=0,
+                 year=None, month=None, day=None, weekday=None,
+                 yearday=None, nlyearday=None,
+                 hour=None, minute=None, second=None, microsecond=None):
+
+        if dt1 and dt2:
+            # datetime is a subclass of date.
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=1, hours=14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += 
datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + 
self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/libraries/dateutil/rrule.py b/libraries/dateutil/rrule.py new file mode 100644 index 00000000..8e9c2af1 --- /dev/null +++ b/libraries/dateutil/rrule.py @@ -0,0 +1,1672 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import re +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase +from .tz import tzutc, tzlocal + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. 
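+# (Each mask below maps a day-of-year index to a value: M366MASK gives the
+# month number for each day of a leap year, MDAY366MASK the day of month,
+# NMDAY366MASK the day of month counted backwards from the month's end; the
+# trailing [1]*7 / M31[:7] entries are the extra January week noted above.)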
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
+                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
+M365MASK = list(M366MASK)
+M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
+MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+MDAY365MASK = list(MDAY366MASK)
+M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
+NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+NMDAY365MASK = list(NMDAY366MASK)
+M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
+M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
+WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
+del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
+MDAY365MASK = tuple(MDAY365MASK)
+M365MASK = tuple(M365MASK)
+
+FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
+
+(YEARLY,
+ MONTHLY,
+ WEEKLY,
+ DAILY,
+ HOURLY,
+ MINUTELY,
+ SECONDLY) = list(range(7))
+
+# Imported on demand.
+easter = None
+parser = None
+
+
+class weekday(weekdaybase):
+    """
+    This version of weekday does not allow n = 0.
+    """
+    def __init__(self, wkday, n=None):
+        if n == 0:
+            raise ValueError("Can't create weekday with n==0")
+
+        super(weekday, self).__init__(wkday, n)
+
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
+
+
+def _invalidates_cache(f):
+    """
+    Decorator for rruleset methods which may invalidate the
+    cached length.
+    """
+    def inner_func(self, *args, **kwargs):
+        rv = f(self, *args, **kwargs)
+        self._invalidate_cache()
+        return rv
+
+    return inner_func
+
+
+class rrulebase(object):
+    def __init__(self, cache=False):
+        if cache:
+            self._cache = []
+            self._cache_lock = _thread.allocate_lock()
+            self._invalidate_cache()
+        else:
+            self._cache = None
+            self._cache_complete = False
+            self._len = None
+
+    def __iter__(self):
+        if self._cache_complete:
+            return iter(self._cache)
+        elif self._cache is None:
+            return self._iter()
+        else:
+            return self._iter_cached()
+
+    def _invalidate_cache(self):
+        if self._cache is not None:
+            self._cache = []
+            self._cache_complete = False
+            self._cache_gen = self._iter()
+
+            if self._cache_lock.locked():
+                self._cache_lock.release()
+
+        self._len = None
+
+    def _iter_cached(self):
+        i = 0
+        gen = self._cache_gen
+        cache = self._cache
+        acquire = self._cache_lock.acquire
+        release = self._cache_lock.release
+        while gen:
+            if i == len(cache):
+                acquire()
+                if self._cache_complete:
+                    break
+                try:
+                    for j in range(10):
+                        cache.append(advance_iterator(gen))
+                except StopIteration:
+                    self._cache_gen = gen = None
+                    self._cache_complete = True
+                    break
+                release()
+            yield cache[i]
+            i += 1
+        while i < self._len:
+            yield cache[i]
+            i += 1
+
+    def __getitem__(self, item):
+        if self._cache_complete:
+            return self._cache[item]
+        elif isinstance(item, slice):
+            if item.step and item.step < 0:
+                return list(iter(self))[item]
+            else:
+                return list(itertools.islice(self,
+                                             item.start or 0,
+                                             item.stop or sys.maxsize,
+                                             item.step or 1))
+        elif item >= 0:
+            gen = iter(self)
+            try:
+                for i in range(item+1):
+                    res = advance_iterator(gen)
+            except StopIteration:
+                raise IndexError
+            return res
+        else:
+            return list(iter(self))[item]
+
+    def __contains__(self, item):
+        if self._cache_complete:
+            return item in self._cache
+        else:
+            for i in self:
+                if i == item:
+                    return True
+                elif i > item:
+                    return False
+        return False
+
+    # __len__() introduces a large performance penalty.
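+    # (Defining __len__ would let len() and truth-testing silently expand
+    # the whole recurrence, so an explicit count() method is used instead.)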
+    def count(self):
+        """ Returns the number of recurrences in this set. It will have to
+        go through the whole recurrence, if this hasn't been done before. """
+        if self._len is None:
+            for x in self:
+                pass
+        return self._len
+
+    def before(self, dt, inc=False):
+        """ Returns the last recurrence before the given datetime instance. The
+        inc keyword defines what happens if dt is an occurrence. With
+        inc=True, if dt itself is an occurrence, it will be returned. """
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+        last = None
+        if inc:
+            for i in gen:
+                if i > dt:
+                    break
+                last = i
+        else:
+            for i in gen:
+                if i >= dt:
+                    break
+                last = i
+        return last
+
+    def after(self, dt, inc=False):
+        """ Returns the first recurrence after the given datetime instance. The
+        inc keyword defines what happens if dt is an occurrence. With
+        inc=True, if dt itself is an occurrence, it will be returned. """
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+        if inc:
+            for i in gen:
+                if i >= dt:
+                    return i
+        else:
+            for i in gen:
+                if i > dt:
+                    return i
+        return None
+
+    def xafter(self, dt, count=None, inc=False):
+        """
+        Generator which yields up to `count` recurrences after the given
+        datetime instance, equivalent to `after`.
+
+        :param dt:
+            The datetime at which to start generating recurrences.
+
+        :param count:
+            The maximum number of recurrences to generate. If `None` (default),
+            dates are generated until the recurrence rule is exhausted.
+
+        :param inc:
+            If `dt` is an instance of the rule and `inc` is `True`, it is
+            included in the output.
+
+        :yields: Yields a sequence of `datetime` objects.
+        """
+
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+
+        # Select the comparison function
+        if inc:
+            comp = lambda dc, dtc: dc >= dtc
+        else:
+            comp = lambda dc, dtc: dc > dtc
+
+        # Generate dates
+        n = 0
+        for d in gen:
+            if comp(d, dt):
+                if count is not None:
+                    n += 1
+                    if n > count:
+                        break
+
+                yield d
+
+    def between(self, after, before, inc=False, count=1):
+        """ Returns all the occurrences of the rrule between after and before.
+        The inc keyword defines what happens if after and/or before are
+        themselves occurrences. With inc=True, they will be included in the
+        list, if they are found in the recurrence set. """
+        if self._cache_complete:
+            gen = self._cache
+        else:
+            gen = self
+        started = False
+        l = []
+        if inc:
+            for i in gen:
+                if i > before:
+                    break
+                elif not started:
+                    if i >= after:
+                        started = True
+                        l.append(i)
+                else:
+                    l.append(i)
+        else:
+            for i in gen:
+                if i >= before:
+                    break
+                elif not started:
+                    if i > after:
+                        started = True
+                        l.append(i)
+                else:
+                    l.append(i)
+        return l
+
+
+class rrule(rrulebase):
+    """
+    This is the base of the rrule operation. It accepts all the keywords
+    defined in the RFC as its constructor parameters (except byday,
+    which was renamed to byweekday) and more. The constructor prototype is::
+
+            rrule(freq)
+
+    Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+    or SECONDLY.
+
+    .. note::
+        Per RFC section 3.3.10, recurrence instances falling on invalid dates
+        and times are ignored rather than coerced:
+
+            Recurrence rules may generate recurrence instances with an invalid
+            date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
+            on a day where the local time is moved forward by an hour at 1:00
+            AM). Such recurrence instances MUST be ignored and MUST NOT be
+            counted as part of the recurrence set.
+
+        This can lead to possibly surprising behavior when, for example, the
+        start date occurs at the end of the month:
+
+        >>> from dateutil.rrule import rrule, MONTHLY
+        >>> from datetime import datetime
+        >>> start_date = datetime(2014, 12, 31)
+        >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
+        ... # doctest: +NORMALIZE_WHITESPACE
+        [datetime.datetime(2014, 12, 31, 0, 0),
+         datetime.datetime(2015, 1, 31, 0, 0),
+         datetime.datetime(2015, 3, 31, 0, 0),
+         datetime.datetime(2015, 5, 31, 0, 0)]
+
+    Additionally, it supports the following keyword arguments:
+
+    :param dtstart:
+        The recurrence start. Besides being the base for the recurrence,
+        missing parameters in the final recurrence instances will also be
+        extracted from this date. If not given, datetime.now() will be used
+        instead.
+    :param interval:
+        The interval between each freq iteration. For example, when using
+        YEARLY, an interval of 2 means once every two years, but with HOURLY,
+        it means once every two hours. The default interval is 1.
+    :param wkst:
+        The week start day. Must be one of the MO, TU, WE constants, or an
+        integer, specifying the first day of the week. This will affect
+        recurrences based on weekly periods. The default week start is
+        retrieved from calendar.firstweekday(), and may be modified by
+        calendar.setfirstweekday().
+    :param count:
+        How many occurrences will be generated.
+
+        .. note::
+            As of version 2.5.0, the use of the ``until`` keyword together
+            with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
+    :param until:
+        If given, this must be a datetime instance that will specify the
+        limit of the recurrence. The last recurrence in the rule is the greatest
+        datetime that is less than or equal to the value specified in the
+        ``until`` parameter.
+
+        .. note::
+            As of version 2.5.0, the use of the ``until`` keyword together
+            with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10.
+    :param bysetpos:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each given integer will specify an occurrence
+        number, corresponding to the nth occurrence of the rule inside the
+        frequency period. For example, a bysetpos of -1 if combined with a
+        MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
+        result in the last work day of every month.
+    :param bymonth:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the months to apply the recurrence to.
+    :param bymonthday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the month days to apply the recurrence to.
+    :param byyearday:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the year days to apply the recurrence to.
+    :param byeaster:
+        If given, it must be either an integer, or a sequence of integers,
+        positive or negative. Each integer will define an offset from the
+        Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+        Sunday itself. This is an extension to the RFC specification.
+    :param byweekno:
+        If given, it must be either an integer, or a sequence of integers,
+        meaning the week numbers to apply the recurrence to. Week numbers
+        have the meaning described in ISO8601, that is, the first week of
+        the year is that containing at least four days of the new year.
+    :param byweekday:
+        If given, it must be either an integer (0 == MO), a sequence of
+        integers, one of the weekday constants (MO, TU, etc), or a sequence
+        of these constants.
When given, these variables will define the + weekdays where the recurrence will be applied. It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. 
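+
+        A brief illustrative sketch (assuming the default Monday week start;
+        a rule with only FREQ and COUNT set -- other attributes, when present,
+        are emitted as additional parts):
+
+        >>> from dateutil.rrule import rrule, WEEKLY
+        >>> from datetime import datetime
+        >>> print(rrule(WEEKLY, count=4, dtstart=datetime(2014, 12, 31)))
+        DTSTART:20141231T000000
+        RRULE:FREQ=WEEKLY;COUNT=4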
+ """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in 
bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + 
rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. 
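+
+        As an illustrative worked example (hour values assumed for
+        concreteness): with ``value=17``, ``byxxx={1, 5, 9, 13, 17, 21}``,
+        ``base=24`` and an interval of 4, the first repetition already lands
+        on ``21``, which is in ``byxxx`` and carries no day overflow, so the
+        result is ``(0, 21)``.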
+ + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + +class _rrulestr(object): + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + 
rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P<name>[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported EXDATE parm: "+parm) + exdatevals.append(value) 
+ elif name == "DTSTART": + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but + # may be found only once. + value_found = False + TZID = None + valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"} + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = TZID_NAMES[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, ' + + 'mapping, or None, ' + + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + if parm not in valid_values: + raise ValueError("unsupported DTSTART parm: "+parm) + else: + if value_found: + msg = ("Duplicate value parameter found in " + + "DTSTART: " + parm) + raise ValueError(msg) + value_found = True + if not parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + if TZID is not None: + if dtstart.tzinfo is None: + dtstart = dtstart.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART specifies multiple timezones') + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + rset.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/libraries/dateutil/test/__init__.py b/libraries/dateutil/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/libraries/dateutil/test/_common.py b/libraries/dateutil/test/_common.py new file mode 100644 index 00000000..264dfbda --- /dev/null +++ b/libraries/dateutil/test/_common.py @@ -0,0 +1,275 @@ +from __future__ import unicode_literals +import os +import time +import subprocess +import warnings +import tempfile +import pickle + + +class WarningTestMixin(object): + # Based on https://stackoverflow.com/a/12935176/467366 + class _AssertWarnsContext(warnings.catch_warnings): + def __init__(self, expected_warnings, parent, **kwargs): + super(WarningTestMixin._AssertWarnsContext, self).__init__(**kwargs) + + self.parent = parent + try: + self.expected_warnings = list(expected_warnings) + except TypeError: + self.expected_warnings = [expected_warnings] + + self._warning_log = [] + + def __enter__(self, *args, **kwargs): + rv = super(WarningTestMixin._AssertWarnsContext, self).__enter__(*args, **kwargs) + + if self._showwarning is not self._module.showwarning: + super_showwarning = self._module.showwarning + else: + super_showwarning = None + + def showwarning(*args, **kwargs): + if super_showwarning is not None: + super_showwarning(*args, 
**kwargs)
+
+                self._warning_log.append(warnings.WarningMessage(*args, **kwargs))
+
+            self._module.showwarning = showwarning
+            return rv
+
+        def __exit__(self, *args, **kwargs):
+            super(WarningTestMixin._AssertWarnsContext, self).__exit__(*args, **kwargs)
+
+            self.parent.assertTrue(any(issubclass(item.category, warning)
+                                       for warning in self.expected_warnings
+                                       for item in self._warning_log))
+
+    def assertWarns(self, warning, callable=None, *args, **kwargs):
+        warnings.simplefilter('always')
+        context = self.__class__._AssertWarnsContext(warning, self)
+        if callable is None:
+            return context
+        else:
+            with context:
+                callable(*args, **kwargs)
+
+
+class PicklableMixin(object):
+    def _get_nobj_bytes(self, obj, dump_kwargs, load_kwargs):
+        """
+        Pickle and unpickle an object using ``pickle.dumps`` / ``pickle.loads``
+        """
+        pkl = pickle.dumps(obj, **dump_kwargs)
+        return pickle.loads(pkl, **load_kwargs)
+
+    def _get_nobj_file(self, obj, dump_kwargs, load_kwargs):
+        """
+        Pickle and unpickle an object using ``pickle.dump`` / ``pickle.load`` on
+        a temporary file.
+        """
+        with tempfile.TemporaryFile('w+b') as pkl:
+            pickle.dump(obj, pkl, **dump_kwargs)
+            pkl.seek(0)         # Reset the file to the beginning to read it
+            nobj = pickle.load(pkl, **load_kwargs)
+
+        return nobj
+
+    def assertPicklable(self, obj, singleton=False, asfile=False,
+                        dump_kwargs=None, load_kwargs=None):
+        """
+        Assert that an object can be pickled and unpickled. This assertion
+        assumes that the desired behavior is that the unpickled object compares
+        equal to the original object, but is not the same object.
+        """
+        get_nobj = self._get_nobj_file if asfile else self._get_nobj_bytes
+        dump_kwargs = dump_kwargs or {}
+        load_kwargs = load_kwargs or {}
+
+        nobj = get_nobj(obj, dump_kwargs, load_kwargs)
+        if not singleton:
+            self.assertIsNot(obj, nobj)
+        self.assertEqual(obj, nobj)
+
+
+class TZContextBase(object):
+    """
+    Base class for a context manager which allows changing of time zones.
+
+    Subclasses may define a guard variable to either block or allow time
+    zone changes by redefining ``_guard_var_name`` and
+    ``_guard_allows_change``. The default is that the guard variable must
+    be affirmatively set.
+
+    Subclasses must define ``get_current_tz`` and ``set_current_tz``.
+    """
+    _guard_var_name = "DATEUTIL_MAY_CHANGE_TZ"
+    _guard_allows_change = True
+
+    def __init__(self, tzval):
+        self.tzval = tzval
+        self._old_tz = None
+
+    @classmethod
+    def tz_change_allowed(cls):
+        """
+        Class method used to query whether or not this class allows time zone
+        changes.
+        """
+        guard = bool(os.environ.get(cls._guard_var_name, False))
+
+        # _guard_allows_change gives the "default" behavior - if True, the
+        # guard is overcoming a block. If false, the guard is causing a block.
+        # Whether tz_change is allowed is therefore the XNOR of the two.
+        return guard == cls._guard_allows_change
+
+    @classmethod
+    def tz_change_disallowed_message(cls):
+        """ Generate instructions on how to allow tz changes """
+        msg = ('Changing time zone not allowed. Set {envar} to {gval} '
+               'if you would like to allow this behavior')
+
+        return msg.format(envar=cls._guard_var_name,
+                          gval=cls._guard_allows_change)
+
+    def __enter__(self):
+        if not self.tz_change_allowed():
+            raise ValueError(self.tz_change_disallowed_message())
+
+        self._old_tz = self.get_current_tz()
+        self.set_current_tz(self.tzval)
+
+    def __exit__(self, type, value, traceback):
+        if self._old_tz is not None:
+            self.set_current_tz(self._old_tz)
+
+        self._old_tz = None
+
+    def get_current_tz(self):
+        raise NotImplementedError
+
+    def set_current_tz(self, tzval):
+        raise NotImplementedError
+
+
+class TZEnvContext(TZContextBase):
+    """
+    Context manager that temporarily sets the `TZ` variable (for use on
+    *nix-like systems). Because the effect is local to the shell anyway, this
+    will apply *unless* a guard is set.
+
+    If you do not want the TZ environment variable set, you may set the
+    ``DATEUTIL_MAY_NOT_CHANGE_TZ_VAR`` variable to a truthy value.
+    """
+    _guard_var_name = "DATEUTIL_MAY_NOT_CHANGE_TZ_VAR"
+    _guard_allows_change = False
+
+    def get_current_tz(self):
+        return os.environ.get('TZ', UnsetTz)
+
+    def set_current_tz(self, tzval):
+        if tzval is UnsetTz and 'TZ' in os.environ:
+            del os.environ['TZ']
+        else:
+            os.environ['TZ'] = tzval
+
+        time.tzset()
+
+
+class TZWinContext(TZContextBase):
+    """
+    Context manager for changing local time zone on Windows.
+
+    Because the effect of this is system-wide and global, it may have
+    unintended side effects. Set the ``DATEUTIL_MAY_CHANGE_TZ`` environment
+    variable to a truthy value before using this context manager.
+    """
+    def get_current_tz(self):
+        p = subprocess.Popen(['tzutil', '/g'], stdout=subprocess.PIPE)
+
+        ctzname, err = p.communicate()
+        ctzname = ctzname.decode()      # Popen returns bytes
+
+        if p.returncode:
+            raise OSError('Failed to get current time zone: ' + err)
+
+        return ctzname
+
+    def set_current_tz(self, tzname):
+        p = subprocess.Popen('tzutil /s "' + tzname + '"')
+
+        out, err = p.communicate()
+
+        if p.returncode:
+            raise OSError('Failed to set current time zone: ' +
+                          (err or 'Unknown error.'))
+
+
+###
+# Utility classes
+class NotAValueClass(object):
+    """
+    A class analogous to NaN that has operations defined for any type.
+    """
+    def _op(self, other):
+        return self             # Operation with NotAValue returns NotAValue
+
+    def _cmp(self, other):
+        return False
+
+    __add__ = __radd__ = _op
+    __sub__ = __rsub__ = _op
+    __mul__ = __rmul__ = _op
+    __div__ = __rdiv__ = _op
+    __truediv__ = __rtruediv__ = _op
+    __floordiv__ = __rfloordiv__ = _op
+
+    __lt__ = __rlt__ = _op
+    __gt__ = __rgt__ = _op
+    __eq__ = __req__ = _op
+    __le__ = __rle__ = _op
+    __ge__ = __rge__ = _op
+
+
+NotAValue = NotAValueClass()
+
+
+class ComparesEqualClass(object):
+    """
+    A class that is always equal to whatever you compare it to.
+ """ + + def __eq__(self, other): + return True + + def __ne__(self, other): + return False + + def __le__(self, other): + return True + + def __ge__(self, other): + return True + + def __lt__(self, other): + return False + + def __gt__(self, other): + return False + + __req__ = __eq__ + __rne__ = __ne__ + __rle__ = __le__ + __rge__ = __ge__ + __rlt__ = __lt__ + __rgt__ = __gt__ + + +ComparesEqual = ComparesEqualClass() + + +class UnsetTzClass(object): + """ Sentinel class for unset time zone variable """ + pass + + +UnsetTz = UnsetTzClass() diff --git a/libraries/dateutil/test/property/test_isoparse_prop.py b/libraries/dateutil/test/property/test_isoparse_prop.py new file mode 100644 index 00000000..c6a4b82a --- /dev/null +++ b/libraries/dateutil/test/property/test_isoparse_prop.py @@ -0,0 +1,27 @@ +from hypothesis import given, assume +from hypothesis import strategies as st + +from dateutil import tz +from dateutil.parser import isoparse + +import pytest + +# Strategies +TIME_ZONE_STRATEGY = st.sampled_from([None, tz.tzutc()] + + [tz.gettz(zname) for zname in ('US/Eastern', 'US/Pacific', + 'Australia/Sydney', 'Europe/London')]) +ASCII_STRATEGY = st.characters(max_codepoint=127) + + +@pytest.mark.isoparser +@given(dt=st.datetimes(timezones=TIME_ZONE_STRATEGY), sep=ASCII_STRATEGY) +def test_timespec_auto(dt, sep): + if dt.tzinfo is not None: + # Assume offset has no sub-second components + assume(dt.utcoffset().total_seconds() % 60 == 0) + + sep = str(sep) # Python 2.7 requires bytes + dtstr = dt.isoformat(sep=sep) + dt_rt = isoparse(dtstr) + + assert dt_rt == dt diff --git a/libraries/dateutil/test/property/test_parser_prop.py b/libraries/dateutil/test/property/test_parser_prop.py new file mode 100644 index 00000000..fdfd171e --- /dev/null +++ b/libraries/dateutil/test/property/test_parser_prop.py @@ -0,0 +1,22 @@ +from hypothesis.strategies import integers +from hypothesis import given + +import pytest + +from dateutil.parser import parserinfo + + +@pytest.mark.parserinfo +@given(integers(min_value=100, max_value=9999)) +def test_convertyear(n): + assert n == parserinfo().convertyear(n) + + +@pytest.mark.parserinfo +@given(integers(min_value=-50, + max_value=49)) +def test_convertyear_no_specified_century(n): + p = parserinfo() + new_year = p._year + n + result = p.convertyear(new_year % 100, century_specified=False) + assert result == new_year diff --git a/libraries/dateutil/test/test_easter.py b/libraries/dateutil/test/test_easter.py new file mode 100644 index 00000000..eeb094ee --- /dev/null +++ b/libraries/dateutil/test/test_easter.py @@ -0,0 +1,95 @@ +from dateutil.easter import easter +from dateutil.easter import EASTER_WESTERN, EASTER_ORTHODOX, EASTER_JULIAN + +from datetime import date +import unittest + +# List of easters between 1990 and 2050 +western_easter_dates = [ + date(1990, 4, 15), date(1991, 3, 31), date(1992, 4, 19), date(1993, 4, 11), + date(1994, 4, 3), date(1995, 4, 16), date(1996, 4, 7), date(1997, 3, 30), + date(1998, 4, 12), date(1999, 4, 4), + + date(2000, 4, 23), date(2001, 4, 15), date(2002, 3, 31), date(2003, 4, 20), + date(2004, 4, 11), date(2005, 3, 27), date(2006, 4, 16), date(2007, 4, 8), + date(2008, 3, 23), date(2009, 4, 12), + + date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 8), date(2013, 3, 31), + date(2014, 4, 20), date(2015, 4, 5), date(2016, 3, 27), date(2017, 4, 16), + date(2018, 4, 1), date(2019, 4, 21), + + date(2020, 4, 12), date(2021, 4, 4), date(2022, 4, 17), date(2023, 4, 9), + date(2024, 3, 31), date(2025, 4, 20), date(2026, 4, 5), 
date(2027, 3, 28), + date(2028, 4, 16), date(2029, 4, 1), + + date(2030, 4, 21), date(2031, 4, 13), date(2032, 3, 28), date(2033, 4, 17), + date(2034, 4, 9), date(2035, 3, 25), date(2036, 4, 13), date(2037, 4, 5), + date(2038, 4, 25), date(2039, 4, 10), + + date(2040, 4, 1), date(2041, 4, 21), date(2042, 4, 6), date(2043, 3, 29), + date(2044, 4, 17), date(2045, 4, 9), date(2046, 3, 25), date(2047, 4, 14), + date(2048, 4, 5), date(2049, 4, 18), date(2050, 4, 10) + ] + +orthodox_easter_dates = [ + date(1990, 4, 15), date(1991, 4, 7), date(1992, 4, 26), date(1993, 4, 18), + date(1994, 5, 1), date(1995, 4, 23), date(1996, 4, 14), date(1997, 4, 27), + date(1998, 4, 19), date(1999, 4, 11), + + date(2000, 4, 30), date(2001, 4, 15), date(2002, 5, 5), date(2003, 4, 27), + date(2004, 4, 11), date(2005, 5, 1), date(2006, 4, 23), date(2007, 4, 8), + date(2008, 4, 27), date(2009, 4, 19), + + date(2010, 4, 4), date(2011, 4, 24), date(2012, 4, 15), date(2013, 5, 5), + date(2014, 4, 20), date(2015, 4, 12), date(2016, 5, 1), date(2017, 4, 16), + date(2018, 4, 8), date(2019, 4, 28), + + date(2020, 4, 19), date(2021, 5, 2), date(2022, 4, 24), date(2023, 4, 16), + date(2024, 5, 5), date(2025, 4, 20), date(2026, 4, 12), date(2027, 5, 2), + date(2028, 4, 16), date(2029, 4, 8), + + date(2030, 4, 28), date(2031, 4, 13), date(2032, 5, 2), date(2033, 4, 24), + date(2034, 4, 9), date(2035, 4, 29), date(2036, 4, 20), date(2037, 4, 5), + date(2038, 4, 25), date(2039, 4, 17), + + date(2040, 5, 6), date(2041, 4, 21), date(2042, 4, 13), date(2043, 5, 3), + date(2044, 4, 24), date(2045, 4, 9), date(2046, 4, 29), date(2047, 4, 21), + date(2048, 4, 5), date(2049, 4, 25), date(2050, 4, 17) +] + +# A random smattering of Julian dates. +# Pulled values from http://www.kevinlaughery.com/east4099.html +julian_easter_dates = [ + date( 326, 4, 3), date( 375, 4, 5), date( 492, 4, 5), date( 552, 3, 31), + date( 562, 4, 9), date( 569, 4, 21), date( 597, 4, 14), date( 621, 4, 19), + date( 636, 3, 31), date( 655, 3, 29), date( 700, 4, 11), date( 725, 4, 8), + date( 750, 3, 29), date( 782, 4, 7), date( 835, 4, 18), date( 849, 4, 14), + date( 867, 3, 30), date( 890, 4, 12), date( 922, 4, 21), date( 934, 4, 6), + date(1049, 3, 26), date(1058, 4, 19), date(1113, 4, 6), date(1119, 3, 30), + date(1242, 4, 20), date(1255, 3, 28), date(1257, 4, 8), date(1258, 3, 24), + date(1261, 4, 24), date(1278, 4, 17), date(1333, 4, 4), date(1351, 4, 17), + date(1371, 4, 6), date(1391, 3, 26), date(1402, 3, 26), date(1412, 4, 3), + date(1439, 4, 5), date(1445, 3, 28), date(1531, 4, 9), date(1555, 4, 14) +] + + +class EasterTest(unittest.TestCase): + def testEasterWestern(self): + for easter_date in western_easter_dates: + self.assertEqual(easter_date, + easter(easter_date.year, EASTER_WESTERN)) + + def testEasterOrthodox(self): + for easter_date in orthodox_easter_dates: + self.assertEqual(easter_date, + easter(easter_date.year, EASTER_ORTHODOX)) + + def testEasterJulian(self): + for easter_date in julian_easter_dates: + self.assertEqual(easter_date, + easter(easter_date.year, EASTER_JULIAN)) + + def testEasterBadMethod(self): + # Invalid methods raise ValueError + with self.assertRaises(ValueError): + easter(1975, 4) diff --git a/libraries/dateutil/test/test_import_star.py b/libraries/dateutil/test/test_import_star.py new file mode 100644 index 00000000..8e66f38a --- /dev/null +++ b/libraries/dateutil/test/test_import_star.py @@ -0,0 +1,33 @@ +"""Test for the "import *" functionality. 
+
+As import * can only be done at module level, it has been added in a separate file
+"""
+import unittest
+
+prev_locals = list(locals())
+from dateutil import *
+new_locals = {name: value for name, value in locals().items()
+              if name not in prev_locals}
+new_locals.pop('prev_locals')
+
+
+class ImportStarTest(unittest.TestCase):
+    """ Test that `from dateutil import *` adds the modules in __all__ locally"""
+
+    def testImportedModules(self):
+        import dateutil.easter
+        import dateutil.parser
+        import dateutil.relativedelta
+        import dateutil.rrule
+        import dateutil.tz
+        import dateutil.utils
+        import dateutil.zoneinfo
+
+        self.assertEqual(dateutil.easter, new_locals.pop("easter"))
+        self.assertEqual(dateutil.parser, new_locals.pop("parser"))
+        self.assertEqual(dateutil.relativedelta, new_locals.pop("relativedelta"))
+        self.assertEqual(dateutil.rrule, new_locals.pop("rrule"))
+        self.assertEqual(dateutil.tz, new_locals.pop("tz"))
+        self.assertEqual(dateutil.utils, new_locals.pop("utils"))
+        self.assertEqual(dateutil.zoneinfo, new_locals.pop("zoneinfo"))
+
+        self.assertFalse(new_locals)
diff --git a/libraries/dateutil/test/test_imports.py b/libraries/dateutil/test/test_imports.py
new file mode 100644
index 00000000..2a19b62a
--- /dev/null
+++ b/libraries/dateutil/test/test_imports.py
@@ -0,0 +1,166 @@
+import sys
+import unittest
+
+class ImportVersionTest(unittest.TestCase):
+    """ Test that dateutil.__version__ can be imported"""
+
+    def testImportVersionStr(self):
+        from dateutil import __version__
+
+    def testImportRoot(self):
+        import dateutil
+
+        self.assertTrue(hasattr(dateutil, '__version__'))
+
+
+class ImportEasterTest(unittest.TestCase):
+    """ Test that dateutil.easter-related imports work properly """
+
+    def testEasterDirect(self):
+        import dateutil.easter
+
+    def testEasterFrom(self):
+        from dateutil import easter
+
+    def testEasterStar(self):
+        from dateutil.easter import easter
+
+
+class ImportParserTest(unittest.TestCase):
+    """ Test that dateutil.parser-related imports work properly """
+    def testParserDirect(self):
+        import dateutil.parser
+
+    def testParserFrom(self):
+        from dateutil import parser
+
+    def testParserAll(self):
+        # All interface
+        from dateutil.parser import parse
+        from dateutil.parser import parserinfo
+
+        # Other public classes
+        from dateutil.parser import parser
+
+        for var in (parse, parserinfo, parser):
+            self.assertIsNot(var, None)
+
+
+class ImportRelativeDeltaTest(unittest.TestCase):
+    """ Test that dateutil.relativedelta-related imports work properly """
+    def testRelativeDeltaDirect(self):
+        import dateutil.relativedelta
+
+    def testRelativeDeltaFrom(self):
+        from dateutil import relativedelta
+
+    def testRelativeDeltaAll(self):
+        from dateutil.relativedelta import relativedelta
+        from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
+
+        for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
+            self.assertIsNot(var, None)
+
+        # In the public interface but not in all
+        from dateutil.relativedelta import weekday
+        self.assertIsNot(weekday, None)
+
+
+class ImportRRuleTest(unittest.TestCase):
+    """ Test that dateutil.rrule related imports work properly """
+    def testRRuleDirect(self):
+        import dateutil.rrule
+
+    def testRRuleFrom(self):
+        from dateutil import rrule
+
+    def testRRuleAll(self):
+        from dateutil.rrule import rrule
+        from dateutil.rrule import rruleset
+        from dateutil.rrule import rrulestr
+        from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
+        from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
+        from dateutil.rrule
import MO, TU, WE, TH, FR, SA, SU + + rr_all = (rrule, rruleset, rrulestr, + YEARLY, MONTHLY, WEEKLY, DAILY, + HOURLY, MINUTELY, SECONDLY, + MO, TU, WE, TH, FR, SA, SU) + + for var in rr_all: + self.assertIsNot(var, None) + + # In the public interface but not in all + from dateutil.rrule import weekday + self.assertIsNot(weekday, None) + + +class ImportTZTest(unittest.TestCase): + """ Test that dateutil.tz related imports work properly """ + def testTzDirect(self): + import dateutil.tz + + def testTzFrom(self): + from dateutil import tz + + def testTzAll(self): + from dateutil.tz import tzutc + from dateutil.tz import tzoffset + from dateutil.tz import tzlocal + from dateutil.tz import tzfile + from dateutil.tz import tzrange + from dateutil.tz import tzstr + from dateutil.tz import tzical + from dateutil.tz import gettz + from dateutil.tz import tzwin + from dateutil.tz import tzwinlocal + from dateutil.tz import UTC + from dateutil.tz import datetime_ambiguous + from dateutil.tz import datetime_exists + from dateutil.tz import resolve_imaginary + + tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "gettz", "datetime_ambiguous", + "datetime_exists", "resolve_imaginary", "UTC"] + + tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else [] + lvars = locals() + + for var in tz_all: + self.assertIsNot(lvars[var], None) + +@unittest.skipUnless(sys.platform.startswith('win'), "Requires Windows") +class ImportTZWinTest(unittest.TestCase): + """ Test that dateutil.tzwin related imports work properly """ + def testTzwinDirect(self): + import dateutil.tzwin + + def testTzwinFrom(self): + from dateutil import tzwin + + def testTzwinStar(self): + from dateutil.tzwin import tzwin + from dateutil.tzwin import tzwinlocal + + tzwin_all = [tzwin, tzwinlocal] + + for var in tzwin_all: + self.assertIsNot(var, None) + + +class ImportZoneInfoTest(unittest.TestCase): + def testZoneinfoDirect(self): + import dateutil.zoneinfo + + def testZoneinfoFrom(self): + from dateutil import zoneinfo + + def testZoneinfoStar(self): + from dateutil.zoneinfo import gettz + from dateutil.zoneinfo import gettz_db_metadata + from dateutil.zoneinfo import rebuild + + zi_all = (gettz, gettz_db_metadata, rebuild) + + for var in zi_all: + self.assertIsNot(var, None) diff --git a/libraries/dateutil/test/test_internals.py b/libraries/dateutil/test/test_internals.py new file mode 100644 index 00000000..a64c5148 --- /dev/null +++ b/libraries/dateutil/test/test_internals.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +""" +Tests for implementation details, not necessarily part of the user-facing +API. + +The motivating case for these tests is #483, where we want to smoke-test +code that may be difficult to reach through the standard API calls. 
+""" + +import unittest +import sys + +import pytest + +from dateutil.parser._parser import _ymd +from dateutil import tz + +IS_PY32 = sys.version_info[0:2] == (3, 2) + + +class TestYMD(unittest.TestCase): + + # @pytest.mark.smoke + def test_could_be_day(self): + ymd = _ymd('foo bar 124 baz') + + ymd.append(2, 'M') + assert ymd.has_month + assert not ymd.has_year + assert ymd.could_be_day(4) + assert not ymd.could_be_day(-6) + assert not ymd.could_be_day(32) + + # Assumes leapyear + assert ymd.could_be_day(29) + + ymd.append(1999) + assert ymd.has_year + assert not ymd.could_be_day(29) + + ymd.append(16, 'D') + assert ymd.has_day + assert not ymd.could_be_day(1) + + ymd = _ymd('foo bar 124 baz') + ymd.append(1999) + assert ymd.could_be_day(31) + + +### +# Test that private interfaces in _parser are deprecated properly +@pytest.mark.skipif(IS_PY32, reason='pytest.warns not supported on Python 3.2') +def test_parser_private_warns(): + from dateutil.parser import _timelex, _tzparser + from dateutil.parser import _parsetz + + with pytest.warns(DeprecationWarning): + _tzparser() + + with pytest.warns(DeprecationWarning): + _timelex('2014-03-03') + + with pytest.warns(DeprecationWarning): + _parsetz('+05:00') + + +@pytest.mark.skipif(IS_PY32, reason='pytest.warns not supported on Python 3.2') +def test_parser_parser_private_not_warns(): + from dateutil.parser._parser import _timelex, _tzparser + from dateutil.parser._parser import _parsetz + + with pytest.warns(None) as recorder: + _tzparser() + assert len(recorder) == 0 + + with pytest.warns(None) as recorder: + _timelex('2014-03-03') + + assert len(recorder) == 0 + + with pytest.warns(None) as recorder: + _parsetz('+05:00') + assert len(recorder) == 0 + + +@pytest.mark.tzstr +def test_tzstr_internal_timedeltas(): + with pytest.warns(tz.DeprecatedTzFormatWarning): + tz1 = tz.tzstr("EST5EDT,5,4,0,7200,11,-3,0,7200") + + with pytest.warns(tz.DeprecatedTzFormatWarning): + tz2 = tz.tzstr("EST5EDT,4,1,0,7200,10,-1,0,7200") + + assert tz1._start_delta != tz2._start_delta + assert tz1._end_delta != tz2._end_delta diff --git a/libraries/dateutil/test/test_isoparser.py b/libraries/dateutil/test/test_isoparser.py new file mode 100644 index 00000000..28c1bf76 --- /dev/null +++ b/libraries/dateutil/test/test_isoparser.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import datetime, timedelta, date, time +import itertools as it + +from dateutil.tz import tz +from dateutil.parser import isoparser, isoparse + +import pytest +import six + +UTC = tz.tzutc() + +def _generate_tzoffsets(limited): + def _mkoffset(hmtuple, fmt): + h, m = hmtuple + m_td = (-1 if h < 0 else 1) * m + + tzo = tz.tzoffset(None, timedelta(hours=h, minutes=m_td)) + return tzo, fmt.format(h, m) + + out = [] + if not limited: + # The subset that's just hours + hm_out_h = [(h, 0) for h in (-23, -5, 0, 5, 23)] + out.extend([_mkoffset(hm, '{:+03d}') for hm in hm_out_h]) + + # Ones that have hours and minutes + hm_out = [] + hm_out_h + hm_out += [(-12, 15), (11, 30), (10, 2), (5, 15), (-5, 30)] + else: + hm_out = [(-5, -0)] + + fmts = ['{:+03d}:{:02d}', '{:+03d}{:02d}'] + out += [_mkoffset(hm, fmt) for hm in hm_out for fmt in fmts] + + # Also add in UTC and naive + out.append((tz.tzutc(), 'Z')) + out.append((None, '')) + + return out + +FULL_TZOFFSETS = _generate_tzoffsets(False) +FULL_TZOFFSETS_AWARE = [x for x in FULL_TZOFFSETS if x[1]] +TZOFFSETS = _generate_tzoffsets(True) + +DATES = [datetime(1996, 1, 1), datetime(2017, 1, 1)] 
+@pytest.mark.parametrize('dt', tuple(DATES)) +def test_year_only(dt): + dtstr = dt.strftime('%Y') + + assert isoparse(dtstr) == dt + +DATES += [datetime(2000, 2, 1), datetime(2017, 4, 1)] +@pytest.mark.parametrize('dt', tuple(DATES)) +def test_year_month(dt): + fmt = '%Y-%m' + dtstr = dt.strftime(fmt) + + assert isoparse(dtstr) == dt + +DATES += [datetime(2016, 2, 29), datetime(2018, 3, 15)] +YMD_FMTS = ('%Y%m%d', '%Y-%m-%d') +@pytest.mark.parametrize('dt', tuple(DATES)) +@pytest.mark.parametrize('fmt', YMD_FMTS) +def test_year_month_day(dt, fmt): + dtstr = dt.strftime(fmt) + + assert isoparse(dtstr) == dt + +def _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset, + microsecond_precision=None): + tzi, offset_str = tzoffset + fmt = date_fmt + 'T' + time_fmt + dt = dt.replace(tzinfo=tzi) + dtstr = dt.strftime(fmt) + + if microsecond_precision is not None: + if not fmt.endswith('%f'): + raise ValueError('Time format has no microseconds!') + + if microsecond_precision != 6: + dtstr = dtstr[:-(6 - microsecond_precision)] + elif microsecond_precision > 6: + raise ValueError('Precision must be 1-6') + + dtstr += offset_str + + assert isoparse(dtstr) == dt + +DATETIMES = [datetime(1998, 4, 16, 12), + datetime(2019, 11, 18, 23), + datetime(2014, 12, 16, 4)] +@pytest.mark.parametrize('dt', tuple(DATETIMES)) +@pytest.mark.parametrize('date_fmt', YMD_FMTS) +@pytest.mark.parametrize('tzoffset', TZOFFSETS) +def test_ymd_h(dt, date_fmt, tzoffset): + _isoparse_date_and_time(dt, date_fmt, '%H', tzoffset) + +DATETIMES = [datetime(2012, 1, 6, 9, 37)] +@pytest.mark.parametrize('dt', tuple(DATETIMES)) +@pytest.mark.parametrize('date_fmt', YMD_FMTS) +@pytest.mark.parametrize('time_fmt', ('%H%M', '%H:%M')) +@pytest.mark.parametrize('tzoffset', TZOFFSETS) +def test_ymd_hm(dt, date_fmt, time_fmt, tzoffset): + _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset) + +DATETIMES = [datetime(2003, 9, 2, 22, 14, 2), + datetime(2003, 8, 8, 14, 9, 14), + datetime(2003, 4, 7, 6, 14, 59)] +HMS_FMTS = ('%H%M%S', '%H:%M:%S') +@pytest.mark.parametrize('dt', tuple(DATETIMES)) +@pytest.mark.parametrize('date_fmt', YMD_FMTS) +@pytest.mark.parametrize('time_fmt', HMS_FMTS) +@pytest.mark.parametrize('tzoffset', TZOFFSETS) +def test_ymd_hms(dt, date_fmt, time_fmt, tzoffset): + _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset) + +DATETIMES = [datetime(2017, 11, 27, 6, 14, 30, 123456)] +@pytest.mark.parametrize('dt', tuple(DATETIMES)) +@pytest.mark.parametrize('date_fmt', YMD_FMTS) +@pytest.mark.parametrize('time_fmt', (x + '.%f' for x in HMS_FMTS)) +@pytest.mark.parametrize('tzoffset', TZOFFSETS) +@pytest.mark.parametrize('precision', list(range(3, 7))) +def test_ymd_hms_micro(dt, date_fmt, time_fmt, tzoffset, precision): + # Truncate the microseconds to the desired precision for the representation + dt = dt.replace(microsecond=int(round(dt.microsecond, precision-6))) + + _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset, precision) + +@pytest.mark.parametrize('tzoffset', FULL_TZOFFSETS) +def test_full_tzoffsets(tzoffset): + dt = datetime(2017, 11, 27, 6, 14, 30, 123456) + date_fmt = '%Y-%m-%d' + time_fmt = '%H:%M:%S.%f' + + _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset) + +@pytest.mark.parametrize('dt_str', [ + '2014-04-11T00', + '2014-04-11T24', + '2014-04-11T00:00', + '2014-04-11T24:00', + '2014-04-11T00:00:00', + '2014-04-11T24:00:00', + '2014-04-11T00:00:00.000', + '2014-04-11T24:00:00.000', + '2014-04-11T00:00:00.000000', + '2014-04-11T24:00:00.000000'] +) +def 
test_datetime_midnight(dt_str):
+    assert isoparse(dt_str) == datetime(2014, 4, 11, 0, 0, 0, 0)
+
+@pytest.mark.parametrize('datestr', [
+    '2014-01-01',
+    '20140101',
+])
+@pytest.mark.parametrize('sep', [' ', 'a', 'T', '_', '-'])
+def test_isoparse_sep_none(datestr, sep):
+    isostr = datestr + sep + '14:33:09'
+    assert isoparse(isostr) == datetime(2014, 1, 1, 14, 33, 9)
+
+##
+# Uncommon date formats
+TIME_ARGS = ('time_args',
+             ((None, time(0), None), ) + tuple(('%H:%M:%S.%f', _t, _tz)
+             for _t, _tz in it.product([time(0), time(9, 30), time(14, 47)],
+                                       TZOFFSETS)))
+
+@pytest.mark.parametrize('isocal,dt_expected', [
+    ((2017, 10), datetime(2017, 3, 6)),
+    ((2020, 1), datetime(2019, 12, 30)),   # ISO year != Cal year
+    ((2004, 53), datetime(2004, 12, 27)),  # Only half the week is in 2004
+])
+def test_isoweek(isocal, dt_expected):
+    # TODO: Figure out how to parametrize this on formats, too
+    for fmt in ('{:04d}-W{:02d}', '{:04d}W{:02d}'):
+        dtstr = fmt.format(*isocal)
+        assert isoparse(dtstr) == dt_expected
+
+@pytest.mark.parametrize('isocal,dt_expected', [
+    ((2016, 13, 7), datetime(2016, 4, 3)),
+    ((2004, 53, 7), datetime(2005, 1, 2)),    # ISO year != Cal year
+    ((2009, 1, 2), datetime(2008, 12, 30)),   # ISO year < Cal year
+    ((2009, 53, 6), datetime(2010, 1, 2))     # ISO year > Cal year
+])
+def test_isoweek_day(isocal, dt_expected):
+    # TODO: Figure out how to parametrize this on formats, too
+    for fmt in ('{:04d}-W{:02d}-{:d}', '{:04d}W{:02d}{:d}'):
+        dtstr = fmt.format(*isocal)
+        assert isoparse(dtstr) == dt_expected
+
+@pytest.mark.parametrize('isoord,dt_expected', [
+    ((2004, 1), datetime(2004, 1, 1)),
+    ((2016, 60), datetime(2016, 2, 29)),
+    ((2017, 60), datetime(2017, 3, 1)),
+    ((2016, 366), datetime(2016, 12, 31)),
+    ((2017, 365), datetime(2017, 12, 31))
+])
+def test_iso_ordinal(isoord, dt_expected):
+    for fmt in ('{:04d}-{:03d}', '{:04d}{:03d}'):
+        dtstr = fmt.format(*isoord)
+
+        assert isoparse(dtstr) == dt_expected
+
+
+###
+# Acceptance of bytes
+@pytest.mark.parametrize('isostr,dt', [
+    (b'2014', datetime(2014, 1, 1)),
+    (b'20140204', datetime(2014, 2, 4)),
+    (b'2014-02-04', datetime(2014, 2, 4)),
+    (b'2014-02-04T12', datetime(2014, 2, 4, 12)),
+    (b'2014-02-04T12:30', datetime(2014, 2, 4, 12, 30)),
+    (b'2014-02-04T12:30:15', datetime(2014, 2, 4, 12, 30, 15)),
+    (b'2014-02-04T12:30:15.224', datetime(2014, 2, 4, 12, 30, 15, 224000)),
+    (b'20140204T123015.224', datetime(2014, 2, 4, 12, 30, 15, 224000)),
+    (b'2014-02-04T12:30:15.224Z', datetime(2014, 2, 4, 12, 30, 15, 224000,
+                                           tz.tzutc())),
+    (b'2014-02-04T12:30:15.224+05:00',
+     datetime(2014, 2, 4, 12, 30, 15, 224000,
+              tzinfo=tz.tzoffset(None, timedelta(hours=5))))])
+def test_bytes(isostr, dt):
+    assert isoparse(isostr) == dt
+
+
+###
+# Invalid ISO strings
+@pytest.mark.parametrize('isostr,exception', [
+    ('201', ValueError),                     # ISO string too short
+    ('2012-0425', ValueError),               # Inconsistent date separators
+    ('201204-25', ValueError),               # Inconsistent date separators
+    ('20120425T0120:00', ValueError),        # Inconsistent time separators
+    ('20120425T012500-334', ValueError),     # Wrong microsecond separator
+    ('2001-1', ValueError),                  # YYYY-M not valid
+    ('2012-04-9', ValueError),               # YYYY-MM-D not valid
+    ('201204', ValueError),                  # YYYYMM not valid
+    ('20120411T03:30+', ValueError),         # Time zone too short
+    ('20120411T03:30+1234567', ValueError),  # Time zone too long
+    ('20120411T03:30-25:40', ValueError),    # Time zone invalid
+    ('2012-1a', ValueError),                 # Invalid month
+    ('20120411T03:30+00:60', ValueError),    # Time zone invalid minutes
+    ('20120411T03:30+00:61', ValueError),    # Time zone invalid minutes
+    ('20120411T033030.123456012:00',         # No sign in time zone
+     ValueError),
+    ('2012-W00', ValueError),                # Invalid ISO week
+    ('2012-W55', ValueError),                # Invalid ISO week
+    ('2012-W01-0', ValueError),              # Invalid ISO week day
+    ('2012-W01-8', ValueError),              # Invalid ISO week day
+    ('2013-000', ValueError),                # Invalid ordinal day
+    ('2013-366', ValueError),                # Invalid ordinal day
+    ('2013366', ValueError),                 # Invalid ordinal day
+    ('2014-03-12Т12:30:14', ValueError),     # Cyrillic T
+    ('2014-04-21T24:00:01', ValueError),     # Invalid use of 24 for midnight
+    ('2014_W01-1', ValueError),              # Invalid separator
+    ('2014W01-1', ValueError),               # Inconsistent use of dashes
+    ('2014-W011', ValueError),               # Inconsistent use of dashes
+])
+def test_iso_raises(isostr, exception):
+    with pytest.raises(exception):
+        isoparse(isostr)
+
+
+@pytest.mark.parametrize('sep_act,valid_sep', [
+    ('C', 'T'),
+    ('T', 'C')
+])
+def test_iso_raises_sep(sep_act, valid_sep):
+    # The separator in the string does not match the separator the parser
+    # was constructed with, so parsing should fail
+    isostr = '2012-04-25' + sep_act + '01:25:00'
+    with pytest.raises(ValueError):
+        isoparser(sep=valid_sep).isoparse(isostr)
+
+
+@pytest.mark.xfail()
+@pytest.mark.parametrize('isostr,exception', [
+    ('20120425T01:2000', ValueError),  # Inconsistent time separators
+])
+def test_iso_raises_failing(isostr, exception):
+    # These are test cases where the current implementation is too lenient
+    # and need to be fixed
+    with pytest.raises(exception):
+        isoparse(isostr)
+
+
+###
+# Test ISOParser constructor
+@pytest.mark.parametrize('sep', [' ', '9', '🍛'])
+def test_isoparser_invalid_sep(sep):
+    with pytest.raises(ValueError):
+        isoparser(sep=sep)
+
+
+# This only fails on Python 3
+@pytest.mark.xfail(six.PY3, reason="Fails on Python 3 only")
+def test_isoparser_byte_sep():
+    dt = datetime(2017, 12, 6, 12, 30, 45)
+    dt_str = dt.isoformat(sep=str('T'))
+
+    dt_rt = isoparser(sep=b'T').isoparse(dt_str)
+
+    assert dt == dt_rt
+
+
+###
+# Test parse_tzstr
+@pytest.mark.parametrize('tzoffset', FULL_TZOFFSETS)
+def test_parse_tzstr(tzoffset):
+    dt = datetime(2017, 11, 27, 6, 14, 30, 123456)
+    date_fmt = '%Y-%m-%d'
+    time_fmt = '%H:%M:%S.%f'
+
+    _isoparse_date_and_time(dt, date_fmt, time_fmt, tzoffset)
+
+
+@pytest.mark.parametrize('tzstr', [
+    '-00:00', '+00:00', '+00', '-00', '+0000', '-0000'
+])
+@pytest.mark.parametrize('zero_as_utc', [True, False])
+def test_parse_tzstr_zero_as_utc(tzstr, zero_as_utc):
+    tzi = isoparser().parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
+    assert tzi == tz.tzutc()
+    assert (type(tzi) == tz.tzutc) == zero_as_utc
+
+
+@pytest.mark.parametrize('tzstr,exception', [
+    ('00:00', ValueError),    # No sign
+    ('05:00', ValueError),    # No sign
+    ('_00:00', ValueError),   # Invalid sign
+    ('+25:00', ValueError),   # Offset too large
+    ('00:0000', ValueError),  # String too long
+])
+def test_parse_tzstr_fails(tzstr, exception):
+    with pytest.raises(exception):
+        isoparser().parse_tzstr(tzstr)
+
+###
+# Test parse_isodate
+def __make_date_examples():
+    dates_no_day = [
+        date(1999, 12, 1),
+        date(2016, 2, 1)
+    ]
+
+    if six.PY3:
+        # strftime does not support dates before 1900 in Python 2
+        dates_no_day.append(date(1000, 11, 1))
+
+    # Only one supported format for dates with no day
+    o = zip(dates_no_day, it.repeat('%Y-%m'))
+
+    dates_w_day = [
+        date(1969, 12, 31),
+        date(1900, 1, 1),
+        date(2016, 2, 29),
+        date(2017, 11, 14)
+    ]
+
+    dates_w_day_fmts = ('%Y%m%d', '%Y-%m-%d')
+    o = it.chain(o, it.product(dates_w_day, dates_w_day_fmts))
+
+    return list(o)
+
+
+@pytest.mark.parametrize('d,dt_fmt', __make_date_examples())
+@pytest.mark.parametrize('as_bytes', [True, False])
+def
test_parse_isodate(d, dt_fmt, as_bytes):
+    d_str = d.strftime(dt_fmt)
+    if isinstance(d_str, six.text_type) and as_bytes:
+        d_str = d_str.encode('ascii')
+    elif isinstance(d_str, six.binary_type) and not as_bytes:
+        d_str = d_str.decode('ascii')
+
+    iparser = isoparser()
+    assert iparser.parse_isodate(d_str) == d
+
+
+@pytest.mark.parametrize('isostr,exception', [
+    ('243', ValueError),          # ISO string too short
+    ('2014-0423', ValueError),    # Inconsistent date separators
+    ('201404-23', ValueError),    # Inconsistent date separators
+    ('2014日03月14', ValueError),  # Not ASCII
+    ('2013-02-29', ValueError),   # Not a leap year
+    ('2014/12/03', ValueError),   # Wrong separators
+    ('2014-04-19T', ValueError),  # Unknown components
+])
+def test_isodate_raises(isostr, exception):
+    with pytest.raises(exception):
+        isoparser().parse_isodate(isostr)
+
+
+###
+# Test parse_isotime
+def __make_time_examples():
+    outputs = []
+
+    # HH
+    time_h = [time(0), time(8), time(22)]
+    time_h_fmts = ['%H']
+
+    outputs.append(it.product(time_h, time_h_fmts))
+
+    # HHMM / HH:MM
+    time_hm = [time(0, 0), time(0, 30), time(8, 47), time(16, 1)]
+    time_hm_fmts = ['%H%M', '%H:%M']
+
+    outputs.append(it.product(time_hm, time_hm_fmts))
+
+    # HHMMSS / HH:MM:SS
+    time_hms = [time(0, 0, 0), time(0, 15, 30),
+                time(8, 2, 16), time(12, 0), time(16, 2), time(20, 45)]
+
+    time_hms_fmts = ['%H%M%S', '%H:%M:%S']
+
+    outputs.append(it.product(time_hms, time_hms_fmts))
+
+    # HHMMSS.ffffff / HH:MM:SS.ffffff
+    time_hmsu = [time(0, 0, 0, 0), time(4, 15, 3, 247993),
+                 time(14, 21, 59, 948730),
+                 time(23, 59, 59, 999999)]
+
+    time_hmsu_fmts = ['%H%M%S.%f', '%H:%M:%S.%f']
+
+    outputs.append(it.product(time_hmsu, time_hmsu_fmts))
+
+    outputs = list(map(list, outputs))
+
+    # Time zones
+    ex_naive = list(it.chain.from_iterable(x[0:2] for x in outputs))
+    o = it.product(ex_naive, TZOFFSETS)  # ((time, fmt), (tzinfo, offsetstr))
+    o = ((t.replace(tzinfo=tzi), fmt + off_str)
+         for (t, fmt), (tzi, off_str) in o)
+
+    outputs.append(o)
+
+    return list(it.chain.from_iterable(outputs))
+
+
+@pytest.mark.parametrize('time_val,time_fmt', __make_time_examples())
+@pytest.mark.parametrize('as_bytes', [True, False])
+def test_isotime(time_val, time_fmt, as_bytes):
+    tstr = time_val.strftime(time_fmt)
+    if isinstance(tstr, six.text_type) and as_bytes:
+        tstr = tstr.encode('ascii')
+    elif isinstance(tstr, six.binary_type) and not as_bytes:
+        tstr = tstr.decode('ascii')
+
+    iparser = isoparser()
+
+    assert iparser.parse_isotime(tstr) == time_val
+
+@pytest.mark.parametrize('isostr,exception', [
+    ('3', ValueError),                    # ISO string too short
+    ('14時30分15秒', ValueError),          # Not ASCII
+    ('14_30_15', ValueError),             # Invalid separators
+    ('1430:15', ValueError),              # Inconsistent separator use
+    ('14:30:15.3684000309', ValueError),  # Too much us precision
+    ('25', ValueError),                   # Invalid hours
+    ('25:15', ValueError),                # Invalid hours
+    ('14:60', ValueError),                # Invalid minutes
+    ('14:59:61', ValueError),             # Invalid seconds
+    ('14:30:15.3446830500', ValueError),  # No sign in time zone
+    ('14:30:15+', ValueError),            # Time zone too short
+    ('14:30:15+1234567', ValueError),     # Time zone invalid
+    ('14:59:59+25:00', ValueError),       # Invalid tz hours
+    ('14:59:59+12:62', ValueError),       # Invalid tz minutes
+    ('14:59:30_344583', ValueError),      # Invalid microsecond separator
+])
+def test_isotime_raises(isostr, exception):
+    iparser = isoparser()
+    with pytest.raises(exception):
+        iparser.parse_isotime(isostr)
+
+
+@pytest.mark.xfail()
+@pytest.mark.parametrize('isostr,exception', [
+
('14:3015', ValueError), # Inconsistent separator use + ('201202', ValueError) # Invalid ISO format +]) +def test_isotime_raises_xfail(isostr, exception): + iparser = isoparser() + with pytest.raises(exception): + iparser.parse_isotime(isostr) diff --git a/libraries/dateutil/test/test_parser.py b/libraries/dateutil/test/test_parser.py new file mode 100644 index 00000000..f8c20720 --- /dev/null +++ b/libraries/dateutil/test/test_parser.py @@ -0,0 +1,1114 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import itertools +from datetime import datetime, timedelta +import unittest +import sys + +from dateutil import tz +from dateutil.tz import tzoffset +from dateutil.parser import parse, parserinfo +from dateutil.parser import UnknownTimezoneWarning + +from ._common import TZEnvContext + +from six import assertRaisesRegex, PY3 +from six.moves import StringIO + +import pytest + +# Platform info +IS_WIN = sys.platform.startswith('win') + +try: + datetime.now().strftime('%-d') + PLATFORM_HAS_DASH_D = True +except ValueError: + PLATFORM_HAS_DASH_D = False + + +class TestFormat(unittest.TestCase): + + def test_ybd(self): + # If we have a 4-digit year, a non-numeric month (abbreviated or not), + # and a day (1 or 2 digits), then there is no ambiguity as to which + # token is a year/month/day. This holds regardless of what order the + # terms are in and for each of the separators below. + + seps = ['-', ' ', '/', '.'] + + year_tokens = ['%Y'] + month_tokens = ['%b', '%B'] + day_tokens = ['%d'] + if PLATFORM_HAS_DASH_D: + day_tokens.append('%-d') + + prods = itertools.product(year_tokens, month_tokens, day_tokens) + perms = [y for x in prods for y in itertools.permutations(x)] + unambig_fmts = [sep.join(perm) for sep in seps for perm in perms] + + actual = datetime(2003, 9, 25) + + for fmt in unambig_fmts: + dstr = actual.strftime(fmt) + res = parse(dstr) + self.assertEqual(res, actual) + + +class ParserTest(unittest.TestCase): + + def setUp(self): + self.tzinfos = {"BRST": -10800} + self.brsttz = tzoffset("BRST", -10800) + self.default = datetime(2003, 9, 25) + + # Parser should be able to handle bytestring and unicode + self.uni_str = '2014-05-01 08:00:00' + self.str_str = self.uni_str.encode() + + def testEmptyString(self): + with self.assertRaises(ValueError): + parse('') + + def testNone(self): + with self.assertRaises(TypeError): + parse(None) + + def testInvalidType(self): + with self.assertRaises(TypeError): + parse(13) + + def testDuckTyping(self): + # We want to support arbitrary classes that implement the stream + # interface. 
+ + class StringPassThrough(object): + def __init__(self, stream): + self.stream = stream + + def read(self, *args, **kwargs): + return self.stream.read(*args, **kwargs) + + dstr = StringPassThrough(StringIO('2014 January 19')) + + self.assertEqual(parse(dstr), datetime(2014, 1, 19)) + + def testParseStream(self): + dstr = StringIO('2014 January 19') + + self.assertEqual(parse(dstr), datetime(2014, 1, 19)) + + def testParseStr(self): + self.assertEqual(parse(self.str_str), + parse(self.uni_str)) + + def testParseBytes(self): + self.assertEqual(parse(b'2014 January 19'), datetime(2014, 1, 19)) + + def testParseBytearray(self): + # GH #417 + self.assertEqual(parse(bytearray(b'2014 January 19')), + datetime(2014, 1, 19)) + + def testParserParseStr(self): + from dateutil.parser import parser + + self.assertEqual(parser().parse(self.str_str), + parser().parse(self.uni_str)) + + def testParseUnicodeWords(self): + + class rus_parserinfo(parserinfo): + MONTHS = [("янв", "Январь"), + ("фев", "Февраль"), + ("мар", "Март"), + ("апр", "Апрель"), + ("май", "Май"), + ("июн", "Июнь"), + ("июл", "Июль"), + ("авг", "Август"), + ("сен", "Сентябрь"), + ("окт", "Октябрь"), + ("ноя", "Ноябрь"), + ("дек", "Декабрь")] + + self.assertEqual(parse('10 Сентябрь 2015 10:20', + parserinfo=rus_parserinfo()), + datetime(2015, 9, 10, 10, 20)) + + def testParseWithNulls(self): + # This relies on the from __future__ import unicode_literals, because + # explicitly specifying a unicode literal is a syntax error in Py 3.2 + # May want to switch to u'...' if we ever drop Python 3.2 support. + pstring = '\x00\x00August 29, 1924' + + self.assertEqual(parse(pstring), + datetime(1924, 8, 29)) + + def testDateCommandFormat(self): + self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003", + tzinfos=self.tzinfos), + datetime(2003, 9, 25, 10, 36, 28, + tzinfo=self.brsttz)) + + def testDateCommandFormatUnicode(self): + self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003", + tzinfos=self.tzinfos), + datetime(2003, 9, 25, 10, 36, 28, + tzinfo=self.brsttz)) + + def testDateCommandFormatReversed(self): + self.assertEqual(parse("2003 10:36:28 BRST 25 Sep Thu", + tzinfos=self.tzinfos), + datetime(2003, 9, 25, 10, 36, 28, + tzinfo=self.brsttz)) + + def testDateCommandFormatWithLong(self): + if not PY3: + self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003", + tzinfos={"BRST": long(-10800)}), + datetime(2003, 9, 25, 10, 36, 28, + tzinfo=self.brsttz)) + def testDateCommandFormatIgnoreTz(self): + self.assertEqual(parse("Thu Sep 25 10:36:28 BRST 2003", + ignoretz=True), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip1(self): + self.assertEqual(parse("Thu Sep 25 10:36:28 2003"), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip2(self): + self.assertEqual(parse("Thu Sep 25 10:36:28", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip3(self): + self.assertEqual(parse("Thu Sep 10:36:28", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip4(self): + self.assertEqual(parse("Thu 10:36:28", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip5(self): + self.assertEqual(parse("Sep 10:36:28", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip6(self): + self.assertEqual(parse("10:36:28", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testDateCommandFormatStrip7(self): + self.assertEqual(parse("10:36", 
default=self.default), + datetime(2003, 9, 25, 10, 36)) + + def testDateCommandFormatStrip8(self): + self.assertEqual(parse("Thu Sep 25 2003"), + datetime(2003, 9, 25)) + + def testDateCommandFormatStrip10(self): + self.assertEqual(parse("Sep 2003", default=self.default), + datetime(2003, 9, 25)) + + def testDateCommandFormatStrip11(self): + self.assertEqual(parse("Sep", default=self.default), + datetime(2003, 9, 25)) + + def testDateCommandFormatStrip12(self): + self.assertEqual(parse("2003", default=self.default), + datetime(2003, 9, 25)) + + def testDateRCommandFormat(self): + self.assertEqual(parse("Thu, 25 Sep 2003 10:49:41 -0300"), + datetime(2003, 9, 25, 10, 49, 41, + tzinfo=self.brsttz)) + + def testISOFormat(self): + self.assertEqual(parse("2003-09-25T10:49:41.5-03:00"), + datetime(2003, 9, 25, 10, 49, 41, 500000, + tzinfo=self.brsttz)) + + def testISOFormatStrip1(self): + self.assertEqual(parse("2003-09-25T10:49:41-03:00"), + datetime(2003, 9, 25, 10, 49, 41, + tzinfo=self.brsttz)) + + def testISOFormatStrip2(self): + self.assertEqual(parse("2003-09-25T10:49:41"), + datetime(2003, 9, 25, 10, 49, 41)) + + def testISOFormatStrip3(self): + self.assertEqual(parse("2003-09-25T10:49"), + datetime(2003, 9, 25, 10, 49)) + + def testISOFormatStrip4(self): + self.assertEqual(parse("2003-09-25T10"), + datetime(2003, 9, 25, 10)) + + def testISOFormatStrip5(self): + self.assertEqual(parse("2003-09-25"), + datetime(2003, 9, 25)) + + def testISOStrippedFormat(self): + self.assertEqual(parse("20030925T104941.5-0300"), + datetime(2003, 9, 25, 10, 49, 41, 500000, + tzinfo=self.brsttz)) + + def testISOStrippedFormatStrip1(self): + self.assertEqual(parse("20030925T104941-0300"), + datetime(2003, 9, 25, 10, 49, 41, + tzinfo=self.brsttz)) + + def testISOStrippedFormatStrip2(self): + self.assertEqual(parse("20030925T104941"), + datetime(2003, 9, 25, 10, 49, 41)) + + def testISOStrippedFormatStrip3(self): + self.assertEqual(parse("20030925T1049"), + datetime(2003, 9, 25, 10, 49, 0)) + + def testISOStrippedFormatStrip4(self): + self.assertEqual(parse("20030925T10"), + datetime(2003, 9, 25, 10)) + + def testISOStrippedFormatStrip5(self): + self.assertEqual(parse("20030925"), + datetime(2003, 9, 25)) + + def testPythonLoggerFormat(self): + self.assertEqual(parse("2003-09-25 10:49:41,502"), + datetime(2003, 9, 25, 10, 49, 41, 502000)) + + def testNoSeparator1(self): + self.assertEqual(parse("199709020908"), + datetime(1997, 9, 2, 9, 8)) + + def testNoSeparator2(self): + self.assertEqual(parse("19970902090807"), + datetime(1997, 9, 2, 9, 8, 7)) + + def testDateWithDash1(self): + self.assertEqual(parse("2003-09-25"), + datetime(2003, 9, 25)) + + def testDateWithDash6(self): + self.assertEqual(parse("09-25-2003"), + datetime(2003, 9, 25)) + + def testDateWithDash7(self): + self.assertEqual(parse("25-09-2003"), + datetime(2003, 9, 25)) + + def testDateWithDash8(self): + self.assertEqual(parse("10-09-2003", dayfirst=True), + datetime(2003, 9, 10)) + + def testDateWithDash9(self): + self.assertEqual(parse("10-09-2003"), + datetime(2003, 10, 9)) + + def testDateWithDash10(self): + self.assertEqual(parse("10-09-03"), + datetime(2003, 10, 9)) + + def testDateWithDash11(self): + self.assertEqual(parse("10-09-03", yearfirst=True), + datetime(2010, 9, 3)) + + def testDateWithDot1(self): + self.assertEqual(parse("2003.09.25"), + datetime(2003, 9, 25)) + + def testDateWithDot6(self): + self.assertEqual(parse("09.25.2003"), + datetime(2003, 9, 25)) + + def testDateWithDot7(self): + self.assertEqual(parse("25.09.2003"), + 
datetime(2003, 9, 25)) + + def testDateWithDot8(self): + self.assertEqual(parse("10.09.2003", dayfirst=True), + datetime(2003, 9, 10)) + + def testDateWithDot9(self): + self.assertEqual(parse("10.09.2003"), + datetime(2003, 10, 9)) + + def testDateWithDot10(self): + self.assertEqual(parse("10.09.03"), + datetime(2003, 10, 9)) + + def testDateWithDot11(self): + self.assertEqual(parse("10.09.03", yearfirst=True), + datetime(2010, 9, 3)) + + def testDateWithSlash1(self): + self.assertEqual(parse("2003/09/25"), + datetime(2003, 9, 25)) + + def testDateWithSlash6(self): + self.assertEqual(parse("09/25/2003"), + datetime(2003, 9, 25)) + + def testDateWithSlash7(self): + self.assertEqual(parse("25/09/2003"), + datetime(2003, 9, 25)) + + def testDateWithSlash8(self): + self.assertEqual(parse("10/09/2003", dayfirst=True), + datetime(2003, 9, 10)) + + def testDateWithSlash9(self): + self.assertEqual(parse("10/09/2003"), + datetime(2003, 10, 9)) + + def testDateWithSlash10(self): + self.assertEqual(parse("10/09/03"), + datetime(2003, 10, 9)) + + def testDateWithSlash11(self): + self.assertEqual(parse("10/09/03", yearfirst=True), + datetime(2010, 9, 3)) + + def testDateWithSpace1(self): + self.assertEqual(parse("2003 09 25"), + datetime(2003, 9, 25)) + + def testDateWithSpace6(self): + self.assertEqual(parse("09 25 2003"), + datetime(2003, 9, 25)) + + def testDateWithSpace7(self): + self.assertEqual(parse("25 09 2003"), + datetime(2003, 9, 25)) + + def testDateWithSpace8(self): + self.assertEqual(parse("10 09 2003", dayfirst=True), + datetime(2003, 9, 10)) + + def testDateWithSpace9(self): + self.assertEqual(parse("10 09 2003"), + datetime(2003, 10, 9)) + + def testDateWithSpace10(self): + self.assertEqual(parse("10 09 03"), + datetime(2003, 10, 9)) + + def testDateWithSpace11(self): + self.assertEqual(parse("10 09 03", yearfirst=True), + datetime(2010, 9, 3)) + + def testDateWithSpace12(self): + self.assertEqual(parse("25 09 03"), + datetime(2003, 9, 25)) + + def testStrangelyOrderedDate1(self): + self.assertEqual(parse("03 25 Sep"), + datetime(2003, 9, 25)) + + def testStrangelyOrderedDate3(self): + self.assertEqual(parse("25 03 Sep"), + datetime(2025, 9, 3)) + + def testHourWithLetters(self): + self.assertEqual(parse("10h36m28.5s", default=self.default), + datetime(2003, 9, 25, 10, 36, 28, 500000)) + + def testHourWithLettersStrip1(self): + self.assertEqual(parse("10h36m28s", default=self.default), + datetime(2003, 9, 25, 10, 36, 28)) + + def testHourWithLettersStrip2(self): + self.assertEqual(parse("10h36m", default=self.default), + datetime(2003, 9, 25, 10, 36)) + + def testHourWithLettersStrip3(self): + self.assertEqual(parse("10h", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourWithLettersStrip4(self): + self.assertEqual(parse("10 h 36", default=self.default), + datetime(2003, 9, 25, 10, 36)) + + def testHourWithLetterStrip5(self): + self.assertEqual(parse("10 h 36.5", default=self.default), + datetime(2003, 9, 25, 10, 36, 30)) + + def testMinuteWithLettersSpaces1(self): + self.assertEqual(parse("36 m 5", default=self.default), + datetime(2003, 9, 25, 0, 36, 5)) + + def testMinuteWithLettersSpaces2(self): + self.assertEqual(parse("36 m 5 s", default=self.default), + datetime(2003, 9, 25, 0, 36, 5)) + + def testMinuteWithLettersSpaces3(self): + self.assertEqual(parse("36 m 05", default=self.default), + datetime(2003, 9, 25, 0, 36, 5)) + + def testMinuteWithLettersSpaces4(self): + self.assertEqual(parse("36 m 05 s", default=self.default), + datetime(2003, 9, 25, 0, 36, 5)) + 
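+    # Note: in the tests above `default` supplies every field the parsed
+    # string omits, which is why a bare "36 m 05 s" still lands on
+    # 2003-09-25. A minimal sketch of that behavior (values mirror setUp;
+    # the exact call is illustrative):
+    #
+    #     parse("10:36", default=datetime(2003, 9, 25))
+    #     # -> datetime(2003, 9, 25, 10, 36)
+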
+ def testAMPMNoHour(self): + with self.assertRaises(ValueError): + parse("AM") + + with self.assertRaises(ValueError): + parse("Jan 20, 2015 PM") + + def testHourAmPm1(self): + self.assertEqual(parse("10h am", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm2(self): + self.assertEqual(parse("10h pm", default=self.default), + datetime(2003, 9, 25, 22)) + + def testHourAmPm3(self): + self.assertEqual(parse("10am", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm4(self): + self.assertEqual(parse("10pm", default=self.default), + datetime(2003, 9, 25, 22)) + + def testHourAmPm5(self): + self.assertEqual(parse("10:00 am", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm6(self): + self.assertEqual(parse("10:00 pm", default=self.default), + datetime(2003, 9, 25, 22)) + + def testHourAmPm7(self): + self.assertEqual(parse("10:00am", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm8(self): + self.assertEqual(parse("10:00pm", default=self.default), + datetime(2003, 9, 25, 22)) + + def testHourAmPm9(self): + self.assertEqual(parse("10:00a.m", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm10(self): + self.assertEqual(parse("10:00p.m", default=self.default), + datetime(2003, 9, 25, 22)) + + def testHourAmPm11(self): + self.assertEqual(parse("10:00a.m.", default=self.default), + datetime(2003, 9, 25, 10)) + + def testHourAmPm12(self): + self.assertEqual(parse("10:00p.m.", default=self.default), + datetime(2003, 9, 25, 22)) + + def testAMPMRange(self): + with self.assertRaises(ValueError): + parse("13:44 AM") + + with self.assertRaises(ValueError): + parse("January 25, 1921 23:13 PM") + + def testPertain(self): + self.assertEqual(parse("Sep 03", default=self.default), + datetime(2003, 9, 3)) + self.assertEqual(parse("Sep of 03", default=self.default), + datetime(2003, 9, 25)) + + def testWeekdayAlone(self): + self.assertEqual(parse("Wed", default=self.default), + datetime(2003, 10, 1)) + + def testLongWeekday(self): + self.assertEqual(parse("Wednesday", default=self.default), + datetime(2003, 10, 1)) + + def testLongMonth(self): + self.assertEqual(parse("October", default=self.default), + datetime(2003, 10, 25)) + + def testZeroYear(self): + self.assertEqual(parse("31-Dec-00", default=self.default), + datetime(2000, 12, 31)) + + def testFuzzy(self): + s = "Today is 25 of September of 2003, exactly " \ + "at 10:49:41 with timezone -03:00." + self.assertEqual(parse(s, fuzzy=True), + datetime(2003, 9, 25, 10, 49, 41, + tzinfo=self.brsttz)) + + def testFuzzyWithTokens(self): + s1 = "Today is 25 of September of 2003, exactly " \ + "at 10:49:41 with timezone -03:00." + self.assertEqual(parse(s1, fuzzy_with_tokens=True), + (datetime(2003, 9, 25, 10, 49, 41, + tzinfo=self.brsttz), + ('Today is ', 'of ', ', exactly at ', + ' with timezone ', '.'))) + + s2 = "http://biz.yahoo.com/ipo/p/600221.html" + self.assertEqual(parse(s2, fuzzy_with_tokens=True), + (datetime(2060, 2, 21, 0, 0, 0), + ('http://biz.yahoo.com/ipo/p/', '.html'))) + + def testFuzzyAMPMProblem(self): + # Sometimes fuzzy parsing results in AM/PM flag being set without + # hours - if it's fuzzy it should ignore that. + s1 = "I have a meeting on March 1, 1974." 
+ s2 = "On June 8th, 2020, I am going to be the first man on Mars" + + # Also don't want any erroneous AM or PMs changing the parsed time + s3 = "Meet me at the AM/PM on Sunset at 3:00 AM on December 3rd, 2003" + s4 = "Meet me at 3:00AM on December 3rd, 2003 at the AM/PM on Sunset" + + self.assertEqual(parse(s1, fuzzy=True), datetime(1974, 3, 1)) + self.assertEqual(parse(s2, fuzzy=True), datetime(2020, 6, 8)) + self.assertEqual(parse(s3, fuzzy=True), datetime(2003, 12, 3, 3)) + self.assertEqual(parse(s4, fuzzy=True), datetime(2003, 12, 3, 3)) + + def testFuzzyIgnoreAMPM(self): + s1 = "Jan 29, 1945 14:45 AM I going to see you there?" + with pytest.warns(UnknownTimezoneWarning): + res = parse(s1, fuzzy=True) + self.assertEqual(res, datetime(1945, 1, 29, 14, 45)) + + def testExtraSpace(self): + self.assertEqual(parse(" July 4 , 1976 12:01:02 am "), + datetime(1976, 7, 4, 0, 1, 2)) + + def testRandomFormat1(self): + self.assertEqual(parse("Wed, July 10, '96"), + datetime(1996, 7, 10, 0, 0)) + + def testRandomFormat2(self): + self.assertEqual(parse("1996.07.10 AD at 15:08:56 PDT", + ignoretz=True), + datetime(1996, 7, 10, 15, 8, 56)) + + def testRandomFormat3(self): + self.assertEqual(parse("1996.July.10 AD 12:08 PM"), + datetime(1996, 7, 10, 12, 8)) + + def testRandomFormat4(self): + self.assertEqual(parse("Tuesday, April 12, 1952 AD 3:30:42pm PST", + ignoretz=True), + datetime(1952, 4, 12, 15, 30, 42)) + + def testRandomFormat5(self): + self.assertEqual(parse("November 5, 1994, 8:15:30 am EST", + ignoretz=True), + datetime(1994, 11, 5, 8, 15, 30)) + + def testRandomFormat6(self): + self.assertEqual(parse("1994-11-05T08:15:30-05:00", + ignoretz=True), + datetime(1994, 11, 5, 8, 15, 30)) + + def testRandomFormat7(self): + self.assertEqual(parse("1994-11-05T08:15:30Z", + ignoretz=True), + datetime(1994, 11, 5, 8, 15, 30)) + + def testRandomFormat8(self): + self.assertEqual(parse("July 4, 1976"), datetime(1976, 7, 4)) + + def testRandomFormat9(self): + self.assertEqual(parse("7 4 1976"), datetime(1976, 7, 4)) + + def testRandomFormat10(self): + self.assertEqual(parse("4 jul 1976"), datetime(1976, 7, 4)) + + def testRandomFormat11(self): + self.assertEqual(parse("7-4-76"), datetime(1976, 7, 4)) + + def testRandomFormat12(self): + self.assertEqual(parse("19760704"), datetime(1976, 7, 4)) + + def testRandomFormat13(self): + self.assertEqual(parse("0:01:02", default=self.default), + datetime(2003, 9, 25, 0, 1, 2)) + + def testRandomFormat14(self): + self.assertEqual(parse("12h 01m02s am", default=self.default), + datetime(2003, 9, 25, 0, 1, 2)) + + def testRandomFormat15(self): + self.assertEqual(parse("0:01:02 on July 4, 1976"), + datetime(1976, 7, 4, 0, 1, 2)) + + def testRandomFormat16(self): + self.assertEqual(parse("0:01:02 on July 4, 1976"), + datetime(1976, 7, 4, 0, 1, 2)) + + def testRandomFormat17(self): + self.assertEqual(parse("1976-07-04T00:01:02Z", ignoretz=True), + datetime(1976, 7, 4, 0, 1, 2)) + + def testRandomFormat18(self): + self.assertEqual(parse("July 4, 1976 12:01:02 am"), + datetime(1976, 7, 4, 0, 1, 2)) + + def testRandomFormat19(self): + self.assertEqual(parse("Mon Jan 2 04:24:27 1995"), + datetime(1995, 1, 2, 4, 24, 27)) + + def testRandomFormat20(self): + self.assertEqual(parse("Tue Apr 4 00:22:12 PDT 1995", ignoretz=True), + datetime(1995, 4, 4, 0, 22, 12)) + + def testRandomFormat21(self): + self.assertEqual(parse("04.04.95 00:22"), + datetime(1995, 4, 4, 0, 22)) + + def testRandomFormat22(self): + self.assertEqual(parse("Jan 1 1999 11:23:34.578"), + datetime(1999, 1, 1, 
11, 23, 34, 578000)) + + def testRandomFormat23(self): + self.assertEqual(parse("950404 122212"), + datetime(1995, 4, 4, 12, 22, 12)) + + def testRandomFormat24(self): + self.assertEqual(parse("0:00 PM, PST", default=self.default, + ignoretz=True), + datetime(2003, 9, 25, 12, 0)) + + def testRandomFormat25(self): + self.assertEqual(parse("12:08 PM", default=self.default), + datetime(2003, 9, 25, 12, 8)) + + def testRandomFormat26(self): + with pytest.warns(UnknownTimezoneWarning): + res = parse("5:50 A.M. on June 13, 1990") + + self.assertEqual(res, datetime(1990, 6, 13, 5, 50)) + + def testRandomFormat27(self): + self.assertEqual(parse("3rd of May 2001"), datetime(2001, 5, 3)) + + def testRandomFormat28(self): + self.assertEqual(parse("5th of March 2001"), datetime(2001, 3, 5)) + + def testRandomFormat29(self): + self.assertEqual(parse("1st of May 2003"), datetime(2003, 5, 1)) + + def testRandomFormat30(self): + self.assertEqual(parse("01h02m03", default=self.default), + datetime(2003, 9, 25, 1, 2, 3)) + + def testRandomFormat31(self): + self.assertEqual(parse("01h02", default=self.default), + datetime(2003, 9, 25, 1, 2)) + + def testRandomFormat32(self): + self.assertEqual(parse("01h02s", default=self.default), + datetime(2003, 9, 25, 1, 0, 2)) + + def testRandomFormat33(self): + self.assertEqual(parse("01m02", default=self.default), + datetime(2003, 9, 25, 0, 1, 2)) + + def testRandomFormat34(self): + self.assertEqual(parse("01m02h", default=self.default), + datetime(2003, 9, 25, 2, 1)) + + def testRandomFormat35(self): + self.assertEqual(parse("2004 10 Apr 11h30m", default=self.default), + datetime(2004, 4, 10, 11, 30)) + + def test_99_ad(self): + self.assertEqual(parse('0099-01-01T00:00:00'), + datetime(99, 1, 1, 0, 0)) + + def test_31_ad(self): + self.assertEqual(parse('0031-01-01T00:00:00'), + datetime(31, 1, 1, 0, 0)) + + def testInvalidDay(self): + with self.assertRaises(ValueError): + parse("Feb 30, 2007") + + def testUnspecifiedDayFallback(self): + # Test that for an unspecified day, the fallback behavior is correct. + self.assertEqual(parse("April 2009", default=datetime(2010, 1, 31)), + datetime(2009, 4, 30)) + + def testUnspecifiedDayFallbackFebNoLeapYear(self): + self.assertEqual(parse("Feb 2007", default=datetime(2010, 1, 31)), + datetime(2007, 2, 28)) + + def testUnspecifiedDayFallbackFebLeapYear(self): + self.assertEqual(parse("Feb 2008", default=datetime(2010, 1, 31)), + datetime(2008, 2, 29)) + + def testTzinfoDictionaryCouldReturnNone(self): + self.assertEqual(parse('2017-02-03 12:40 BRST', tzinfos={"BRST": None}), + datetime(2017, 2, 3, 12, 40)) + + def testTzinfosCallableCouldReturnNone(self): + self.assertEqual(parse('2017-02-03 12:40 BRST', tzinfos=lambda *args: None), + datetime(2017, 2, 3, 12, 40)) + + def testErrorType01(self): + self.assertRaises(ValueError, + parse, 'shouldfail') + + def testCorrectErrorOnFuzzyWithTokens(self): + assertRaisesRegex(self, ValueError, 'Unknown string format', + parse, '04/04/32/423', fuzzy_with_tokens=True) + assertRaisesRegex(self, ValueError, 'Unknown string format', + parse, '04/04/04 +32423', fuzzy_with_tokens=True) + assertRaisesRegex(self, ValueError, 'Unknown string format', + parse, '04/04/0d4', fuzzy_with_tokens=True) + + def testIncreasingCTime(self): + # This test will check 200 different years, every month, every day, + # every hour, every minute, every second, and every weekday, using + # a delta of more or less 1 year, 1 month, 1 day, 1 minute and + # 1 second. 
+ delta = timedelta(days=365+31+1, seconds=1+60+60*60) + dt = datetime(1900, 1, 1, 0, 0, 0, 0) + for i in range(200): + self.assertEqual(parse(dt.ctime()), dt) + dt += delta + + def testIncreasingISOFormat(self): + delta = timedelta(days=365+31+1, seconds=1+60+60*60) + dt = datetime(1900, 1, 1, 0, 0, 0, 0) + for i in range(200): + self.assertEqual(parse(dt.isoformat()), dt) + dt += delta + + def testMicrosecondsPrecisionError(self): + # Skip found out that sad precision problem. :-( + dt1 = parse("00:11:25.01") + dt2 = parse("00:12:10.01") + self.assertEqual(dt1.microsecond, 10000) + self.assertEqual(dt2.microsecond, 10000) + + def testMicrosecondPrecisionErrorReturns(self): + # One more precision issue, discovered by Eric Brown. This should + # be the last one, as we're no longer using floating points. + for ms in [100001, 100000, 99999, 99998, + 10001, 10000, 9999, 9998, + 1001, 1000, 999, 998, + 101, 100, 99, 98]: + dt = datetime(2008, 2, 27, 21, 26, 1, ms) + self.assertEqual(parse(dt.isoformat()), dt) + + def testHighPrecisionSeconds(self): + self.assertEqual(parse("20080227T21:26:01.123456789"), + datetime(2008, 2, 27, 21, 26, 1, 123456)) + + def testCustomParserInfo(self): + # Custom parser info wasn't working, as Michael Elsdörfer discovered. + from dateutil.parser import parserinfo, parser + + class myparserinfo(parserinfo): + MONTHS = parserinfo.MONTHS[:] + MONTHS[0] = ("Foo", "Foo") + myparser = parser(myparserinfo()) + dt = myparser.parse("01/Foo/2007") + self.assertEqual(dt, datetime(2007, 1, 1)) + + def testCustomParserShortDaynames(self): + # Horacio Hoyos discovered that day names shorter than 3 characters, + # for example two letter German day name abbreviations, don't work: + # https://github.com/dateutil/dateutil/issues/343 + from dateutil.parser import parserinfo, parser + + class GermanParserInfo(parserinfo): + WEEKDAYS = [("Mo", "Montag"), + ("Di", "Dienstag"), + ("Mi", "Mittwoch"), + ("Do", "Donnerstag"), + ("Fr", "Freitag"), + ("Sa", "Samstag"), + ("So", "Sonntag")] + + myparser = parser(GermanParserInfo()) + dt = myparser.parse("Sa 21. 
Jan 2017") + self.assertEqual(dt, datetime(2017, 1, 21)) + + def testNoYearFirstNoDayFirst(self): + dtstr = '090107' + + # Should be MMDDYY + self.assertEqual(parse(dtstr), + datetime(2007, 9, 1)) + + self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=False), + datetime(2007, 9, 1)) + + def testYearFirst(self): + dtstr = '090107' + + # Should be MMDDYY + self.assertEqual(parse(dtstr, yearfirst=True), + datetime(2009, 1, 7)) + + self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=False), + datetime(2009, 1, 7)) + + def testDayFirst(self): + dtstr = '090107' + + # Should be DDMMYY + self.assertEqual(parse(dtstr, dayfirst=True), + datetime(2007, 1, 9)) + + self.assertEqual(parse(dtstr, yearfirst=False, dayfirst=True), + datetime(2007, 1, 9)) + + def testDayFirstYearFirst(self): + dtstr = '090107' + # Should be YYDDMM + self.assertEqual(parse(dtstr, yearfirst=True, dayfirst=True), + datetime(2009, 7, 1)) + + def testUnambiguousYearFirst(self): + dtstr = '2015 09 25' + self.assertEqual(parse(dtstr, yearfirst=True), + datetime(2015, 9, 25)) + + def testUnambiguousDayFirst(self): + dtstr = '2015 09 25' + self.assertEqual(parse(dtstr, dayfirst=True), + datetime(2015, 9, 25)) + + def testUnambiguousDayFirstYearFirst(self): + dtstr = '2015 09 25' + self.assertEqual(parse(dtstr, dayfirst=True, yearfirst=True), + datetime(2015, 9, 25)) + + def test_mstridx(self): + # See GH408 + dtstr = '2015-15-May' + self.assertEqual(parse(dtstr), + datetime(2015, 5, 15)) + + def test_idx_check(self): + dtstr = '2017-07-17 06:15:' + # Pre-PR, the trailing colon will cause an IndexError at 824-825 + # when checking `i < len_l` and then accessing `l[i+1]` + res = parse(dtstr, fuzzy=True) + self.assertEqual(res, datetime(2017, 7, 17, 6, 15)) + + def test_dBY(self): + # See GH360 + dtstr = '13NOV2017' + res = parse(dtstr) + self.assertEqual(res, datetime(2017, 11, 13)) + + def test_hmBY(self): + # See GH#483 + dtstr = '02:17NOV2017' + res = parse(dtstr, default=self.default) + self.assertEqual(res, datetime(2017, 11, self.default.day, 2, 17)) + + def test_validate_hour(self): + # See GH353 + invalid = "201A-01-01T23:58:39.239769+03:00" + with self.assertRaises(ValueError): + parse(invalid) + + def test_era_trailing_year(self): + dstr = 'AD2001' + res = parse(dstr) + assert res.year == 2001, res + + def test_pre_12_year_same_month(self): + # See GH PR #293 + dtstr = '0003-03-04' + assert parse(dtstr) == datetime(3, 3, 4) + + +class TestParseUnimplementedCases(object): + @pytest.mark.xfail + def test_somewhat_ambiguous_string(self): + # Ref: github issue #487 + # The parser is choosing the wrong part for hour + # causing datetime to raise an exception. 
dtstr = '1237 PM BRST Mon Oct 30 2017'
+        res = parse(dtstr, tzinfos=self.tzinfos)
+        assert res == datetime(2017, 10, 30, 12, 37, tzinfo=self.tzinfos)
+
+    @pytest.mark.xfail
+    def test_YmdH_M_S(self):
+        # found in nasdaq's ftp data
+        dstr = '1991041310:19:24'
+        expected = datetime(1991, 4, 13, 10, 19, 24)
+        res = parse(dstr)
+        assert res == expected, (res, expected)
+
+    @pytest.mark.xfail
+    def test_first_century(self):
+        dstr = '0031 Nov 03'
+        expected = datetime(31, 11, 3)
+        res = parse(dstr)
+        assert res == expected, res
+
+    @pytest.mark.xfail
+    def test_era_trailing_year_with_dots(self):
+        dstr = 'A.D.2001'
+        res = parse(dstr)
+        assert res.year == 2001, res
+
+    @pytest.mark.xfail
+    def test_ad_nospace(self):
+        expected = datetime(6, 5, 19)
+        for dstr in [' 6AD May 19', ' 06AD May 19',
+                     ' 006AD May 19', ' 0006AD May 19']:
+            res = parse(dstr)
+            assert res == expected, (dstr, res)
+
+    @pytest.mark.xfail
+    def test_four_letter_day(self):
+        dstr = 'Frid Dec 30, 2016'
+        expected = datetime(2016, 12, 30)
+        res = parse(dstr)
+        assert res == expected
+
+    @pytest.mark.xfail
+    def test_non_date_number(self):
+        dstr = '1,700'
+        with pytest.raises(ValueError):
+            parse(dstr)
+
+    @pytest.mark.xfail
+    def test_on_era(self):
+        # This could be classified as an "eras" test, but the relevant part
+        # about this is the ` on `
+        dstr = '2:15 PM on January 2nd 1973 A.D.'
+        expected = datetime(1973, 1, 2, 14, 15)
+        res = parse(dstr)
+        assert res == expected
+
+    @pytest.mark.xfail
+    def test_extraneous_year(self):
+        # This was found in the wild at insidertrading.org
+        dstr = "2011 MARTIN CHILDREN'S IRREVOCABLE TRUST u/a/d NOVEMBER 7, 2012"
+        res = parse(dstr, fuzzy_with_tokens=True)
+        expected = datetime(2012, 11, 7)
+        assert res == expected
+
+    @pytest.mark.xfail
+    def test_extraneous_year_tokens(self):
+        # This was found in the wild at insidertrading.org
+        # Unlike in the case above, identifying the first "2012" as the year
+        # would not be a problem, but inferring that the latter 2012 is hhmm
+        # is a problem.
+        dstr = "2012 MARTIN CHILDREN'S IRREVOCABLE TRUST u/a/d NOVEMBER 7, 2012"
+        expected = datetime(2012, 11, 7)
+        (res, tokens) = parse(dstr, fuzzy_with_tokens=True)
+        assert res == expected
+        assert tokens == ("2012 MARTIN CHILDREN'S IRREVOCABLE TRUST u/a/d ",)
+
+    @pytest.mark.xfail
+    def test_extraneous_year2(self):
+        # This was found in the wild at insidertrading.org
+        dstr = ("Berylson Amy Smith 1998 Grantor Retained Annuity Trust "
+                "u/d/t November 2, 1998 f/b/o Jennifer L Berylson")
+        res = parse(dstr, fuzzy_with_tokens=True)
+        expected = datetime(1998, 11, 2)
+        assert res == expected
+
+    @pytest.mark.xfail
+    def test_extraneous_year3(self):
+        # This was found in the wild at insidertrading.org
+        dstr = "SMITH R & WEISS D 94 CHILD TR FBO M W SMITH UDT 12/1/1994"
+        res = parse(dstr, fuzzy_with_tokens=True)
+        expected = datetime(1994, 12, 1)
+        assert res == expected
+
+    @pytest.mark.xfail
+    def test_unambiguous_YYYYMM(self):
+        # 171206 can be parsed as YYMMDD. However, 201712 cannot be parsed
+        # as an instance of YYMMDD, and the parser could fall back to the
+        # YYYYMM format.
+ dstr = "201712" + res = parse(dstr) + expected = datetime(2017, 12, 1) + assert res == expected + +@pytest.mark.skipif(IS_WIN, reason='Windows does not use TZ var') +def test_parse_unambiguous_nonexistent_local(): + # When dates are specified "EST" even when they should be "EDT" in the + # local time zone, we should still assign the local time zone + with TZEnvContext('EST+5EDT,M3.2.0/2,M11.1.0/2'): + dt_exp = datetime(2011, 8, 1, 12, 30, tzinfo=tz.tzlocal()) + dt = parse('2011-08-01T12:30 EST') + + assert dt.tzname() == 'EDT' + assert dt == dt_exp + + +@pytest.mark.skipif(IS_WIN, reason='Windows does not use TZ var') +def test_tzlocal_in_gmt(): + # GH #318 + with TZEnvContext('GMT0BST,M3.5.0,M10.5.0'): + # This is an imaginary datetime in tz.tzlocal() but should still + # parse using the GMT-as-alias-for-UTC rule + dt = parse('2004-05-01T12:00 GMT') + dt_exp = datetime(2004, 5, 1, 12, tzinfo=tz.tzutc()) + + assert dt == dt_exp + + +@pytest.mark.skipif(IS_WIN, reason='Windows does not use TZ var') +def test_tzlocal_parse_fold(): + # One manifestation of GH #318 + with TZEnvContext('EST+5EDT,M3.2.0/2,M11.1.0/2'): + dt_exp = datetime(2011, 11, 6, 1, 30, tzinfo=tz.tzlocal()) + dt_exp = tz.enfold(dt_exp, fold=1) + dt = parse('2011-11-06T01:30 EST') + + # Because this is ambiguous, until `tz.tzlocal() is tz.tzlocal()` + # we'll just check the attributes we care about rather than + # dt == dt_exp + assert dt.tzname() == dt_exp.tzname() + assert dt.replace(tzinfo=None) == dt_exp.replace(tzinfo=None) + assert getattr(dt, 'fold') == getattr(dt_exp, 'fold') + assert dt.astimezone(tz.tzutc()) == dt_exp.astimezone(tz.tzutc()) + + +def test_parse_tzinfos_fold(): + NYC = tz.gettz('America/New_York') + tzinfos = {'EST': NYC, 'EDT': NYC} + + dt_exp = tz.enfold(datetime(2011, 11, 6, 1, 30, tzinfo=NYC), fold=1) + dt = parse('2011-11-06T01:30 EST', tzinfos=tzinfos) + + assert dt == dt_exp + assert dt.tzinfo is dt_exp.tzinfo + assert getattr(dt, 'fold') == getattr(dt_exp, 'fold') + assert dt.astimezone(tz.tzutc()) == dt_exp.astimezone(tz.tzutc()) + + +@pytest.mark.parametrize('dtstr,dt', [ + ('5.6h', datetime(2003, 9, 25, 5, 36)), + ('5.6m', datetime(2003, 9, 25, 0, 5, 36)), + # '5.6s' never had a rounding problem, test added for completeness + ('5.6s', datetime(2003, 9, 25, 0, 0, 5, 600000)) +]) +def test_rounding_floatlike_strings(dtstr, dt): + assert parse(dtstr, default=datetime(2003, 9, 25)) == dt + + +@pytest.mark.parametrize('value', ['1: test', 'Nan']) +def test_decimal_error(value): + # GH 632, GH 662 - decimal.Decimal raises some non-ValueError exception when + # constructed with an invalid value + with pytest.raises(ValueError): + parse(value) + + +def test_BYd_corner_case(): + # GH#687 + res = parse('December.0031.30') + assert res == datetime(31, 12, 30) diff --git a/libraries/dateutil/test/test_relativedelta.py b/libraries/dateutil/test/test_relativedelta.py new file mode 100644 index 00000000..70cb543a --- /dev/null +++ b/libraries/dateutil/test/test_relativedelta.py @@ -0,0 +1,678 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from ._common import WarningTestMixin, NotAValue + +import calendar +from datetime import datetime, date, timedelta +import unittest + +from dateutil.relativedelta import relativedelta, MO, TU, WE, FR, SU + + +class RelativeDeltaTest(WarningTestMixin, unittest.TestCase): + now = datetime(2003, 9, 17, 20, 54, 47, 282310) + today = date(2003, 9, 17) + + def testInheritance(self): + # Ensure that relativedelta is inheritance-friendly.
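+ # In other words, arithmetic involving a relativedelta subclass should + # return instances of that subclass rather than plain relativedelta, as + # the assertions below check operation by operation.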
+ class rdChildClass(relativedelta): + pass + + ccRD = rdChildClass(years=1, months=1, days=1, leapdays=1, weeks=1, + hours=1, minutes=1, seconds=1, microseconds=1) + + rd = relativedelta(years=1, months=1, days=1, leapdays=1, weeks=1, + hours=1, minutes=1, seconds=1, microseconds=1) + + self.assertEqual(type(ccRD + rd), type(ccRD), + msg='Addition does not inherit type.') + + self.assertEqual(type(ccRD - rd), type(ccRD), + msg='Subtraction does not inherit type.') + + self.assertEqual(type(-ccRD), type(ccRD), + msg='Negation does not inherit type.') + + self.assertEqual(type(ccRD * 5.0), type(ccRD), + msg='Multiplication does not inherit type.') + + self.assertEqual(type(ccRD / 5.0), type(ccRD), + msg='Division does not inherit type.') + + def testMonthEndMonthBeginning(self): + self.assertEqual(relativedelta(datetime(2003, 1, 31, 23, 59, 59), + datetime(2003, 3, 1, 0, 0, 0)), + relativedelta(months=-1, seconds=-1)) + + self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0), + datetime(2003, 1, 31, 23, 59, 59)), + relativedelta(months=1, seconds=1)) + + def testMonthEndMonthBeginningLeapYear(self): + self.assertEqual(relativedelta(datetime(2012, 1, 31, 23, 59, 59), + datetime(2012, 3, 1, 0, 0, 0)), + relativedelta(months=-1, seconds=-1)) + + self.assertEqual(relativedelta(datetime(2003, 3, 1, 0, 0, 0), + datetime(2003, 1, 31, 23, 59, 59)), + relativedelta(months=1, seconds=1)) + + def testNextMonth(self): + self.assertEqual(self.now+relativedelta(months=+1), + datetime(2003, 10, 17, 20, 54, 47, 282310)) + + def testNextMonthPlusOneWeek(self): + self.assertEqual(self.now+relativedelta(months=+1, weeks=+1), + datetime(2003, 10, 24, 20, 54, 47, 282310)) + + def testNextMonthPlusOneWeek10am(self): + self.assertEqual(self.today + + relativedelta(months=+1, weeks=+1, hour=10), + datetime(2003, 10, 24, 10, 0)) + + def testNextMonthPlusOneWeek10amDiff(self): + self.assertEqual(relativedelta(datetime(2003, 10, 24, 10, 0), + self.today), + relativedelta(months=+1, days=+7, hours=+10)) + + def testOneMonthBeforeOneYear(self): + self.assertEqual(self.now+relativedelta(years=+1, months=-1), + datetime(2004, 8, 17, 20, 54, 47, 282310)) + + def testMonthsOfDiffNumOfDays(self): + self.assertEqual(date(2003, 1, 27)+relativedelta(months=+1), + date(2003, 2, 27)) + self.assertEqual(date(2003, 1, 31)+relativedelta(months=+1), + date(2003, 2, 28)) + self.assertEqual(date(2003, 1, 31)+relativedelta(months=+2), + date(2003, 3, 31)) + + def testMonthsOfDiffNumOfDaysWithYears(self): + self.assertEqual(date(2000, 2, 28)+relativedelta(years=+1), + date(2001, 2, 28)) + self.assertEqual(date(2000, 2, 29)+relativedelta(years=+1), + date(2001, 2, 28)) + + self.assertEqual(date(1999, 2, 28)+relativedelta(years=+1), + date(2000, 2, 28)) + self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1), + date(2000, 3, 1)) + self.assertEqual(date(1999, 3, 1)+relativedelta(years=+1), + date(2000, 3, 1)) + + self.assertEqual(date(2001, 2, 28)+relativedelta(years=-1), + date(2000, 2, 28)) + self.assertEqual(date(2001, 3, 1)+relativedelta(years=-1), + date(2000, 3, 1)) + + def testNextFriday(self): + self.assertEqual(self.today+relativedelta(weekday=FR), + date(2003, 9, 19)) + + def testNextFridayInt(self): + self.assertEqual(self.today+relativedelta(weekday=calendar.FRIDAY), + date(2003, 9, 19)) + + def testLastFridayInThisMonth(self): + self.assertEqual(self.today+relativedelta(day=31, weekday=FR(-1)), + date(2003, 9, 26)) + + def testNextWednesdayIsToday(self): + self.assertEqual(self.today+relativedelta(weekday=WE), + 
date(2003, 9, 17)) + + def testNextWednesdayNotToday(self): + self.assertEqual(self.today+relativedelta(days=+1, weekday=WE), + date(2003, 9, 24)) + + def test15thISOYearWeek(self): + self.assertEqual(date(2003, 1, 1) + + relativedelta(day=4, weeks=+14, weekday=MO(-1)), + date(2003, 4, 7)) + + def testMillenniumAge(self): + self.assertEqual(relativedelta(self.now, date(2001, 1, 1)), + relativedelta(years=+2, months=+8, days=+16, + hours=+20, minutes=+54, seconds=+47, + microseconds=+282310)) + + def testJohnAge(self): + self.assertEqual(relativedelta(self.now, + datetime(1978, 4, 5, 12, 0)), + relativedelta(years=+25, months=+5, days=+12, + hours=+8, minutes=+54, seconds=+47, + microseconds=+282310)) + + def testJohnAgeWithDate(self): + self.assertEqual(relativedelta(self.today, + datetime(1978, 4, 5, 12, 0)), + relativedelta(years=+25, months=+5, days=+11, + hours=+12)) + + def testYearDay(self): + self.assertEqual(date(2003, 1, 1)+relativedelta(yearday=260), + date(2003, 9, 17)) + self.assertEqual(date(2002, 1, 1)+relativedelta(yearday=260), + date(2002, 9, 17)) + self.assertEqual(date(2000, 1, 1)+relativedelta(yearday=260), + date(2000, 9, 16)) + self.assertEqual(self.today+relativedelta(yearday=261), + date(2003, 9, 18)) + + def testYearDayBug(self): + # Tests a problem reported by Adam Ryan. + self.assertEqual(date(2010, 1, 1)+relativedelta(yearday=15), + date(2010, 1, 15)) + + def testNonLeapYearDay(self): + self.assertEqual(date(2003, 1, 1)+relativedelta(nlyearday=260), + date(2003, 9, 17)) + self.assertEqual(date(2002, 1, 1)+relativedelta(nlyearday=260), + date(2002, 9, 17)) + self.assertEqual(date(2000, 1, 1)+relativedelta(nlyearday=260), + date(2000, 9, 17)) + self.assertEqual(self.today+relativedelta(yearday=261), + date(2003, 9, 18)) + + def testAddition(self): + self.assertEqual(relativedelta(days=10) + + relativedelta(years=1, months=2, days=3, hours=4, + minutes=5, microseconds=6), + relativedelta(years=1, months=2, days=13, hours=4, + minutes=5, microseconds=6)) + + def testAbsoluteAddition(self): + self.assertEqual(relativedelta() + relativedelta(day=0, hour=0), + relativedelta(day=0, hour=0)) + self.assertEqual(relativedelta(day=0, hour=0) + relativedelta(), + relativedelta(day=0, hour=0)) + + def testAdditionToDatetime(self): + self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1), + datetime(2000, 1, 2)) + + def testRightAdditionToDatetime(self): + self.assertEqual(relativedelta(days=1) + datetime(2000, 1, 1), + datetime(2000, 1, 2)) + + def testAdditionInvalidType(self): + with self.assertRaises(TypeError): + relativedelta(days=3) + 9 + + def testAdditionUnsupportedType(self): + # For unsupported types that define their own comparators, etc.
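+ # NotAValue (imported from ._common above) is a NaN-like sentinel whose + # operators, including the reflected ones, return the sentinel itself; + # the expectation is presumably that relativedelta returns NotImplemented + # so Python falls back to the sentinel's reflected operator.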
+ self.assertIs(relativedelta(days=1) + NotAValue, NotAValue) + + def testAdditionFloatValue(self): + self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=float(1)), + datetime(2000, 1, 2)) + self.assertEqual(datetime(2000, 1, 1) + relativedelta(months=float(1)), + datetime(2000, 2, 1)) + self.assertEqual(datetime(2000, 1, 1) + relativedelta(years=float(1)), + datetime(2001, 1, 1)) + + def testAdditionFloatFractionals(self): + self.assertEqual(datetime(2000, 1, 1, 0) + + relativedelta(days=float(0.5)), + datetime(2000, 1, 1, 12)) + self.assertEqual(datetime(2000, 1, 1, 0, 0) + + relativedelta(hours=float(0.5)), + datetime(2000, 1, 1, 0, 30)) + self.assertEqual(datetime(2000, 1, 1, 0, 0, 0) + + relativedelta(minutes=float(0.5)), + datetime(2000, 1, 1, 0, 0, 30)) + self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) + + relativedelta(seconds=float(0.5)), + datetime(2000, 1, 1, 0, 0, 0, 500000)) + self.assertEqual(datetime(2000, 1, 1, 0, 0, 0, 0) + + relativedelta(microseconds=float(500000.25)), + datetime(2000, 1, 1, 0, 0, 0, 500000)) + + def testSubtraction(self): + self.assertEqual(relativedelta(days=10) - + relativedelta(years=1, months=2, days=3, hours=4, + minutes=5, microseconds=6), + relativedelta(years=-1, months=-2, days=7, hours=-4, + minutes=-5, microseconds=-6)) + + def testRightSubtractionFromDatetime(self): + self.assertEqual(datetime(2000, 1, 2) - relativedelta(days=1), + datetime(2000, 1, 1)) + + def testSubtractionWithDatetime(self): + self.assertRaises(TypeError, lambda x, y: x - y, + relativedelta(days=1), datetime(2000, 1, 1)) + + def testSubtractionInvalidType(self): + with self.assertRaises(TypeError): + relativedelta(hours=12) - 14 + + def testSubtractionUnsupportedType(self): + self.assertIs(relativedelta(days=1) - NotAValue, NotAValue) + + def testMultiplication(self): + self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=1) * 28, + datetime(2000, 1, 29)) + self.assertEqual(datetime(2000, 1, 1) + 28 * relativedelta(days=1), + datetime(2000, 1, 29)) + + def testMultiplicationUnsupportedType(self): + self.assertIs(relativedelta(days=1) * NotAValue, NotAValue) + + def testDivision(self): + self.assertEqual(datetime(2000, 1, 1) + relativedelta(days=28) / 28, + datetime(2000, 1, 2)) + + def testDivisionUnsupportedType(self): + self.assertIs(relativedelta(days=1) / NotAValue, NotAValue) + + def testBoolean(self): + self.assertFalse(relativedelta(days=0)) + self.assertTrue(relativedelta(days=1)) + + def testAbsoluteValueNegative(self): + rd_base = relativedelta(years=-1, months=-5, days=-2, hours=-3, + minutes=-5, seconds=-2, microseconds=-12) + rd_expected = relativedelta(years=1, months=5, days=2, hours=3, + minutes=5, seconds=2, microseconds=12) + self.assertEqual(abs(rd_base), rd_expected) + + def testAbsoluteValuePositive(self): + rd_base = relativedelta(years=1, months=5, days=2, hours=3, + minutes=5, seconds=2, microseconds=12) + rd_expected = rd_base + + self.assertEqual(abs(rd_base), rd_expected) + + def testComparison(self): + d1 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, + minutes=1, seconds=1, microseconds=1) + d2 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, + minutes=1, seconds=1, microseconds=1) + d3 = relativedelta(years=1, months=1, days=1, leapdays=0, hours=1, + minutes=1, seconds=1, microseconds=2) + + self.assertEqual(d1, d2) + self.assertNotEqual(d1, d3) + + def testInequalityTypeMismatch(self): + # Different type + self.assertFalse(relativedelta(year=1) == 19) + + def
testInequalityUnsupportedType(self): + self.assertIs(relativedelta(hours=3) == NotAValue, NotAValue) + + def testInequalityWeekdays(self): + # Different weekdays + no_wday = relativedelta(year=1997, month=4) + wday_mo_1 = relativedelta(year=1997, month=4, weekday=MO(+1)) + wday_mo_2 = relativedelta(year=1997, month=4, weekday=MO(+2)) + wday_tu = relativedelta(year=1997, month=4, weekday=TU) + + self.assertTrue(wday_mo_1 == wday_mo_1) + + self.assertFalse(no_wday == wday_mo_1) + self.assertFalse(wday_mo_1 == no_wday) + + self.assertFalse(wday_mo_1 == wday_mo_2) + self.assertFalse(wday_mo_2 == wday_mo_1) + + self.assertFalse(wday_mo_1 == wday_tu) + self.assertFalse(wday_tu == wday_mo_1) + + def testMonthOverflow(self): + self.assertEqual(relativedelta(months=273), + relativedelta(years=22, months=9)) + + def testWeeks(self): + # Test that the weeks property is working properly. + rd = relativedelta(years=4, months=2, weeks=8, days=6) + self.assertEqual((rd.weeks, rd.days), (8, 8 * 7 + 6)) + + rd.weeks = 3 + self.assertEqual((rd.weeks, rd.days), (3, 3 * 7 + 6)) + + def testRelativeDeltaRepr(self): + self.assertEqual(repr(relativedelta(years=1, months=-1, days=15)), + 'relativedelta(years=+1, months=-1, days=+15)') + + self.assertEqual(repr(relativedelta(months=14, seconds=-25)), + 'relativedelta(years=+1, months=+2, seconds=-25)') + + self.assertEqual(repr(relativedelta(month=3, hour=3, weekday=SU(3))), + 'relativedelta(month=3, weekday=SU(+3), hour=3)') + + def testRelativeDeltaFractionalYear(self): + with self.assertRaises(ValueError): + relativedelta(years=1.5) + + def testRelativeDeltaFractionalMonth(self): + with self.assertRaises(ValueError): + relativedelta(months=1.5) + + def testRelativeDeltaFractionalAbsolutes(self): + # Fractional absolute values will soon be unsupported, + # check for the deprecation warning. 
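+ # ("Absolute" means the singular keywords year=, month=, day=, hour=, + # minute=, second= and microsecond=, which set a field outright, as + # opposed to the plural relative offsets years=, months=, etc.)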
+ with self.assertWarns(DeprecationWarning): + relativedelta(year=2.86) + + with self.assertWarns(DeprecationWarning): + relativedelta(month=1.29) + + with self.assertWarns(DeprecationWarning): + relativedelta(day=0.44) + + with self.assertWarns(DeprecationWarning): + relativedelta(hour=23.98) + + with self.assertWarns(DeprecationWarning): + relativedelta(minute=45.21) + + with self.assertWarns(DeprecationWarning): + relativedelta(second=13.2) + + with self.assertWarns(DeprecationWarning): + relativedelta(microsecond=157221.93) + + def testRelativeDeltaFractionalRepr(self): + rd = relativedelta(years=3, months=-2, days=1.25) + + self.assertEqual(repr(rd), + 'relativedelta(years=+3, months=-2, days=+1.25)') + + rd = relativedelta(hours=0.5, seconds=9.22) + self.assertEqual(repr(rd), + 'relativedelta(hours=+0.5, seconds=+9.22)') + + def testRelativeDeltaFractionalWeeks(self): + # Equivalent to days=8, hours=18 + rd = relativedelta(weeks=1.25) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd, + datetime(2009, 9, 11, 18)) + + def testRelativeDeltaFractionalDays(self): + rd1 = relativedelta(days=1.48) + + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd1, + datetime(2009, 9, 4, 11, 31, 12)) + + rd2 = relativedelta(days=1.5) + self.assertEqual(d1 + rd2, + datetime(2009, 9, 4, 12, 0, 0)) + + def testRelativeDeltaFractionalHours(self): + rd = relativedelta(days=1, hours=12.5) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd, + datetime(2009, 9, 4, 12, 30, 0)) + + def testRelativeDeltaFractionalMinutes(self): + rd = relativedelta(hours=1, minutes=30.5) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd, + datetime(2009, 9, 3, 1, 30, 30)) + + def testRelativeDeltaFractionalSeconds(self): + rd = relativedelta(hours=5, minutes=30, seconds=30.5) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd, + datetime(2009, 9, 3, 5, 30, 30, 500000)) + + def testRelativeDeltaFractionalPositiveOverflow(self): + # Equivalent to (days=1, hours=14) + rd1 = relativedelta(days=1.5, hours=2) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd1, + datetime(2009, 9, 4, 14, 0, 0)) + + # Equivalent to (days=1, hours=14, minutes=45) + rd2 = relativedelta(days=1.5, hours=2.5, minutes=15) + d1 = datetime(2009, 9, 3, 0, 0) + self.assertEqual(d1 + rd2, + datetime(2009, 9, 4, 14, 45)) + + # Carry back up - equivalent to (days=2, hours=2, minutes=0, seconds=1) + rd3 = relativedelta(days=1.5, hours=13, minutes=59.5, seconds=31) + self.assertEqual(d1 + rd3, + datetime(2009, 9, 5, 2, 0, 1)) + + def testRelativeDeltaFractionalNegativeDays(self): + # Equivalent to (days=-1, hours=-1) + rd1 = relativedelta(days=-1.5, hours=11) + d1 = datetime(2009, 9, 3, 12, 0) + self.assertEqual(d1 + rd1, + datetime(2009, 9, 2, 11, 0, 0)) + + # Equivalent to (days=-1, hours=-9) + rd2 = relativedelta(days=-1.25, hours=-3) + self.assertEqual(d1 + rd2, + datetime(2009, 9, 2, 3)) + + def testRelativeDeltaNormalizeFractionalDays(self): + # Equivalent to (days=2, hours=18) + rd1 = relativedelta(days=2.75) + + self.assertEqual(rd1.normalized(), relativedelta(days=2, hours=18)) + + # Equivalent to (days=1, hours=11, minutes=31, seconds=12) + rd2 = relativedelta(days=1.48) + + self.assertEqual(rd2.normalized(), + relativedelta(days=1, hours=11, minutes=31, seconds=12)) + + def testRelativeDeltaNormalizeFractionalDays2(self): + # Equivalent to (hours=1, minutes=30) + rd1 = relativedelta(hours=1.5) + + self.assertEqual(rd1.normalized(), relativedelta(hours=1, minutes=30)) + + # Equivalent to (hours=3,
minutes=17, seconds=5, microseconds=100) + rd2 = relativedelta(hours=3.28472225) + + self.assertEqual(rd2.normalized(), + relativedelta(hours=3, minutes=17, seconds=5, microseconds=100)) + + def testRelativeDeltaNormalizeFractionalMinutes(self): + # Equivalent to (minutes=15, seconds=36) + rd1 = relativedelta(minutes=15.6) + + self.assertEqual(rd1.normalized(), + relativedelta(minutes=15, seconds=36)) + + # Equivalent to (minutes=25, seconds=20, microseconds=25000) + rd2 = relativedelta(minutes=25.33375) + + self.assertEqual(rd2.normalized(), + relativedelta(minutes=25, seconds=20, microseconds=25000)) + + def testRelativeDeltaNormalizeFractionalSeconds(self): + # Equivalent to (seconds=45, microseconds=25000) + rd1 = relativedelta(seconds=45.025) + self.assertEqual(rd1.normalized(), + relativedelta(seconds=45, microseconds=25000)) + + def testRelativeDeltaFractionalPositiveOverflow2(self): + # Equivalent to (days=1, hours=14) + rd1 = relativedelta(days=1.5, hours=2) + self.assertEqual(rd1.normalized(), + relativedelta(days=1, hours=14)) + + # Equivalent to (days=1, hours=14, minutes=45) + rd2 = relativedelta(days=1.5, hours=2.5, minutes=15) + self.assertEqual(rd2.normalized(), + relativedelta(days=1, hours=14, minutes=45)) + + # Carry back up - equivalent to: + # (days=2, hours=2, minutes=0, seconds=2, microseconds=3) + rd3 = relativedelta(days=1.5, hours=13, minutes=59.50045, + seconds=31.473, microseconds=500003) + self.assertEqual(rd3.normalized(), + relativedelta(days=2, hours=2, minutes=0, + seconds=2, microseconds=3)) + + def testRelativeDeltaFractionalNegativeOverflow(self): + # Equivalent to (days=-1) + rd1 = relativedelta(days=-0.5, hours=-12) + self.assertEqual(rd1.normalized(), + relativedelta(days=-1)) + + # Equivalent to (days=-1) + rd2 = relativedelta(days=-1.5, hours=12) + self.assertEqual(rd2.normalized(), + relativedelta(days=-1)) + + # Equivalent to (days=-1, hours=-14, minutes=-45) + rd3 = relativedelta(days=-1.5, hours=-2.5, minutes=-15) + self.assertEqual(rd3.normalized(), + relativedelta(days=-1, hours=-14, minutes=-45)) + + # Equivalent to (days=-1, hours=-14, minutes=+15) + rd4 = relativedelta(days=-1.5, hours=-2.5, minutes=45) + self.assertEqual(rd4.normalized(), + relativedelta(days=-1, hours=-14, minutes=+15)) + + # Carry back up - equivalent to: + # (days=-2, hours=-2, minutes=0, seconds=-2, microseconds=-3) + rd3 = relativedelta(days=-1.5, hours=-13, minutes=-59.50045, + seconds=-31.473, microseconds=-500003) + self.assertEqual(rd3.normalized(), + relativedelta(days=-2, hours=-2, minutes=0, + seconds=-2, microseconds=-3)) + + def testInvalidYearDay(self): + with self.assertRaises(ValueError): + relativedelta(yearday=367) + + def testAddTimedeltaToUnpopulatedRelativedelta(self): + td = timedelta( + days=1, + seconds=1, + microseconds=1, + milliseconds=1, + minutes=1, + hours=1, + weeks=1 + ) + + expected = relativedelta( + weeks=1, + days=1, + hours=1, + minutes=1, + seconds=1, + microseconds=1001 + ) + + self.assertEqual(expected, relativedelta() + td) + + def testAddTimedeltaToPopulatedRelativeDelta(self): + td = timedelta( + days=1, + seconds=1, + microseconds=1, + milliseconds=1, + minutes=1, + hours=1, + weeks=1 + ) + + rd = relativedelta( + year=1, + month=1, + day=1, + hour=1, + minute=1, + second=1, + microsecond=1, + years=1, + months=1, + days=1, + weeks=1, + hours=1, + minutes=1, + seconds=1, + microseconds=1 + ) + + expected = relativedelta( + year=1, + month=1, + day=1, + hour=1, + minute=1, + second=1, + microsecond=1, + years=1, + months=1, + 
weeks=2, + days=2, + hours=2, + minutes=2, + seconds=2, + microseconds=1002, + ) + + self.assertEqual(expected, rd + td) + + def testHashable(self): + try: + {relativedelta(minute=1): 'test'} + except: + self.fail("relativedelta() failed to hash!") + + +class RelativeDeltaWeeksPropertyGetterTest(unittest.TestCase): + """Test the weeks property getter""" + + def test_one_day(self): + rd = relativedelta(days=1) + self.assertEqual(rd.days, 1) + self.assertEqual(rd.weeks, 0) + + def test_minus_one_day(self): + rd = relativedelta(days=-1) + self.assertEqual(rd.days, -1) + self.assertEqual(rd.weeks, 0) + + def test_eight_days(self): + rd = relativedelta(days=8) + self.assertEqual(rd.days, 8) + self.assertEqual(rd.weeks, 1) + + def test_minus_eight_days(self): + rd = relativedelta(days=-8) + self.assertEqual(rd.days, -8) + self.assertEqual(rd.weeks, -1) + + +class RelativeDeltaWeeksPropertySetterTest(unittest.TestCase): + """Test the weeks setter which makes a "smart" update of the days attribute""" + + def test_one_day_set_one_week(self): + rd = relativedelta(days=1) + rd.weeks = 1 # add 7 days + self.assertEqual(rd.days, 8) + self.assertEqual(rd.weeks, 1) + + def test_minus_one_day_set_one_week(self): + rd = relativedelta(days=-1) + rd.weeks = 1 # add 7 days + self.assertEqual(rd.days, 6) + self.assertEqual(rd.weeks, 0) + + def test_eight_days_set_minus_one_week(self): + rd = relativedelta(days=8) + rd.weeks = -1 # change from 1 week, 1 day to -1 week, 1 day + self.assertEqual(rd.days, -6) + self.assertEqual(rd.weeks, 0) + + def test_minus_eight_days_set_minus_one_week(self): + rd = relativedelta(days=-8) + rd.weeks = -1 # does not change anything + self.assertEqual(rd.days, -8) + self.assertEqual(rd.weeks, -1) + + +# vim:ts=4:sw=4:et diff --git a/libraries/dateutil/test/test_rrule.py b/libraries/dateutil/test/test_rrule.py new file mode 100644 index 00000000..cd08ce29 --- /dev/null +++ b/libraries/dateutil/test/test_rrule.py @@ -0,0 +1,4842 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from ._common import WarningTestMixin + +from datetime import datetime, date +import unittest +from six import PY3 + +from dateutil import tz +from dateutil.rrule import ( + rrule, rruleset, rrulestr, + YEARLY, MONTHLY, WEEKLY, DAILY, + HOURLY, MINUTELY, SECONDLY, + MO, TU, WE, TH, FR, SA, SU +) + +from freezegun import freeze_time + +import pytest + + +@pytest.mark.rrule +class RRuleTest(WarningTestMixin, unittest.TestCase): + def _rrulestr_reverse_test(self, rule): + """ + Call with an `rrule` and it will test that `str(rrule)` generates a + string which generates the same `rrule` as the input when passed to + `rrulestr()` + """ + rr_str = str(rule) + rrulestr_rrule = rrulestr(rr_str) + + self.assertEqual(list(rule), list(rrulestr_rrule)) + + def testStrAppendRRULEToken(self): + # `_rrulestr_reverse_test` does not check if the "RRULE:" prefix + # property is appended properly, so give it a dedicated test + self.assertEqual(str(rrule(YEARLY, + count=5, + dtstart=datetime(1997, 9, 2, 9, 0))), + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=5") + + rr_str = ( + 'DTSTART:19970105T083000\nRRULE:FREQ=YEARLY;INTERVAL=2' + ) + self.assertEqual(str(rrulestr(rr_str)), rr_str) + + def testYearly(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testYearlyInterval(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + interval=2, +
dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0), + datetime(2001, 9, 2, 9, 0)]) + + def testYearlyIntervalLarge(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + interval=100, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(2097, 9, 2, 9, 0), + datetime(2197, 9, 2, 9, 0)]) + + def testYearlyByMonth(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 2, 9, 0), + datetime(1998, 3, 2, 9, 0), + datetime(1999, 1, 2, 9, 0)]) + + def testYearlyByMonthDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 9, 0), + datetime(1997, 10, 1, 9, 0), + datetime(1997, 10, 3, 9, 0)]) + + def testYearlyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 9, 0), + datetime(1998, 1, 7, 9, 0), + datetime(1998, 3, 5, 9, 0)]) + + def testYearlyByWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testYearlyByNWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 25, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 12, 31, 9, 0)]) + + def testYearlyByNWeekDayLarge(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 11, 9, 0), + datetime(1998, 1, 20, 9, 0), + datetime(1998, 12, 17, 9, 0)]) + + def testYearlyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testYearlyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 29, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def testYearlyByMonthAndNWeekDayLarge(self): + # This is interesting because the TH(-3) ends up before + # the TU(3). 
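+ # e.g. in January 1998 the third Tuesday is the 20th, while the third + # Thursday counted from the end of the month is the 15th; see the + # expected dates below.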
+ self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 15, 9, 0), + datetime(1998, 1, 20, 9, 0), + datetime(1998, 3, 12, 9, 0)]) + + def testYearlyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 2, 3, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def testYearlyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 3, 3, 9, 0), + datetime(2001, 3, 1, 9, 0)]) + + def testYearlyByYearDay(self): + self.assertEqual(list(rrule(YEARLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testYearlyByYearDayNeg(self): + self.assertEqual(list(rrule(YEARLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testYearlyByMonthAndYearDay(self): + self.assertEqual(list(rrule(YEARLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 4, 10, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testYearlyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(YEARLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 4, 10, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testYearlyByWeekNo(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 9, 0), + datetime(1998, 5, 12, 9, 0), + datetime(1998, 5, 13, 9, 0)]) + + def testYearlyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 9, 0), + datetime(1999, 1, 4, 9, 0), + datetime(2000, 1, 3, 9, 0)]) + + def testYearlyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
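+ # e.g. the Sunday of ISO week 52 of 1999 falls on 2000-01-02, the third + # occurrence expected below.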
+ self.assertEqual(list(rrule(YEARLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1998, 12, 27, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testYearlyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1999, 1, 3, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testYearlyByEaster(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 9, 0), + datetime(1999, 4, 4, 9, 0), + datetime(2000, 4, 23, 9, 0)]) + + def testYearlyByEasterPos(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 9, 0), + datetime(1999, 4, 5, 9, 0), + datetime(2000, 4, 24, 9, 0)]) + + def testYearlyByEasterNeg(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 9, 0), + datetime(1999, 4, 3, 9, 0), + datetime(2000, 4, 22, 9, 0)]) + + def testYearlyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 9, 0), + datetime(2004, 12, 27, 9, 0), + datetime(2009, 12, 28, 9, 0)]) + + def testYearlyByHour(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1998, 9, 2, 6, 0), + datetime(1998, 9, 2, 18, 0)]) + + def testYearlyByMinute(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1998, 9, 2, 9, 6)]) + + def testYearlyBySecond(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1998, 9, 2, 9, 0, 6)]) + + def testYearlyByHourAndMinute(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1998, 9, 2, 6, 6)]) + + def testYearlyByHourAndSecond(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1998, 9, 2, 6, 0, 6)]) + + def testYearlyByMinuteAndSecond(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testYearlyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testYearlyBySetPos(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonthday=15, + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 11, 15, 18, 0), + datetime(1998, 2, 15, 6, 0), + datetime(1998, 11, 15, 18, 0)]) + + def 
testMonthly(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 10, 2, 9, 0), + datetime(1997, 11, 2, 9, 0)]) + + def testMonthlyInterval(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 11, 2, 9, 0), + datetime(1998, 1, 2, 9, 0)]) + + def testMonthlyIntervalLarge(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + interval=18, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1999, 3, 2, 9, 0), + datetime(2000, 9, 2, 9, 0)]) + + def testMonthlyByMonth(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 2, 9, 0), + datetime(1998, 3, 2, 9, 0), + datetime(1999, 1, 2, 9, 0)]) + + def testMonthlyByMonthDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 9, 0), + datetime(1997, 10, 1, 9, 0), + datetime(1997, 10, 3, 9, 0)]) + + def testMonthlyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 9, 0), + datetime(1998, 1, 7, 9, 0), + datetime(1998, 3, 5, 9, 0)]) + + def testMonthlyByWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + # Third Monday of the month + self.assertEqual(rrule(MONTHLY, + byweekday=(MO(+3)), + dtstart=datetime(1997, 9, 1)).between(datetime(1997, 9, 1), + datetime(1997, 12, 1)), + [datetime(1997, 9, 15, 0, 0), + datetime(1997, 10, 20, 0, 0), + datetime(1997, 11, 17, 0, 0)]) + + def testMonthlyByNWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 25, 9, 0), + datetime(1997, 10, 7, 9, 0)]) + + def testMonthlyByNWeekDayLarge(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 16, 9, 0), + datetime(1997, 10, 16, 9, 0)]) + + def testMonthlyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testMonthlyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 29, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def testMonthlyByMonthAndNWeekDayLarge(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 15, 9, 0), + datetime(1998, 1, 20, 9, 0), + datetime(1998, 3, 12, 9, 0)]) + + def testMonthlyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 2, 3, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def 
testMonthlyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 3, 3, 9, 0), + datetime(2001, 3, 1, 9, 0)]) + + def testMonthlyByYearDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testMonthlyByYearDayNeg(self): + self.assertEqual(list(rrule(MONTHLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testMonthlyByMonthAndYearDay(self): + self.assertEqual(list(rrule(MONTHLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 4, 10, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testMonthlyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(MONTHLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 4, 10, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testMonthlyByWeekNo(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 9, 0), + datetime(1998, 5, 12, 9, 0), + datetime(1998, 5, 13, 9, 0)]) + + def testMonthlyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 9, 0), + datetime(1999, 1, 4, 9, 0), + datetime(2000, 1, 3, 9, 0)]) + + def testMonthlyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
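+ # Note that the expected dates match the YEARLY variant exactly: the + # byweekno/byweekday pair already pins down one day per year, so the + # base frequency makes no difference here.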
+ self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1998, 12, 27, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testMonthlyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1999, 1, 3, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testMonthlyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 9, 0), + datetime(2004, 12, 27, 9, 0), + datetime(2009, 12, 28, 9, 0)]) + + def testMonthlyByEaster(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 9, 0), + datetime(1999, 4, 4, 9, 0), + datetime(2000, 4, 23, 9, 0)]) + + def testMonthlyByEasterPos(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 9, 0), + datetime(1999, 4, 5, 9, 0), + datetime(2000, 4, 24, 9, 0)]) + + def testMonthlyByEasterNeg(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 9, 0), + datetime(1999, 4, 3, 9, 0), + datetime(2000, 4, 22, 9, 0)]) + + def testMonthlyByHour(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 10, 2, 6, 0), + datetime(1997, 10, 2, 18, 0)]) + + def testMonthlyByMinute(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1997, 10, 2, 9, 6)]) + + def testMonthlyBySecond(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 10, 2, 9, 0, 6)]) + + def testMonthlyByHourAndMinute(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1997, 10, 2, 6, 6)]) + + def testMonthlyByHourAndSecond(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 10, 2, 6, 0, 6)]) + + def testMonthlyByMinuteAndSecond(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testMonthlyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testMonthlyBySetPos(self): + self.assertEqual(list(rrule(MONTHLY, + count=3, + bymonthday=(13, 17), + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 13, 18, 0), + datetime(1997, 9, 17, 6, 0), + 
datetime(1997, 10, 13, 18, 0)]) + + def testWeekly(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testWeeklyInterval(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 16, 9, 0), + datetime(1997, 9, 30, 9, 0)]) + + def testWeeklyIntervalLarge(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + interval=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 1, 20, 9, 0), + datetime(1998, 6, 9, 9, 0)]) + + def testWeeklyByMonth(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 13, 9, 0), + datetime(1998, 1, 20, 9, 0)]) + + def testWeeklyByMonthDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 9, 0), + datetime(1997, 10, 1, 9, 0), + datetime(1997, 10, 3, 9, 0)]) + + def testWeeklyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 9, 0), + datetime(1998, 1, 7, 9, 0), + datetime(1998, 3, 5, 9, 0)]) + + def testWeeklyByWeekDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testWeeklyByNWeekDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testWeeklyByMonthAndWeekDay(self): + # This test is interesting, because it crosses the year + # boundary in a weekly period to find day '1' as a + # valid recurrence. 
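+ # i.e. stepping week by week from 1997-09-02, the first occurrence + # satisfying bymonth=(1, 3) and byweekday=(TU, TH) is Thursday + # 1998-01-01.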
+ self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testWeeklyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testWeeklyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 2, 3, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def testWeeklyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 3, 3, 9, 0), + datetime(2001, 3, 1, 9, 0)]) + + def testWeeklyByYearDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testWeeklyByYearDayNeg(self): + self.assertEqual(list(rrule(WEEKLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testWeeklyByMonthAndYearDay(self): + self.assertEqual(list(rrule(WEEKLY, + count=4, + bymonth=(1, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 1, 1, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testWeeklyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(WEEKLY, + count=4, + bymonth=(1, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 1, 1, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testWeeklyByWeekNo(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 9, 0), + datetime(1998, 5, 12, 9, 0), + datetime(1998, 5, 13, 9, 0)]) + + def testWeeklyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 9, 0), + datetime(1999, 1, 4, 9, 0), + datetime(2000, 1, 3, 9, 0)]) + + def testWeeklyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
+ self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1998, 12, 27, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testWeeklyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1999, 1, 3, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testWeeklyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 9, 0), + datetime(2004, 12, 27, 9, 0), + datetime(2009, 12, 28, 9, 0)]) + + def testWeeklyByEaster(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 9, 0), + datetime(1999, 4, 4, 9, 0), + datetime(2000, 4, 23, 9, 0)]) + + def testWeeklyByEasterPos(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 9, 0), + datetime(1999, 4, 5, 9, 0), + datetime(2000, 4, 24, 9, 0)]) + + def testWeeklyByEasterNeg(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 9, 0), + datetime(1999, 4, 3, 9, 0), + datetime(2000, 4, 22, 9, 0)]) + + def testWeeklyByHour(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 9, 9, 6, 0), + datetime(1997, 9, 9, 18, 0)]) + + def testWeeklyByMinute(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1997, 9, 9, 9, 6)]) + + def testWeeklyBySecond(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 9, 9, 9, 0, 6)]) + + def testWeeklyByHourAndMinute(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1997, 9, 9, 6, 6)]) + + def testWeeklyByHourAndSecond(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 9, 9, 6, 0, 6)]) + + def testWeeklyByMinuteAndSecond(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testWeeklyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testWeeklyBySetPos(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + byweekday=(TU, TH), + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 9, 4, 6, 0), + datetime(1997, 9, 9, 18, 0)]) + + def 
testDaily(self): + self.assertEqual(list(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testDailyInterval(self): + self.assertEqual(list(rrule(DAILY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 6, 9, 0)]) + + def testDailyIntervalLarge(self): + self.assertEqual(list(rrule(DAILY, + count=3, + interval=92, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 12, 3, 9, 0), + datetime(1998, 3, 5, 9, 0)]) + + def testDailyByMonth(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 2, 9, 0), + datetime(1998, 1, 3, 9, 0)]) + + def testDailyByMonthDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 9, 0), + datetime(1997, 10, 1, 9, 0), + datetime(1997, 10, 3, 9, 0)]) + + def testDailyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 9, 0), + datetime(1998, 1, 7, 9, 0), + datetime(1998, 3, 5, 9, 0)]) + + def testDailyByWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testDailyByNWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testDailyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testDailyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 1, 8, 9, 0)]) + + def testDailyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 2, 3, 9, 0), + datetime(1998, 3, 3, 9, 0)]) + + def testDailyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 3, 3, 9, 0), + datetime(2001, 3, 1, 9, 0)]) + + def testDailyByYearDay(self): + self.assertEqual(list(rrule(DAILY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def testDailyByYearDayNeg(self): + self.assertEqual(list(rrule(DAILY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 9, 0), + datetime(1998, 1, 1, 9, 0), + datetime(1998, 4, 10, 9, 0), + datetime(1998, 7, 19, 9, 0)]) + + def 
testDailyByMonthAndYearDay(self): + self.assertEqual(list(rrule(DAILY, + count=4, + bymonth=(1, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 1, 1, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testDailyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(DAILY, + count=4, + bymonth=(1, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 9, 0), + datetime(1998, 7, 19, 9, 0), + datetime(1999, 1, 1, 9, 0), + datetime(1999, 7, 19, 9, 0)]) + + def testDailyByWeekNo(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 9, 0), + datetime(1998, 5, 12, 9, 0), + datetime(1998, 5, 13, 9, 0)]) + + def testDailyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self.assertEqual(list(rrule(DAILY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 9, 0), + datetime(1999, 1, 4, 9, 0), + datetime(2000, 1, 3, 9, 0)]) + + def testDailyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. + self.assertEqual(list(rrule(DAILY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1998, 12, 27, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testDailyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 9, 0), + datetime(1999, 1, 3, 9, 0), + datetime(2000, 1, 2, 9, 0)]) + + def testDailyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 9, 0), + datetime(2004, 12, 27, 9, 0), + datetime(2009, 12, 28, 9, 0)]) + + def testDailyByEaster(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 9, 0), + datetime(1999, 4, 4, 9, 0), + datetime(2000, 4, 23, 9, 0)]) + + def testDailyByEasterPos(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 9, 0), + datetime(1999, 4, 5, 9, 0), + datetime(2000, 4, 24, 9, 0)]) + + def testDailyByEasterNeg(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 9, 0), + datetime(1999, 4, 3, 9, 0), + datetime(2000, 4, 22, 9, 0)]) + + def testDailyByHour(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 9, 3, 6, 0), + datetime(1997, 9, 3, 18, 0)]) + + def testDailyByMinute(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1997, 9, 3, 9, 6)]) + + def testDailyBySecond(self): + self.assertEqual(list(rrule(DAILY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 9, 3, 9, 0, 6)]) + + def testDailyByHourAndMinute(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byhour=(6, 
18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1997, 9, 3, 6, 6)]) + + def testDailyByHourAndSecond(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 9, 3, 6, 0, 6)]) + + def testDailyByMinuteAndSecond(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testDailyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testDailyBySetPos(self): + self.assertEqual(list(rrule(DAILY, + count=3, + byhour=(6, 18), + byminute=(15, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 15), + datetime(1997, 9, 3, 6, 45), + datetime(1997, 9, 3, 18, 15)]) + + def testHourly(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 10, 0), + datetime(1997, 9, 2, 11, 0)]) + + def testHourlyInterval(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 11, 0), + datetime(1997, 9, 2, 13, 0)]) + + def testHourlyIntervalLarge(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + interval=769, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 10, 4, 10, 0), + datetime(1997, 11, 5, 11, 0)]) + + def testHourlyByMonth(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 1, 0), + datetime(1998, 1, 1, 2, 0)]) + + def testHourlyByMonthDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 0, 0), + datetime(1997, 9, 3, 1, 0), + datetime(1997, 9, 3, 2, 0)]) + + def testHourlyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 0, 0), + datetime(1998, 1, 5, 1, 0), + datetime(1998, 1, 5, 2, 0)]) + + def testHourlyByWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 10, 0), + datetime(1997, 9, 2, 11, 0)]) + + def testHourlyByNWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 10, 0), + datetime(1997, 9, 2, 11, 0)]) + + def testHourlyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 1, 0), + datetime(1998, 1, 1, 2, 0)]) + + def testHourlyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonth=(1, 
3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 1, 0), + datetime(1998, 1, 1, 2, 0)]) + + def testHourlyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 1, 0), + datetime(1998, 1, 1, 2, 0)]) + + def testHourlyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 1, 0), + datetime(1998, 1, 1, 2, 0)]) + + def testHourlyByYearDay(self): + self.assertEqual(list(rrule(HOURLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0), + datetime(1997, 12, 31, 1, 0), + datetime(1997, 12, 31, 2, 0), + datetime(1997, 12, 31, 3, 0)]) + + def testHourlyByYearDayNeg(self): + self.assertEqual(list(rrule(HOURLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0), + datetime(1997, 12, 31, 1, 0), + datetime(1997, 12, 31, 2, 0), + datetime(1997, 12, 31, 3, 0)]) + + def testHourlyByMonthAndYearDay(self): + self.assertEqual(list(rrule(HOURLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0), + datetime(1998, 4, 10, 1, 0), + datetime(1998, 4, 10, 2, 0), + datetime(1998, 4, 10, 3, 0)]) + + def testHourlyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(HOURLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0), + datetime(1998, 4, 10, 1, 0), + datetime(1998, 4, 10, 2, 0), + datetime(1998, 4, 10, 3, 0)]) + + def testHourlyByWeekNo(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 0, 0), + datetime(1998, 5, 11, 1, 0), + datetime(1998, 5, 11, 2, 0)]) + + def testHourlyByWeekNoAndWeekDay(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 0, 0), + datetime(1997, 12, 29, 1, 0), + datetime(1997, 12, 29, 2, 0)]) + + def testHourlyByWeekNoAndWeekDayLarge(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0), + datetime(1997, 12, 28, 1, 0), + datetime(1997, 12, 28, 2, 0)]) + + def testHourlyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0), + datetime(1997, 12, 28, 1, 0), + datetime(1997, 12, 28, 2, 0)]) + + def testHourlyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 0, 0), + datetime(1998, 12, 28, 1, 0), + datetime(1998, 12, 28, 2, 0)]) + + def testHourlyByEaster(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 0, 0), + datetime(1998, 4, 12, 1, 0), + datetime(1998, 4, 12, 2, 0)]) + + def testHourlyByEasterPos(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + 
byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 0, 0), + datetime(1998, 4, 13, 1, 0), + datetime(1998, 4, 13, 2, 0)]) + + def testHourlyByEasterNeg(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 0, 0), + datetime(1998, 4, 11, 1, 0), + datetime(1998, 4, 11, 2, 0)]) + + def testHourlyByHour(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 9, 3, 6, 0), + datetime(1997, 9, 3, 18, 0)]) + + def testHourlyByMinute(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1997, 9, 2, 10, 6)]) + + def testHourlyBySecond(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 9, 2, 10, 0, 6)]) + + def testHourlyByHourAndMinute(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1997, 9, 3, 6, 6)]) + + def testHourlyByHourAndSecond(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 9, 3, 6, 0, 6)]) + + def testHourlyByMinuteAndSecond(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testHourlyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testHourlyBySetPos(self): + self.assertEqual(list(rrule(HOURLY, + count=3, + byminute=(15, 45), + bysecond=(15, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 15, 45), + datetime(1997, 9, 2, 9, 45, 15), + datetime(1997, 9, 2, 10, 15, 45)]) + + def testMinutely(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 9, 1), + datetime(1997, 9, 2, 9, 2)]) + + def testMinutelyInterval(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 9, 2), + datetime(1997, 9, 2, 9, 4)]) + + def testMinutelyIntervalLarge(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + interval=1501, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 10, 1), + datetime(1997, 9, 4, 11, 2)]) + + def testMinutelyByMonth(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 0, 1), + datetime(1998, 1, 1, 0, 2)]) + + def testMinutelyByMonthDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonthday=(1, 3), + 
dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 0, 0), + datetime(1997, 9, 3, 0, 1), + datetime(1997, 9, 3, 0, 2)]) + + def testMinutelyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 0, 0), + datetime(1998, 1, 5, 0, 1), + datetime(1998, 1, 5, 0, 2)]) + + def testMinutelyByWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 9, 1), + datetime(1997, 9, 2, 9, 2)]) + + def testMinutelyByNWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 2, 9, 1), + datetime(1997, 9, 2, 9, 2)]) + + def testMinutelyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 0, 1), + datetime(1998, 1, 1, 0, 2)]) + + def testMinutelyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 0, 1), + datetime(1998, 1, 1, 0, 2)]) + + def testMinutelyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 0, 1), + datetime(1998, 1, 1, 0, 2)]) + + def testMinutelyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0), + datetime(1998, 1, 1, 0, 1), + datetime(1998, 1, 1, 0, 2)]) + + def testMinutelyByYearDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0), + datetime(1997, 12, 31, 0, 1), + datetime(1997, 12, 31, 0, 2), + datetime(1997, 12, 31, 0, 3)]) + + def testMinutelyByYearDayNeg(self): + self.assertEqual(list(rrule(MINUTELY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0), + datetime(1997, 12, 31, 0, 1), + datetime(1997, 12, 31, 0, 2), + datetime(1997, 12, 31, 0, 3)]) + + def testMinutelyByMonthAndYearDay(self): + self.assertEqual(list(rrule(MINUTELY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0), + datetime(1998, 4, 10, 0, 1), + datetime(1998, 4, 10, 0, 2), + datetime(1998, 4, 10, 0, 3)]) + + def testMinutelyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(MINUTELY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0), + datetime(1998, 4, 10, 0, 1), + datetime(1998, 4, 10, 0, 2), + datetime(1998, 4, 10, 0, 3)]) + + def testMinutelyByWeekNo(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 0, 0), + datetime(1998, 5, 11, 0, 1), + datetime(1998, 5, 11, 0, 2)]) + + def testMinutelyByWeekNoAndWeekDay(self): + self.assertEqual(list(rrule(MINUTELY, + 
count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 0, 0), + datetime(1997, 12, 29, 0, 1), + datetime(1997, 12, 29, 0, 2)]) + + def testMinutelyByWeekNoAndWeekDayLarge(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0), + datetime(1997, 12, 28, 0, 1), + datetime(1997, 12, 28, 0, 2)]) + + def testMinutelyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0), + datetime(1997, 12, 28, 0, 1), + datetime(1997, 12, 28, 0, 2)]) + + def testMinutelyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 0, 0), + datetime(1998, 12, 28, 0, 1), + datetime(1998, 12, 28, 0, 2)]) + + def testMinutelyByEaster(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 0, 0), + datetime(1998, 4, 12, 0, 1), + datetime(1998, 4, 12, 0, 2)]) + + def testMinutelyByEasterPos(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 0, 0), + datetime(1998, 4, 13, 0, 1), + datetime(1998, 4, 13, 0, 2)]) + + def testMinutelyByEasterNeg(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 0, 0), + datetime(1998, 4, 11, 0, 1), + datetime(1998, 4, 11, 0, 2)]) + + def testMinutelyByHour(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0), + datetime(1997, 9, 2, 18, 1), + datetime(1997, 9, 2, 18, 2)]) + + def testMinutelyByMinute(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6), + datetime(1997, 9, 2, 9, 18), + datetime(1997, 9, 2, 10, 6)]) + + def testMinutelyBySecond(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 9, 2, 9, 1, 6)]) + + def testMinutelyByHourAndMinute(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6), + datetime(1997, 9, 2, 18, 18), + datetime(1997, 9, 3, 6, 6)]) + + def testMinutelyByHourAndSecond(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 9, 2, 18, 1, 6)]) + + def testMinutelyByMinuteAndSecond(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testMinutelyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 
6)]) + + def testMinutelyBySetPos(self): + self.assertEqual(list(rrule(MINUTELY, + count=3, + bysecond=(15, 30, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 15), + datetime(1997, 9, 2, 9, 0, 45), + datetime(1997, 9, 2, 9, 1, 15)]) + + def testSecondly(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 0), + datetime(1997, 9, 2, 9, 0, 1), + datetime(1997, 9, 2, 9, 0, 2)]) + + def testSecondlyInterval(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 0), + datetime(1997, 9, 2, 9, 0, 2), + datetime(1997, 9, 2, 9, 0, 4)]) + + def testSecondlyIntervalLarge(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + interval=90061, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 0), + datetime(1997, 9, 3, 10, 1, 1), + datetime(1997, 9, 4, 11, 2, 2)]) + + def testSecondlyByMonth(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0, 0), + datetime(1998, 1, 1, 0, 0, 1), + datetime(1998, 1, 1, 0, 0, 2)]) + + def testSecondlyByMonthDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 3, 0, 0, 0), + datetime(1997, 9, 3, 0, 0, 1), + datetime(1997, 9, 3, 0, 0, 2)]) + + def testSecondlyByMonthAndMonthDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 0, 0, 0), + datetime(1998, 1, 5, 0, 0, 1), + datetime(1998, 1, 5, 0, 0, 2)]) + + def testSecondlyByWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 0), + datetime(1997, 9, 2, 9, 0, 1), + datetime(1997, 9, 2, 9, 0, 2)]) + + def testSecondlyByNWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 0), + datetime(1997, 9, 2, 9, 0, 1), + datetime(1997, 9, 2, 9, 0, 2)]) + + def testSecondlyByMonthAndWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0, 0), + datetime(1998, 1, 1, 0, 0, 1), + datetime(1998, 1, 1, 0, 0, 2)]) + + def testSecondlyByMonthAndNWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0, 0), + datetime(1998, 1, 1, 0, 0, 1), + datetime(1998, 1, 1, 0, 0, 2)]) + + def testSecondlyByMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0, 0), + datetime(1998, 1, 1, 0, 0, 1), + datetime(1998, 1, 1, 0, 0, 2)]) + + def testSecondlyByMonthAndMonthDayAndWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 1, 0, 0, 0), + datetime(1998, 1, 1, 0, 0, 1), + datetime(1998, 1, 1, 0, 0, 2)]) + + def testSecondlyByYearDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=4, + byyearday=(1, 100, 200, 365), + 
dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0, 0), + datetime(1997, 12, 31, 0, 0, 1), + datetime(1997, 12, 31, 0, 0, 2), + datetime(1997, 12, 31, 0, 0, 3)]) + + def testSecondlyByYearDayNeg(self): + self.assertEqual(list(rrule(SECONDLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 31, 0, 0, 0), + datetime(1997, 12, 31, 0, 0, 1), + datetime(1997, 12, 31, 0, 0, 2), + datetime(1997, 12, 31, 0, 0, 3)]) + + def testSecondlyByMonthAndYearDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0, 0), + datetime(1998, 4, 10, 0, 0, 1), + datetime(1998, 4, 10, 0, 0, 2), + datetime(1998, 4, 10, 0, 0, 3)]) + + def testSecondlyByMonthAndYearDayNeg(self): + self.assertEqual(list(rrule(SECONDLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 10, 0, 0, 0), + datetime(1998, 4, 10, 0, 0, 1), + datetime(1998, 4, 10, 0, 0, 2), + datetime(1998, 4, 10, 0, 0, 3)]) + + def testSecondlyByWeekNo(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 5, 11, 0, 0, 0), + datetime(1998, 5, 11, 0, 0, 1), + datetime(1998, 5, 11, 0, 0, 2)]) + + def testSecondlyByWeekNoAndWeekDay(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 29, 0, 0, 0), + datetime(1997, 12, 29, 0, 0, 1), + datetime(1997, 12, 29, 0, 0, 2)]) + + def testSecondlyByWeekNoAndWeekDayLarge(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0, 0), + datetime(1997, 12, 28, 0, 0, 1), + datetime(1997, 12, 28, 0, 0, 2)]) + + def testSecondlyByWeekNoAndWeekDayLast(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 12, 28, 0, 0, 0), + datetime(1997, 12, 28, 0, 0, 1), + datetime(1997, 12, 28, 0, 0, 2)]) + + def testSecondlyByWeekNoAndWeekDay53(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 12, 28, 0, 0, 0), + datetime(1998, 12, 28, 0, 0, 1), + datetime(1998, 12, 28, 0, 0, 2)]) + + def testSecondlyByEaster(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 12, 0, 0, 0), + datetime(1998, 4, 12, 0, 0, 1), + datetime(1998, 4, 12, 0, 0, 2)]) + + def testSecondlyByEasterPos(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 13, 0, 0, 0), + datetime(1998, 4, 13, 0, 0, 1), + datetime(1998, 4, 13, 0, 0, 2)]) + + def testSecondlyByEasterNeg(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 4, 11, 0, 0, 0), + datetime(1998, 4, 11, 0, 0, 1), + datetime(1998, 4, 11, 0, 0, 2)]) + + def testSecondlyByHour(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 0), + datetime(1997, 9, 2, 18, 0, 1), + datetime(1997, 9, 2, 18, 0, 2)]) + + def testSecondlyByMinute(self): + 
self.assertEqual(list(rrule(SECONDLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 0), + datetime(1997, 9, 2, 9, 6, 1), + datetime(1997, 9, 2, 9, 6, 2)]) + + def testSecondlyBySecond(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0, 6), + datetime(1997, 9, 2, 9, 0, 18), + datetime(1997, 9, 2, 9, 1, 6)]) + + def testSecondlyByHourAndMinute(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 0), + datetime(1997, 9, 2, 18, 6, 1), + datetime(1997, 9, 2, 18, 6, 2)]) + + def testSecondlyByHourAndSecond(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 0, 6), + datetime(1997, 9, 2, 18, 0, 18), + datetime(1997, 9, 2, 18, 1, 6)]) + + def testSecondlyByMinuteAndSecond(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 6, 6), + datetime(1997, 9, 2, 9, 6, 18), + datetime(1997, 9, 2, 9, 18, 6)]) + + def testSecondlyByHourAndMinuteAndSecond(self): + self.assertEqual(list(rrule(SECONDLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 18, 6, 6), + datetime(1997, 9, 2, 18, 6, 18), + datetime(1997, 9, 2, 18, 18, 6)]) + + def testSecondlyByHourAndMinuteAndSecondBug(self): + # This explores a bug found by Mathieu Bridon. + self.assertEqual(list(rrule(SECONDLY, + count=3, + bysecond=(0,), + byminute=(1,), + dtstart=datetime(2010, 3, 22, 12, 1))), + [datetime(2010, 3, 22, 12, 1), + datetime(2010, 3, 22, 13, 1), + datetime(2010, 3, 22, 14, 1)]) + + def testLongIntegers(self): + if not PY3: # There are no longs in Python 3 + self.assertEqual(list(rrule(MINUTELY, + count=long(2), + interval=long(2), + bymonth=long(2), + byweekday=long(3), + byhour=long(6), + byminute=long(6), + bysecond=long(6), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 2, 5, 6, 6, 6), + datetime(1998, 2, 12, 6, 6, 6)]) + self.assertEqual(list(rrule(YEARLY, + count=long(2), + bymonthday=long(5), + byweekno=long(2), + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1998, 1, 5, 9, 0), + datetime(2004, 1, 5, 9, 0)]) + + def testHourlyBadRRule(self): + """ + When `byhour` is specified with `freq=HOURLY`, there are certain + combinations of `dtstart` and `byhour` which result in an rrule with no + valid values. + + See https://github.com/dateutil/dateutil/issues/4 + """ + + self.assertRaises(ValueError, rrule, HOURLY, + **dict(interval=4, byhour=(7, 11, 15, 19), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testMinutelyBadRRule(self): + """ + See :func:`testHourlyBadRRule` for details. + """ + + self.assertRaises(ValueError, rrule, MINUTELY, + **dict(interval=12, byminute=(10, 11, 25, 39, 50), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testSecondlyBadRRule(self): + """ + See :func:`testHourlyBadRRule` for details. + """ + + self.assertRaises(ValueError, rrule, SECONDLY, + **dict(interval=10, bysecond=(2, 15, 37, 42, 59), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testMinutelyBadComboRRule(self): + """ + Certain values of :param:`interval` in :class:`rrule`, when combined + with certain values of :param:`byhour`, create rules which apply to no + valid dates.
The library should detect this case in the iterator and + raise a :exception:`ValueError`. + """ + + # In Python 2.7 you can use a context manager for this. + def make_bad_rrule(): + list(rrule(MINUTELY, interval=120, byhour=(10, 12, 14, 16), + count=2, dtstart=datetime(1997, 9, 2, 9, 0))) + + self.assertRaises(ValueError, make_bad_rrule) + + def testSecondlyBadComboRRule(self): + """ + See :func:`testMinutelyBadComboRRule` for details. + """ + + # In Python 2.7 you can use a context manager for this. + def make_bad_minute_rrule(): + list(rrule(SECONDLY, interval=360, byminute=(10, 28, 49), + count=4, dtstart=datetime(1997, 9, 2, 9, 0))) + + def make_bad_hour_rrule(): + list(rrule(SECONDLY, interval=43200, byhour=(2, 10, 18, 23), + count=4, dtstart=datetime(1997, 9, 2, 9, 0))) + + self.assertRaises(ValueError, make_bad_minute_rrule) + self.assertRaises(ValueError, make_bad_hour_rrule) + + def testBadUntilCountRRule(self): + """ + See RFC 5545, section 3.3.10. This checks for the deprecation warning, + and will eventually check for an error. + """ + with self.assertWarns(DeprecationWarning): + rrule(DAILY, dtstart=datetime(1997, 9, 2, 9, 0), + count=3, until=datetime(1997, 9, 4, 9, 0)) + + def testUntilNotMatching(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0), + until=datetime(1997, 9, 5, 8, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testUntilMatching(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0), + until=datetime(1997, 9, 4, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testUntilSingle(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0), + until=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0)]) + + def testUntilEmpty(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0), + until=datetime(1997, 9, 1, 9, 0))), + []) + + def testUntilWithDate(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0), + until=date(1997, 9, 5))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testWkStIntervalMO(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + interval=2, + byweekday=(TU, SU), + wkst=MO, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 7, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testWkStIntervalSU(self): + self.assertEqual(list(rrule(WEEKLY, + count=3, + interval=2, + byweekday=(TU, SU), + wkst=SU, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testDTStartIsDate(self): + self.assertEqual(list(rrule(DAILY, + count=3, + dtstart=date(1997, 9, 2))), + [datetime(1997, 9, 2, 0, 0), + datetime(1997, 9, 3, 0, 0), + datetime(1997, 9, 4, 0, 0)]) + + def testDTStartWithMicroseconds(self): + self.assertEqual(list(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0, 0, 500000))), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testMaxYear(self): + self.assertEqual(list(rrule(YEARLY, + count=3, + bymonth=2, + bymonthday=31, + dtstart=datetime(9997, 9, 2, 9, 0, 0))), + []) + + def testGetItem(self): + self.assertEqual(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))[0], + datetime(1997, 9, 2, 9, 0)) + + def testGetItemNeg(self): + self.assertEqual(rrule(DAILY, +
count=3, + dtstart=datetime(1997, 9, 2, 9, 0))[-1], + datetime(1997, 9, 4, 9, 0)) + + def testGetItemSlice(self): + self.assertEqual(rrule(DAILY, + # count=3, + dtstart=datetime(1997, 9, 2, 9, 0))[1:2], + [datetime(1997, 9, 3, 9, 0)]) + + def testGetItemSliceEmpty(self): + self.assertEqual(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))[:], + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0)]) + + def testGetItemSliceStep(self): + self.assertEqual(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))[::-2], + [datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 2, 9, 0)]) + + def testCount(self): + self.assertEqual(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0)).count(), + 3) + + def testCountZero(self): + self.assertEqual(rrule(YEARLY, + count=0, + dtstart=datetime(1997, 9, 2, 9, 0)).count(), + 0) + + def testContains(self): + rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0)) + self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True) + + def testContainsNot(self): + rr = rrule(DAILY, count=3, dtstart=datetime(1997, 9, 2, 9, 0)) + self.assertEqual(datetime(1997, 9, 3, 9, 0) not in rr, False) + + def testBefore(self): + self.assertEqual(rrule(DAILY, # count=5 + dtstart=datetime(1997, 9, 2, 9, 0)).before(datetime(1997, 9, 5, 9, 0)), + datetime(1997, 9, 4, 9, 0)) + + def testBeforeInc(self): + self.assertEqual(rrule(DAILY, + #count=5, + dtstart=datetime(1997, 9, 2, 9, 0)) + .before(datetime(1997, 9, 5, 9, 0), inc=True), + datetime(1997, 9, 5, 9, 0)) + + def testAfter(self): + self.assertEqual(rrule(DAILY, + #count=5, + dtstart=datetime(1997, 9, 2, 9, 0)) + .after(datetime(1997, 9, 4, 9, 0)), + datetime(1997, 9, 5, 9, 0)) + + def testAfterInc(self): + self.assertEqual(rrule(DAILY, + #count=5, + dtstart=datetime(1997, 9, 2, 9, 0)) + .after(datetime(1997, 9, 4, 9, 0), inc=True), + datetime(1997, 9, 4, 9, 0)) + + def testXAfter(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0)) + .xafter(datetime(1997, 9, 8, 9, 0), count=12)), + [datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 10, 9, 0), + datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 12, 9, 0), + datetime(1997, 9, 13, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 15, 9, 0), + datetime(1997, 9, 16, 9, 0), + datetime(1997, 9, 17, 9, 0), + datetime(1997, 9, 18, 9, 0), + datetime(1997, 9, 19, 9, 0), + datetime(1997, 9, 20, 9, 0)]) + + def testXAfterInc(self): + self.assertEqual(list(rrule(DAILY, + dtstart=datetime(1997, 9, 2, 9, 0)) + .xafter(datetime(1997, 9, 8, 9, 0), count=12, inc=True)), + [datetime(1997, 9, 8, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 10, 9, 0), + datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 12, 9, 0), + datetime(1997, 9, 13, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 15, 9, 0), + datetime(1997, 9, 16, 9, 0), + datetime(1997, 9, 17, 9, 0), + datetime(1997, 9, 18, 9, 0), + datetime(1997, 9, 19, 9, 0)]) + + def testBetween(self): + self.assertEqual(rrule(DAILY, + #count=5, + dtstart=datetime(1997, 9, 2, 9, 0)) + .between(datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 6, 9, 0)), + [datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 5, 9, 0)]) + + def testBetweenInc(self): + self.assertEqual(rrule(DAILY, + #count=5, + dtstart=datetime(1997, 9, 2, 9, 0)) + .between(datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 6, 9, 0), inc=True), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 5, 
9, 0), + datetime(1997, 9, 6, 9, 0)]) + + def testCachePre(self): + rr = rrule(DAILY, count=15, cache=True, + dtstart=datetime(1997, 9, 2, 9, 0)) + self.assertEqual(list(rr), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 5, 9, 0), + datetime(1997, 9, 6, 9, 0), + datetime(1997, 9, 7, 9, 0), + datetime(1997, 9, 8, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 10, 9, 0), + datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 12, 9, 0), + datetime(1997, 9, 13, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 15, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testCachePost(self): + rr = rrule(DAILY, count=15, cache=True, + dtstart=datetime(1997, 9, 2, 9, 0)) + for x in rr: pass + self.assertEqual(list(rr), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 5, 9, 0), + datetime(1997, 9, 6, 9, 0), + datetime(1997, 9, 7, 9, 0), + datetime(1997, 9, 8, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 10, 9, 0), + datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 12, 9, 0), + datetime(1997, 9, 13, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 15, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testCachePostInternal(self): + rr = rrule(DAILY, count=15, cache=True, + dtstart=datetime(1997, 9, 2, 9, 0)) + for x in rr: pass + self.assertEqual(rr._cache, + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 3, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 5, 9, 0), + datetime(1997, 9, 6, 9, 0), + datetime(1997, 9, 7, 9, 0), + datetime(1997, 9, 8, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 10, 9, 0), + datetime(1997, 9, 11, 9, 0), + datetime(1997, 9, 12, 9, 0), + datetime(1997, 9, 13, 9, 0), + datetime(1997, 9, 14, 9, 0), + datetime(1997, 9, 15, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testCachePreContains(self): + rr = rrule(DAILY, count=3, cache=True, + dtstart=datetime(1997, 9, 2, 9, 0)) + self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True) + + def testCachePostContains(self): + rr = rrule(DAILY, count=3, cache=True, + dtstart=datetime(1997, 9, 2, 9, 0)) + for x in rr: pass + self.assertEqual(datetime(1997, 9, 3, 9, 0) in rr, True) + + def testStr(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrWithTZID(self): + NYC = tz.gettz('America/New_York') + self.assertEqual(list(rrulestr( + "DTSTART;TZID=America/New_York:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3\n" + )), + [datetime(1997, 9, 2, 9, 0, tzinfo=NYC), + datetime(1998, 9, 2, 9, 0, tzinfo=NYC), + datetime(1999, 9, 2, 9, 0, tzinfo=NYC)]) + + def testStrWithTZIDMapping(self): + rrstr = ("DTSTART;TZID=Eastern:19970902T090000\n" + + "RRULE:FREQ=YEARLY;COUNT=3") + + NYC = tz.gettz('America/New_York') + rr = rrulestr(rrstr, tzids={'Eastern': NYC}) + exp = [datetime(1997, 9, 2, 9, 0, tzinfo=NYC), + datetime(1998, 9, 2, 9, 0, tzinfo=NYC), + datetime(1999, 9, 2, 9, 0, tzinfo=NYC)] + + self.assertEqual(list(rr), exp) + + def testStrWithTZIDCallable(self): + rrstr = ('DTSTART;TZID=UTC+04:19970902T090000\n' + + 'RRULE:FREQ=YEARLY;COUNT=3') + + TZ = tz.tzstr('UTC+04') + def parse_tzstr(tzstr): + if tzstr is None: + raise ValueError('Invalid tzstr') + + return tz.tzstr(tzstr) + + rr = rrulestr(rrstr, tzids=parse_tzstr) + + exp = [datetime(1997, 9, 2, 9, 0, tzinfo=TZ), + datetime(1998, 9, 2, 9, 0, 
tzinfo=TZ), + datetime(1999, 9, 2, 9, 0, tzinfo=TZ),] + + self.assertEqual(list(rr), exp) + + def testStrWithTZIDCallableFailure(self): + rrstr = ('DTSTART;TZID=America/New_York:19970902T090000\n' + + 'RRULE:FREQ=YEARLY;COUNT=3') + + class TzInfoError(Exception): + pass + + def tzinfos(tzstr): + if tzstr == 'America/New_York': + raise TzInfoError('Invalid!') + return None + + with self.assertRaises(TzInfoError): + rrulestr(rrstr, tzids=tzinfos) + + def testStrWithConflictingTZID(self): + # RFC 5545 Section 3.3.5, FORM #2: DATE WITH UTC TIME + # https://tools.ietf.org/html/rfc5545#section-3.3.5 + # The "TZID" property parameter MUST NOT be applied to DATE-TIME + with self.assertRaises(ValueError): + rrulestr("DTSTART;TZID=America/New_York:19970902T090000Z\n"+ + "RRULE:FREQ=YEARLY;COUNT=3\n") + + def testStrType(self): + self.assertEqual(isinstance(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3\n" + ), rrule), True) + + def testStrForceSetType(self): + self.assertEqual(isinstance(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3\n" + , forceset=True), rruleset), True) + + def testStrSetType(self): + self.assertEqual(isinstance(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n" + "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n" + ), rruleset), True) + + def testStrCase(self): + self.assertEqual(list(rrulestr( + "dtstart:19970902T090000\n" + "rrule:freq=yearly;count=3\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrSpaces(self): + self.assertEqual(list(rrulestr( + " DTSTART:19970902T090000 " + " RRULE:FREQ=YEARLY;COUNT=3 " + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrSpacesAndLines(self): + self.assertEqual(list(rrulestr( + " DTSTART:19970902T090000 \n" + " \n" + " RRULE:FREQ=YEARLY;COUNT=3 \n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrNoDTStart(self): + self.assertEqual(list(rrulestr( + "RRULE:FREQ=YEARLY;COUNT=3\n" + , dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrValueOnly(self): + self.assertEqual(list(rrulestr( + "FREQ=YEARLY;COUNT=3\n" + , dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrUnfold(self): + self.assertEqual(list(rrulestr( + "FREQ=YEA\n RLY;COUNT=3\n", unfold=True, + dtstart=datetime(1997, 9, 2, 9, 0))), + [datetime(1997, 9, 2, 9, 0), + datetime(1998, 9, 2, 9, 0), + datetime(1999, 9, 2, 9, 0)]) + + def testStrSet(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n" + "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testStrSetDate(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TU\n" + "RDATE:19970904T090000\n" + "RDATE:19970909T090000\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testStrSetExRule(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n" + "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + 
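# An illustrative aside, not part of the upstream suite: the EXRULE and
+ # EXDATE tests in this group rely on rrulestr() returning an rruleset
+ # whenever its input holds more than one rule, with EXRULE/EXDATE
+ # occurrences subtracted from the RRULE/RDATE ones. A minimal
+ # doctest-style sketch, assuming this module's imports:
+ #
+ # >>> rs = rrulestr("DTSTART:19970902T090000\n"
+ # ...               "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n"
+ # ...               "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n")
+ # >>> [d.day for d in rs]  # only the September Tuesdays remain
+ # [2, 9, 16]
+
+ 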
def testStrSetExDate(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=6;BYDAY=TU,TH\n" + "EXDATE:19970904T090000\n" + "EXDATE:19970911T090000\n" + "EXDATE:19970918T090000\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testStrSetDateAndExDate(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RDATE:19970902T090000\n" + "RDATE:19970904T090000\n" + "RDATE:19970909T090000\n" + "RDATE:19970911T090000\n" + "RDATE:19970916T090000\n" + "RDATE:19970918T090000\n" + "EXDATE:19970904T090000\n" + "EXDATE:19970911T090000\n" + "EXDATE:19970918T090000\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testStrSetDateAndExRule(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RDATE:19970902T090000\n" + "RDATE:19970904T090000\n" + "RDATE:19970909T090000\n" + "RDATE:19970911T090000\n" + "RDATE:19970916T090000\n" + "RDATE:19970918T090000\n" + "EXRULE:FREQ=YEARLY;COUNT=3;BYDAY=TH\n" + )), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testStrKeywords(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3;INTERVAL=3;" + "BYMONTH=3;BYWEEKDAY=TH;BYMONTHDAY=3;" + "BYHOUR=3;BYMINUTE=3;BYSECOND=3\n" + )), + [datetime(2033, 3, 3, 3, 3, 3), + datetime(2039, 3, 3, 3, 3, 3), + datetime(2072, 3, 3, 3, 3, 3)]) + + def testStrNWeekDay(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=3;BYDAY=1TU,-1TH\n" + )), + [datetime(1997, 12, 25, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 12, 31, 9, 0)]) + + def testStrUntil(self): + self.assertEqual(list(rrulestr( + "DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;" + "UNTIL=19990101T000000;BYDAY=1TU,-1TH\n" + )), + [datetime(1997, 12, 25, 9, 0), + datetime(1998, 1, 6, 9, 0), + datetime(1998, 12, 31, 9, 0)]) + + def testStrValueDatetime(self): + rr = rrulestr("DTSTART;VALUE=DATE-TIME:19970902T090000\n" + "RRULE:FREQ=YEARLY;COUNT=2") + + self.assertEqual(list(rr), [datetime(1997, 9, 2, 9, 0, 0), + datetime(1998, 9, 2, 9, 0, 0)]) + + def testStrValueDate(self): + rr = rrulestr("DTSTART;VALUE=DATE:19970902\n" + "RRULE:FREQ=YEARLY;COUNT=2") + + self.assertEqual(list(rr), [datetime(1997, 9, 2, 0, 0, 0), + datetime(1998, 9, 2, 0, 0, 0)]) + + def testStrInvalidUntil(self): + with self.assertRaises(ValueError): + list(rrulestr("DTSTART:19970902T090000\n" + "RRULE:FREQ=YEARLY;" + "UNTIL=TheCowsComeHome;BYDAY=1TU,-1TH\n")) + + def testStrUntilMustBeUTC(self): + with self.assertRaises(ValueError): + list(rrulestr("DTSTART;TZID=America/New_York:19970902T090000\n" + "RRULE:FREQ=YEARLY;" + "UNTIL=19990101T000000;BYDAY=1TU,-1TH\n")) + + def testStrUntilWithTZ(self): + NYC = tz.gettz('America/New_York') + rr = list(rrulestr("DTSTART;TZID=America/New_York:19970101T000000\n" + "RRULE:FREQ=YEARLY;" + "UNTIL=19990101T000000Z\n")) + self.assertEqual(list(rr), [datetime(1997, 1, 1, 0, 0, 0, tzinfo=NYC), + datetime(1998, 1, 1, 0, 0, 0, tzinfo=NYC)]) + + def testStrEmptyByDay(self): + with self.assertRaises(ValueError): + list(rrulestr("DTSTART:19970902T090000\n" + "FREQ=WEEKLY;" + "BYDAY=;" # This part is invalid + "WKST=SU")) + + def testStrInvalidByDay(self): + with self.assertRaises(ValueError): + list(rrulestr("DTSTART:19970902T090000\n" + "FREQ=WEEKLY;" + "BYDAY=-1OK;" # This part is invalid + "WKST=SU")) + + def 
testBadBySetPos(self): + self.assertRaises(ValueError, + rrule, MONTHLY, + count=1, + bysetpos=0, + dtstart=datetime(1997, 9, 2, 9, 0)) + + def testBadBySetPosMany(self): + self.assertRaises(ValueError, + rrule, MONTHLY, + count=1, + bysetpos=(-1, 0, 1), + dtstart=datetime(1997, 9, 2, 9, 0)) + + # Tests to ensure that str(rrule) works + def testToStrYearly(self): + rule = rrule(YEARLY, count=3, dtstart=datetime(1997, 9, 2, 9, 0)) + self._rrulestr_reverse_test(rule) + + def testToStrYearlyInterval(self): + rule = rrule(YEARLY, count=3, interval=2, + dtstart=datetime(1997, 9, 2, 9, 0)) + self._rrulestr_reverse_test(rule) + + def testToStrYearlyByMonth(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByNWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndNWeekDayLarge(self): + # This is interesting because the TH(-3) ends up before + # the TU(3). 
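+ # (In January 1998, for instance, the third-from-last Thursday, Jan 15,
+ # precedes the third Tuesday, Jan 20.)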
+ self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByYearDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekNo(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
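+ # (For example, the Sunday of 1999's ISO week 52 falls on 2000-01-02,
+ # as testDailyByWeekNoAndWeekDayLarge above shows.)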
+ self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByEaster(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByEasterPos(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByHour(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMinute(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyBySecond(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrYearlyBySetPos(self): + self._rrulestr_reverse_test(rrule(YEARLY, + count=3, + bymonthday=15, + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthly(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyInterval(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + interval=18, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonth(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + # Third Monday of the month + self.assertEqual(rrule(MONTHLY, + byweekday=(MO(+3)), + dtstart=datetime(1997, 9, 1)).between(datetime(1997, + 9, + 1), + 
datetime(1997, + 12, + 1)), + [datetime(1997, 9, 15, 0, 0), + datetime(1997, 10, 20, 0, 0), + datetime(1997, 11, 17, 0, 0)]) + + def testToStrMonthlyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByNWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndNWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(3), TH(-3)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByYearDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekNo(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
+ self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByEaster(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByEasterPos(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByHour(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMinute(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyBySecond(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMonthlyBySetPos(self): + self._rrulestr_reverse_test(rrule(MONTHLY, + count=3, + bymonthday=(13, 17), + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeekly(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyInterval(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + interval=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonth(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekday=(TU(1), TH(-1)), + 
dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndWeekDay(self): + # This test is interesting, because it crosses the year + # boundary in a weekly period to find day '1' as a + # valid recurrence. + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByYearDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=4, + bymonth=(1, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=4, + bymonth=(1, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekNo(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
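+        # ISO week 1 is the week containing the year's first Thursday, so
+        # week 1 of 1998 starts on Monday 1997-12-29, and BYWEEKNO=1 with
+        # BYDAY=MO can match a date in the previous calendar year. A small
+        # illustration, assuming the vendored API:
+        #
+        #     from datetime import datetime
+        #     from dateutil.rrule import rrule, WEEKLY, MO
+        #
+        #     rule = rrule(WEEKLY, count=1, byweekno=1, byweekday=MO,
+        #                  dtstart=datetime(1997, 9, 2, 9, 0))
+        #     assert next(iter(rule)) == datetime(1997, 12, 29, 9, 0)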
+ self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByEaster(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByEasterPos(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByHour(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMinute(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyBySecond(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrWeeklyBySetPos(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + byweekday=(TU, TH), + byhour=(6, 18), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDaily(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyInterval(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + interval=92, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonth(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def 
testToStrDailyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByYearDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=4, + bymonth=(1, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=4, + bymonth=(1, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekNo(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekNoAndWeekDay(self): + # That's a nice one. The first days of week number one + # may be in the last year. + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekNoAndWeekDayLarge(self): + # Another nice test. The last days of week number 52/53 + # may be in the next year. 
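+        # The *BySetPos cases nearby rely on BYSETPOS, which first expands
+        # the other BY* parts into an occurrence set for each period and
+        # then keeps only the requested positions (1-based, with negative
+        # values counting from the end). Sketch, assuming the vendored API:
+        #
+        #     from datetime import datetime
+        #     from dateutil.rrule import rrule, DAILY
+        #
+        #     rule = rrule(DAILY, count=4, byhour=(6, 18),
+        #                  byminute=(15, 45), bysetpos=(3, -3),
+        #                  dtstart=datetime(1997, 9, 2, 0, 0))
+        #     # each day expands to 06:15, 06:45, 18:15 and 18:45;
+        #     # positions 3 and -3 keep 18:15 and 06:45 respectively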
+ self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByEaster(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByEasterPos(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByHour(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMinute(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyBySecond(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrDailyBySetPos(self): + self._rrulestr_reverse_test(rrule(DAILY, + count=3, + byhour=(6, 18), + byminute=(15, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourly(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyInterval(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + interval=769, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonth(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def 
testToStrHourlyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByYearDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekNo(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekNoAndWeekDay(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekNoAndWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByEaster(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByEasterPos(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByHour(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMinute(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyBySecond(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byhour=(6, 18), + 
bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrHourlyBySetPos(self): + self._rrulestr_reverse_test(rrule(HOURLY, + count=3, + byminute=(15, 45), + bysecond=(15, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutely(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyInterval(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + interval=1501, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonth(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByYearDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekNo(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + 
byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekNoAndWeekDay(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekNoAndWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByEaster(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByEasterPos(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByHour(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMinute(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyBySecond(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrMinutelyBySetPos(self): + self._rrulestr_reverse_test(rrule(MINUTELY, + count=3, + bysecond=(15, 30, 45), + bysetpos=(3, -3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondly(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyInterval(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + interval=2, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyIntervalLarge(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + interval=90061, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonth(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonthday=(1, 3), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndMonthDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + bymonthday=(5, 7), + 
dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByNWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndNWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + byweekday=(TU(1), TH(-1)), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndMonthDayAndWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bymonth=(1, 3), + bymonthday=(1, 3), + byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByYearDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=4, + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByYearDayNeg(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=4, + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndYearDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=4, + bymonth=(4, 7), + byyearday=(1, 100, 200, 365), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMonthAndYearDayNeg(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=4, + bymonth=(4, 7), + byyearday=(-365, -266, -166, -1), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekNo(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekno=20, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekNoAndWeekDay(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekno=1, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekNoAndWeekDayLarge(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekno=52, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekNoAndWeekDayLast(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekno=-1, + byweekday=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByWeekNoAndWeekDay53(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byweekno=53, + byweekday=MO, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByEaster(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byeaster=0, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByEasterPos(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byeaster=1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByEasterNeg(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byeaster=-1, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByHour(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byhour=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMinute(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def 
testToStrSecondlyBySecond(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByHourAndMinute(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByHourAndSecond(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byhour=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByHourAndMinuteAndSecond(self): + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + byhour=(6, 18), + byminute=(6, 18), + bysecond=(6, 18), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrSecondlyByHourAndMinuteAndSecondBug(self): + # This explores a bug found by Mathieu Bridon. + self._rrulestr_reverse_test(rrule(SECONDLY, + count=3, + bysecond=(0,), + byminute=(1,), + dtstart=datetime(2010, 3, 22, 12, 1))) + + def testToStrWithWkSt(self): + self._rrulestr_reverse_test(rrule(WEEKLY, + count=3, + wkst=SU, + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testToStrLongIntegers(self): + if not PY3: # There is no longs in python3 + self._rrulestr_reverse_test(rrule(MINUTELY, + count=long(2), + interval=long(2), + bymonth=long(2), + byweekday=long(3), + byhour=long(6), + byminute=long(6), + bysecond=long(6), + dtstart=datetime(1997, 9, 2, 9, 0))) + + self._rrulestr_reverse_test(rrule(YEARLY, + count=long(2), + bymonthday=long(5), + byweekno=long(2), + dtstart=datetime(1997, 9, 2, 9, 0))) + + def testReplaceIfSet(self): + rr = rrule(YEARLY, + count=1, + bymonthday=5, + dtstart=datetime(1997, 1, 1)) + newrr = rr.replace(bymonthday=6) + self.assertEqual(list(rr), [datetime(1997, 1, 5)]) + self.assertEqual(list(newrr), + [datetime(1997, 1, 6)]) + + def testReplaceIfNotSet(self): + rr = rrule(YEARLY, + count=1, + dtstart=datetime(1997, 1, 1)) + newrr = rr.replace(bymonthday=6) + self.assertEqual(list(rr), [datetime(1997, 1, 1)]) + self.assertEqual(list(newrr), + [datetime(1997, 1, 6)]) + + +@pytest.mark.rrule +@freeze_time(datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC)) +def test_generated_aware_dtstart(): + dtstart_exp = datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC) + UNTIL = datetime(2018, 3, 6, 8, 0, tzinfo=tz.UTC) + + rule_without_dtstart = rrule(freq=HOURLY, until=UNTIL) + rule_with_dtstart = rrule(freq=HOURLY, dtstart=dtstart_exp, until=UNTIL) + assert list(rule_without_dtstart) == list(rule_with_dtstart) + + +@pytest.mark.rrule +@pytest.mark.rrulestr +@pytest.mark.xfail(reason="rrulestr loses time zone, gh issue #637") +@freeze_time(datetime(2018, 3, 6, 5, 36, tzinfo=tz.UTC)) +def test_generated_aware_dtstart_rrulestr(): + rrule_without_dtstart = rrule(freq=HOURLY, + until=datetime(2018, 3, 6, 8, 0, + tzinfo=tz.UTC)) + rrule_r = rrulestr(str(rrule_without_dtstart)) + + assert list(rrule_r) == list(rrule_without_dtstart) + + +@pytest.mark.rruleset +class RRuleSetTest(unittest.TestCase): + def testSet(self): + rrset = rruleset() + rrset.rrule(rrule(YEARLY, count=2, byweekday=TU, + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.rrule(rrule(YEARLY, count=1, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testSetDate(self): + rrset = rruleset() + 
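+        # A rruleset yields, in sorted order, the union of its rrules and
+        # rdates minus anything matched by its exrules and exdates. A
+        # minimal sketch, assuming the vendored API:
+        #
+        #     from datetime import datetime
+        #     from dateutil.rrule import rruleset, rrule, YEARLY, TU, TH
+        #
+        #     rrset = rruleset()
+        #     rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH),
+        #                       dtstart=datetime(1997, 9, 2, 9, 0)))
+        #     rrset.exdate(datetime(1997, 9, 4, 9, 0))
+        #     # 1997-09-04 09:00 is generated by the rrule but excluded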
rrset.rrule(rrule(YEARLY, count=1, byweekday=TU, + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.rdate(datetime(1997, 9, 4, 9)) + rrset.rdate(datetime(1997, 9, 9, 9)) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testSetExRule(self): + rrset = rruleset() + rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.exrule(rrule(YEARLY, count=3, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testSetExDate(self): + rrset = rruleset() + rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.exdate(datetime(1997, 9, 4, 9)) + rrset.exdate(datetime(1997, 9, 11, 9)) + rrset.exdate(datetime(1997, 9, 18, 9)) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testSetExDateRevOrder(self): + rrset = rruleset() + rrset.rrule(rrule(MONTHLY, count=5, bymonthday=10, + dtstart=datetime(2004, 1, 1, 9, 0))) + rrset.exdate(datetime(2004, 4, 10, 9, 0)) + rrset.exdate(datetime(2004, 2, 10, 9, 0)) + self.assertEqual(list(rrset), + [datetime(2004, 1, 10, 9, 0), + datetime(2004, 3, 10, 9, 0), + datetime(2004, 5, 10, 9, 0)]) + + def testSetDateAndExDate(self): + rrset = rruleset() + rrset.rdate(datetime(1997, 9, 2, 9)) + rrset.rdate(datetime(1997, 9, 4, 9)) + rrset.rdate(datetime(1997, 9, 9, 9)) + rrset.rdate(datetime(1997, 9, 11, 9)) + rrset.rdate(datetime(1997, 9, 16, 9)) + rrset.rdate(datetime(1997, 9, 18, 9)) + rrset.exdate(datetime(1997, 9, 4, 9)) + rrset.exdate(datetime(1997, 9, 11, 9)) + rrset.exdate(datetime(1997, 9, 18, 9)) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testSetDateAndExRule(self): + rrset = rruleset() + rrset.rdate(datetime(1997, 9, 2, 9)) + rrset.rdate(datetime(1997, 9, 4, 9)) + rrset.rdate(datetime(1997, 9, 9, 9)) + rrset.rdate(datetime(1997, 9, 11, 9)) + rrset.rdate(datetime(1997, 9, 16, 9)) + rrset.rdate(datetime(1997, 9, 18, 9)) + rrset.exrule(rrule(YEARLY, count=3, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 9, 9, 0), + datetime(1997, 9, 16, 9, 0)]) + + def testSetCount(self): + rrset = rruleset() + rrset.rrule(rrule(YEARLY, count=6, byweekday=(TU, TH), + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.exrule(rrule(YEARLY, count=3, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + self.assertEqual(rrset.count(), 3) + + def testSetCachePre(self): + rrset = rruleset() + rrset.rrule(rrule(YEARLY, count=2, byweekday=TU, + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.rrule(rrule(YEARLY, count=1, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testSetCachePost(self): + rrset = rruleset(cache=True) + rrset.rrule(rrule(YEARLY, count=2, byweekday=TU, + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.rrule(rrule(YEARLY, count=1, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + for x in rrset: pass + self.assertEqual(list(rrset), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testSetCachePostInternal(self): + rrset = rruleset(cache=True) + 
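+        # With cache=True the set memoizes occurrences in an internal
+        # _cache list as they are generated, so later iterations (and
+        # repeated count() calls) replay the cached values; the
+        # testSet*Count cases below check that adding a new rrule, rdate,
+        # exrule or exdate invalidates that cache. Sketch, assuming the
+        # vendored API:
+        #
+        #     rrset = rruleset(cache=True)
+        #     rrset.rrule(rrule(YEARLY, count=2, byweekday=TU,
+        #                       dtstart=datetime(1997, 9, 2, 9, 0)))
+        #     list(rrset)   # first pass fills rrset._cache
+        #     list(rrset)   # served from the cache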
rrset.rrule(rrule(YEARLY, count=2, byweekday=TU, + dtstart=datetime(1997, 9, 2, 9, 0))) + rrset.rrule(rrule(YEARLY, count=1, byweekday=TH, + dtstart=datetime(1997, 9, 2, 9, 0))) + for x in rrset: pass + self.assertEqual(list(rrset._cache), + [datetime(1997, 9, 2, 9, 0), + datetime(1997, 9, 4, 9, 0), + datetime(1997, 9, 9, 9, 0)]) + + def testSetRRuleCount(self): + # Test that the count is updated when an rrule is added + rrset = rruleset(cache=False) + for cache in (True, False): + rrset = rruleset(cache=cache) + rrset.rrule(rrule(YEARLY, count=2, byweekday=TH, + dtstart=datetime(1983, 4, 1))) + rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR, + dtstart=datetime(1991, 6, 3))) + + # Check the length twice - first one sets a cache, second reads it + self.assertEqual(rrset.count(), 6) + self.assertEqual(rrset.count(), 6) + + # This should invalidate the cache and force an update + rrset.rrule(rrule(MONTHLY, count=3, dtstart=datetime(1994, 1, 3))) + + self.assertEqual(rrset.count(), 9) + self.assertEqual(rrset.count(), 9) + + def testSetRDateCount(self): + # Test that the count is updated when an rdate is added + rrset = rruleset(cache=False) + for cache in (True, False): + rrset = rruleset(cache=cache) + rrset.rrule(rrule(YEARLY, count=2, byweekday=TH, + dtstart=datetime(1983, 4, 1))) + rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR, + dtstart=datetime(1991, 6, 3))) + + # Check the length twice - first one sets a cache, second reads it + self.assertEqual(rrset.count(), 6) + self.assertEqual(rrset.count(), 6) + + # This should invalidate the cache and force an update + rrset.rdate(datetime(1993, 2, 14)) + + self.assertEqual(rrset.count(), 7) + self.assertEqual(rrset.count(), 7) + + def testSetExRuleCount(self): + # Test that the count is updated when an exrule is added + rrset = rruleset(cache=False) + for cache in (True, False): + rrset = rruleset(cache=cache) + rrset.rrule(rrule(YEARLY, count=2, byweekday=TH, + dtstart=datetime(1983, 4, 1))) + rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR, + dtstart=datetime(1991, 6, 3))) + + # Check the length twice - first one sets a cache, second reads it + self.assertEqual(rrset.count(), 6) + self.assertEqual(rrset.count(), 6) + + # This should invalidate the cache and force an update + rrset.exrule(rrule(WEEKLY, count=2, interval=2, + dtstart=datetime(1991, 6, 14))) + + self.assertEqual(rrset.count(), 4) + self.assertEqual(rrset.count(), 4) + + def testSetExDateCount(self): + # Test that the count is updated when an rdate is added + for cache in (True, False): + rrset = rruleset(cache=cache) + rrset.rrule(rrule(YEARLY, count=2, byweekday=TH, + dtstart=datetime(1983, 4, 1))) + rrset.rrule(rrule(WEEKLY, count=4, byweekday=FR, + dtstart=datetime(1991, 6, 3))) + + # Check the length twice - first one sets a cache, second reads it + self.assertEqual(rrset.count(), 6) + self.assertEqual(rrset.count(), 6) + + # This should invalidate the cache and force an update + rrset.exdate(datetime(1991, 6, 28)) + + self.assertEqual(rrset.count(), 5) + self.assertEqual(rrset.count(), 5) + + +class WeekdayTest(unittest.TestCase): + def testInvalidNthWeekday(self): + with self.assertRaises(ValueError): + FR(0) + + def testWeekdayCallable(self): + # Calling a weekday instance generates a new weekday instance with the + # value of n changed. 
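+        # i.e. MO(1) is shorthand for weekday(0, 1): the weekday index is
+        # preserved and only n changes, and a fresh object is returned
+        # because n differs from MO's own n (which is None).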
+ from dateutil.rrule import weekday + self.assertEqual(MO(1), weekday(0, 1)) + + # Calling a weekday instance with the identical n returns the original + # object + FR_3 = weekday(4, 3) + self.assertIs(FR_3(3), FR_3) + + def testWeekdayEquality(self): + # Two weekday objects are not equal if they have different values for n + self.assertNotEqual(TH, TH(-1)) + self.assertNotEqual(SA(3), SA(2)) + + def testWeekdayEqualitySubclass(self): + # Two weekday objects equal if their "weekday" and "n" attributes are + # available and the same + class BasicWeekday(object): + def __init__(self, weekday): + self.weekday = weekday + + class BasicNWeekday(BasicWeekday): + def __init__(self, weekday, n=None): + super(BasicNWeekday, self).__init__(weekday) + self.n = n + + MO_Basic = BasicWeekday(0) + + self.assertNotEqual(MO, MO_Basic) + self.assertNotEqual(MO(1), MO_Basic) + + TU_BasicN = BasicNWeekday(1) + + self.assertEqual(TU, TU_BasicN) + self.assertNotEqual(TU(3), TU_BasicN) + + WE_Basic3 = BasicNWeekday(2, 3) + self.assertEqual(WE(3), WE_Basic3) + self.assertNotEqual(WE(2), WE_Basic3) + + def testWeekdayReprNoN(self): + no_n_reprs = ('MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU') + no_n_wdays = (MO, TU, WE, TH, FR, SA, SU) + + for repstr, wday in zip(no_n_reprs, no_n_wdays): + self.assertEqual(repr(wday), repstr) + + def testWeekdayReprWithN(self): + with_n_reprs = ('WE(+1)', 'TH(-2)', 'SU(+3)') + with_n_wdays = (WE(1), TH(-2), SU(+3)) + + for repstr, wday in zip(with_n_reprs, with_n_wdays): + self.assertEqual(repr(wday), repstr) diff --git a/libraries/dateutil/test/test_tz.py b/libraries/dateutil/test/test_tz.py new file mode 100644 index 00000000..54dfb1bd --- /dev/null +++ b/libraries/dateutil/test/test_tz.py @@ -0,0 +1,2603 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from ._common import PicklableMixin +from ._common import TZEnvContext, TZWinContext +from ._common import WarningTestMixin +from ._common import ComparesEqual + +from datetime import datetime, timedelta +from datetime import time as dt_time +from datetime import tzinfo +from six import BytesIO, StringIO +import unittest + +import sys +import base64 +import copy + +from functools import partial + +IS_WIN = sys.platform.startswith('win') + +import pytest + +# dateutil imports +from dateutil.relativedelta import relativedelta, SU, TH +from dateutil.parser import parse +from dateutil import tz as tz +from dateutil import zoneinfo + +try: + from dateutil import tzwin +except ImportError as e: + if IS_WIN: + raise e + else: + pass + +MISSING_TARBALL = ("This test fails if you don't have the dateutil " + "timezone file installed. 
Please read the README") + +TZFILE_EST5EDT = b""" +VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAAAAADrAAAABAAAABCeph5wn7rrYKCGAHCh +ms1gomXicKOD6eCkaq5wpTWnYKZTyvCnFYlgqDOs8Kj+peCqE47wqt6H4KvzcPCsvmngrdNS8K6e +S+CvszTwsH4t4LGcUXCyZ0pgs3wzcLRHLGC1XBVwticOYLc793C4BvBguRvZcLnm0mC7BPXwu8a0 +YLzk1/C9r9DgvsS58L+PsuDApJvwwW+U4MKEffDDT3bgxGRf8MUvWODGTXxwxw864MgtXnDI+Fdg +yg1AcMrYOWDLiPBw0iP0cNJg++DTdeTw1EDd4NVVxvDWIL/g1zWo8NgAoeDZFYrw2eCD4Nr+p3Db +wGXg3N6JcN2pgmDevmtw34lkYOCeTXDhaUZg4n4vcONJKGDkXhFw5Vcu4OZHLfDnNxDg6CcP8OkW +8uDqBvHw6vbU4Ovm0/Ds1rbg7ca18O6/02Dvr9Jw8J+1YPGPtHDyf5dg82+WcPRfeWD1T3hw9j9b +YPcvWnD4KHfg+Q88cPoIWeD6+Fjw++g74PzYOvD9yB3g/rgc8P+n/+AAl/7wAYfh4AJ34PADcP5g +BGD9cAVQ4GAGQN9wBzDCYAeNGXAJEKRgCa2U8ArwhmAL4IVwDNmi4A3AZ3AOuYTgD6mD8BCZZuAR +iWXwEnlI4BNpR/AUWSrgFUkp8BY5DOAXKQvwGCIpYBkI7fAaAgtgGvIKcBvh7WAc0exwHcHPYB6x +znAfobFgIHYA8CGBk2AiVeLwI2qv4CQ1xPAlSpHgJhWm8Ccqc+An/sNwKQpV4CnepXAq6jfgK76H +cCzTVGAtnmlwLrM2YC9+S3AwkxhgMWdn8DJy+mAzR0nwNFLcYDUnK/A2Mr5gNwcN8Dgb2uA45u/w +Ofu84DrG0fA7257gPK/ucD27gOA+j9BwP5ti4EBvsnBBhH9gQk+UcENkYWBEL3ZwRURDYEYPWHBH +JCVgR/h08EkEB2BJ2FbwSuPpYEu4OPBMzQXgTZga8E6s5+BPd/zwUIzJ4FFhGXBSbKvgU0D7cFRM +jeBVIN1wVixv4FcAv3BYFYxgWOChcFn1bmBawINwW9VQYFypn/BdtTJgXomB8F+VFGBgaWPwYX4w +4GJJRfBjXhLgZCkn8GU99OBmEkRwZx3W4GfyJnBo/bjgadIIcGrdmuBrsepwbMa3YG2RzHBupplg +b3GucHCGe2BxWsrwcmZdYHM6rPB0Rj9gdRqO8HYvW+B2+nDweA894HjaUvB57x/gero08HvPAeB8 +o1Fwfa7j4H6DM3B/jsXgAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAgMBAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAAEAAQABAAEAAQABAAEAAf//x8ABAP//ubAABP//x8ABCP//x8ABDEVEVABFU1QARVdU +AEVQVAAAAAABAAAAAQ== +""" + +EUROPE_HELSINKI = b""" +VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABQAAAAAAAAB1AAAABQAAAA2kc28Yy85RYMy/hdAV +I+uQFhPckBcDzZAX876QGOOvkBnToJAaw5GQG7y9EBysrhAdnJ8QHoyQEB98gRAgbHIQIVxjECJM +VBAjPEUQJCw2ECUcJxAmDBgQJwVDkCf1NJAo5SWQKdUWkCrFB5ArtPiQLKTpkC2U2pAuhMuQL3S8 +kDBkrZAxXdkQMnK0EDM9uxA0UpYQNR2dEDYyeBA2/X8QOBuUkDjdYRA5+3aQOr1DEDvbWJA8pl+Q +Pbs6kD6GQZA/mxyQQGYjkEGEORBCRgWQQ2QbEEQl55BFQ/0QRgXJkEcj3xBH7uYQSQPBEEnOyBBK +46MQS66qEEzMv5BNjowQTqyhkE9ubhBQjIOQUVeKkFJsZZBTN2yQVExHkFUXTpBWLCmQVvcwkFgV +RhBY1xKQWfUoEFq29JBb1QoQXKAREF207BBef/MQX5TOEGBf1RBhfeqQYj+3EGNdzJBkH5kQZT2u +kGYItZBnHZCQZ+iXkGj9cpBpyHmQat1UkGuoW5BsxnEQbYg9kG6mUxBvaB+QcIY1EHFRPBByZhcQ +czEeEHRF+RB1EQAQdi8VkHbw4hB4DveQeNDEEHnu2ZB6sKYQe867kHyZwpB9rp2QfnmkkH+Of5AC +AQIDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQD +BAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAMEAwQDBAME +AwQAABdoAAAAACowAQQAABwgAAkAACowAQQAABwgAAlITVQARUVTVABFRVQAAAAAAQEAAAABAQ== +""" + +NEW_YORK = b""" +VFppZgAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAABcAAADrAAAABAAAABCeph5wn7rrYKCGAHCh +ms1gomXicKOD6eCkaq5wpTWnYKZTyvCnFYlgqDOs8Kj+peCqE47wqt6H4KvzcPCsvmngrdNS8K6e +S+CvszTwsH4t4LGcUXCyZ0pgs3wzcLRHLGC1XBVwticOYLc793C4BvBguRvZcLnm0mC7BPXwu8a0 +YLzk1/C9r9DgvsS58L+PsuDApJvwwW+U4MKEffDDT3bgxGRf8MUvWODGTXxwxw864MgtXnDI+Fdg +yg1AcMrYOWDLiPBw0iP0cNJg++DTdeTw1EDd4NVVxvDWIL/g1zWo8NgAoeDZFYrw2eCD4Nr+p3Db +wGXg3N6JcN2pgmDevmtw34lkYOCeTXDhaUZg4n4vcONJKGDkXhFw5Vcu4OZHLfDnNxDg6CcP8OkW +8uDqBvHw6vbU4Ovm0/Ds1rbg7ca18O6/02Dvr9Jw8J+1YPGPtHDyf5dg82+WcPRfeWD1T3hw9j9b +YPcvWnD4KHfg+Q88cPoIWeD6+Fjw++g74PzYOvD9yB3g/rgc8P+n/+AAl/7wAYfh4AJ34PADcP5g +BGD9cAVQ4GEGQN9yBzDCYgeNGXMJEKRjCa2U9ArwhmQL4IV1DNmi5Q3AZ3YOuYTmD6mD9xCZZucR 
+iWX4EnlI6BNpR/kUWSrpFUkp+RY5DOoXKQv6GCIpaxkI7fsaAgtsGvIKfBvh7Wwc0ex8HcHPbR6x +zn0fobFtIHYA/SGBk20iVeL+I2qv7iQ1xP4lSpHuJhWm/ycqc+8n/sOAKQpV8CnepYAq6jfxK76H +gSzTVHItnmmCLrM2cy9+S4MwkxhzMWdoBDJy+nQzR0oENFLcdTUnLAU2Mr51NwcOBjgb2vY45vAG +Ofu89jrG0gY72572PK/uhj27gPY+j9CGP5ti9kBvsoZBhH92Qk+UhkNkYXZEL3aHRURDd0XzqQdH +LV/3R9OLB0kNQfdJs20HSu0j90uciYdM1kB3TXxrh062IndPXE2HUJYEd1E8L4dSdeZ3UxwRh1RV +yHdU+/OHVjWqd1blEAdYHsb3WMTyB1n+qPdapNQHW96K91yEtgddvmz3XmSYB1+eTvdgTbSHYYdr +d2ItlodjZ013ZA14h2VHL3dl7VqHZycRd2fNPIdpBvN3aa0eh2rm1XdrljsHbM/x9212HQdur9P3 +b1X/B3CPtfdxNeEHcm+X93MVwwd0T3n3dP7fh3Y4lnd23sGHeBh4d3i+o4d5+Fp3ep6Fh3vYPHd8 +fmeHfbged35eSYd/mAB3AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAgMBAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEA +AQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAB +AAEAAQABAAEAAQABAAEAAQABAAEAAf//x8ABAP//ubAABP//x8ABCP//x8ABDEVEVABFU1QARVdU +AEVQVAAEslgAAAAAAQWk7AEAAAACB4YfggAAAAMJZ1MDAAAABAtIhoQAAAAFDSsLhQAAAAYPDD8G +AAAABxDtcocAAAAIEs6mCAAAAAkVn8qJAAAACheA/goAAAALGWIxiwAAAAwdJeoMAAAADSHa5Q0A +AAAOJZ6djgAAAA8nf9EPAAAAECpQ9ZAAAAARLDIpEQAAABIuE1ySAAAAEzDnJBMAAAAUM7hIlAAA +ABU2jBAVAAAAFkO3G5YAAAAXAAAAAQAAAAE= +""" + +TZICAL_EST5EDT = """ +BEGIN:VTIMEZONE +TZID:US-Eastern +LAST-MODIFIED:19870101T000000Z +TZURL:http://zones.stds_r_us.net/tz/US-Eastern +BEGIN:STANDARD +DTSTART:19671029T020000 +RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10 +TZOFFSETFROM:-0400 +TZOFFSETTO:-0500 +TZNAME:EST +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:19870405T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4 +TZOFFSETFROM:-0500 +TZOFFSETTO:-0400 +TZNAME:EDT +END:DAYLIGHT +END:VTIMEZONE +""" + +TZICAL_PST8PDT = """ +BEGIN:VTIMEZONE +TZID:US-Pacific +LAST-MODIFIED:19870101T000000Z +BEGIN:STANDARD +DTSTART:19671029T020000 +RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10 +TZOFFSETFROM:-0700 +TZOFFSETTO:-0800 +TZNAME:PST +END:STANDARD +BEGIN:DAYLIGHT +DTSTART:19870405T020000 +RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4 +TZOFFSETFROM:-0800 +TZOFFSETTO:-0700 +TZNAME:PDT +END:DAYLIGHT +END:VTIMEZONE +""" + +EST_TUPLE = ('EST', timedelta(hours=-5), timedelta(hours=0)) +EDT_TUPLE = ('EDT', timedelta(hours=-4), timedelta(hours=1)) + + +### +# Helper functions +def get_timezone_tuple(dt): + """Retrieve a (tzname, utcoffset, dst) tuple for a given DST""" + return dt.tzname(), dt.utcoffset(), dt.dst() + + +### +# Mix-ins +class context_passthrough(object): + def __init__(*args, **kwargs): + pass + + def __enter__(*args, **kwargs): + pass + + def __exit__(*args, **kwargs): + pass + + +class TzFoldMixin(object): + """ Mix-in class for testing ambiguous times """ + def gettz(self, tzname): + raise NotImplementedError + + def _get_tzname(self, tzname): + return tzname + + def _gettz_context(self, tzname): + return context_passthrough() + + def testFoldPositiveUTCOffset(self): + # Test that we can resolve ambiguous times + tzname = self._get_tzname('Australia/Sydney') + + with self._gettz_context(tzname): + SYD = self.gettz(tzname) + + t0_u = datetime(2012, 3, 31, 15, 30, tzinfo=tz.tzutc()) # AEST + t1_u = datetime(2012, 3, 31, 16, 30, tzinfo=tz.tzutc()) # AEDT + + t0_syd0 = t0_u.astimezone(SYD) + t1_syd1 = t1_u.astimezone(SYD) + + self.assertEqual(t0_syd0.replace(tzinfo=None), + datetime(2012, 4, 1, 2, 30)) + + self.assertEqual(t1_syd1.replace(tzinfo=None), + datetime(2012, 4, 1, 2, 30)) + + self.assertEqual(t0_syd0.utcoffset(), timedelta(hours=11)) + self.assertEqual(t1_syd1.utcoffset(), 
timedelta(hours=10)) + + def testGapPositiveUTCOffset(self): + # Test that we don't have a problem around gaps. + tzname = self._get_tzname('Australia/Sydney') + + with self._gettz_context(tzname): + SYD = self.gettz(tzname) + + t0_u = datetime(2012, 10, 6, 15, 30, tzinfo=tz.tzutc()) # AEST + t1_u = datetime(2012, 10, 6, 16, 30, tzinfo=tz.tzutc()) # AEDT + + t0 = t0_u.astimezone(SYD) + t1 = t1_u.astimezone(SYD) + + self.assertEqual(t0.replace(tzinfo=None), + datetime(2012, 10, 7, 1, 30)) + + self.assertEqual(t1.replace(tzinfo=None), + datetime(2012, 10, 7, 3, 30)) + + self.assertEqual(t0.utcoffset(), timedelta(hours=10)) + self.assertEqual(t1.utcoffset(), timedelta(hours=11)) + + def testFoldNegativeUTCOffset(self): + # Test that we can resolve ambiguous times + tzname = self._get_tzname('America/Toronto') + + with self._gettz_context(tzname): + TOR = self.gettz(tzname) + + t0_u = datetime(2011, 11, 6, 5, 30, tzinfo=tz.tzutc()) + t1_u = datetime(2011, 11, 6, 6, 30, tzinfo=tz.tzutc()) + + t0_tor = t0_u.astimezone(TOR) + t1_tor = t1_u.astimezone(TOR) + + self.assertEqual(t0_tor.replace(tzinfo=None), + datetime(2011, 11, 6, 1, 30)) + + self.assertEqual(t1_tor.replace(tzinfo=None), + datetime(2011, 11, 6, 1, 30)) + + self.assertNotEqual(t0_tor.tzname(), t1_tor.tzname()) + self.assertEqual(t0_tor.utcoffset(), timedelta(hours=-4.0)) + self.assertEqual(t1_tor.utcoffset(), timedelta(hours=-5.0)) + + def testGapNegativeUTCOffset(self): + # Test that we don't have a problem around gaps. + tzname = self._get_tzname('America/Toronto') + + with self._gettz_context(tzname): + TOR = self.gettz(tzname) + + t0_u = datetime(2011, 3, 13, 6, 30, tzinfo=tz.tzutc()) + t1_u = datetime(2011, 3, 13, 7, 30, tzinfo=tz.tzutc()) + + t0 = t0_u.astimezone(TOR) + t1 = t1_u.astimezone(TOR) + + self.assertEqual(t0.replace(tzinfo=None), + datetime(2011, 3, 13, 1, 30)) + + self.assertEqual(t1.replace(tzinfo=None), + datetime(2011, 3, 13, 3, 30)) + + self.assertNotEqual(t0, t1) + self.assertEqual(t0.utcoffset(), timedelta(hours=-5.0)) + self.assertEqual(t1.utcoffset(), timedelta(hours=-4.0)) + + def testFoldLondon(self): + tzname = self._get_tzname('Europe/London') + + with self._gettz_context(tzname): + LON = self.gettz(tzname) + UTC = tz.tzutc() + + t0_u = datetime(2013, 10, 27, 0, 30, tzinfo=UTC) # BST + t1_u = datetime(2013, 10, 27, 1, 30, tzinfo=UTC) # GMT + + t0 = t0_u.astimezone(LON) + t1 = t1_u.astimezone(LON) + + self.assertEqual(t0.replace(tzinfo=None), + datetime(2013, 10, 27, 1, 30)) + + self.assertEqual(t1.replace(tzinfo=None), + datetime(2013, 10, 27, 1, 30)) + + self.assertEqual(t0.utcoffset(), timedelta(hours=1)) + self.assertEqual(t1.utcoffset(), timedelta(hours=0)) + + def testFoldIndependence(self): + tzname = self._get_tzname('America/New_York') + + with self._gettz_context(tzname): + NYC = self.gettz(tzname) + UTC = tz.tzutc() + hour = timedelta(hours=1) + + # Firmly 2015-11-01 0:30 EDT-4 + pre_dst = datetime(2015, 11, 1, 0, 30, tzinfo=NYC) + + # Ambiguous between 2015-11-01 1:30 EDT-4 and 2015-11-01 1:30 EST-5 + in_dst = pre_dst + hour + in_dst_tzname_0 = in_dst.tzname() # Stash the tzname - EDT + + # Doing the arithmetic in UTC creates a date that is unambiguously + # 2015-11-01 1:30 EDT-5 + in_dst_via_utc = (pre_dst.astimezone(UTC) + 2*hour).astimezone(NYC) + + # Make sure the dates are actually ambiguous + self.assertEqual(in_dst, in_dst_via_utc) + + # Make sure we got the right folding behavior + self.assertNotEqual(in_dst_via_utc.tzname(), in_dst_tzname_0) + + # Now check to make sure in_dst's 
tzname hasn't changed + self.assertEqual(in_dst_tzname_0, in_dst.tzname()) + + def testInZoneFoldEquality(self): + # Two datetimes in the same zone are considered to be equal if their + # wall times are equal, even if they have different absolute times. + + tzname = self._get_tzname('America/New_York') + + with self._gettz_context(tzname): + NYC = self.gettz(tzname) + UTC = tz.tzutc() + + dt0 = datetime(2011, 11, 6, 1, 30, tzinfo=NYC) + dt1 = tz.enfold(dt0, fold=1) + + # Make sure these actually represent different times + self.assertNotEqual(dt0.astimezone(UTC), dt1.astimezone(UTC)) + + # Test that they compare equal + self.assertEqual(dt0, dt1) + + def _test_ambiguous_time(self, dt, tzid, ambiguous): + # This is a test to check that the individual is_ambiguous values + # on the _tzinfo subclasses work. + tzname = self._get_tzname(tzid) + + with self._gettz_context(tzname): + tzi = self.gettz(tzname) + + self.assertEqual(tz.datetime_ambiguous(dt, tz=tzi), ambiguous) + + def testAmbiguousNegativeUTCOffset(self): + self._test_ambiguous_time(datetime(2015, 11, 1, 1, 30), + 'America/New_York', True) + + def testAmbiguousPositiveUTCOffset(self): + self._test_ambiguous_time(datetime(2012, 4, 1, 2, 30), + 'Australia/Sydney', True) + + def testUnambiguousNegativeUTCOffset(self): + self._test_ambiguous_time(datetime(2015, 11, 1, 2, 30), + 'America/New_York', False) + + def testUnambiguousPositiveUTCOffset(self): + self._test_ambiguous_time(datetime(2012, 4, 1, 3, 30), + 'Australia/Sydney', False) + + def testUnambiguousGapNegativeUTCOffset(self): + # Imaginary time + self._test_ambiguous_time(datetime(2011, 3, 13, 2, 30), + 'America/New_York', False) + + def testUnambiguousGapPositiveUTCOffset(self): + # Imaginary time + self._test_ambiguous_time(datetime(2012, 10, 7, 2, 30), + 'Australia/Sydney', False) + + def _test_imaginary_time(self, dt, tzid, exists): + tzname = self._get_tzname(tzid) + with self._gettz_context(tzname): + tzi = self.gettz(tzname) + + self.assertEqual(tz.datetime_exists(dt, tz=tzi), exists) + + def testImaginaryNegativeUTCOffset(self): + self._test_imaginary_time(datetime(2011, 3, 13, 2, 30), + 'America/New_York', False) + + def testNotImaginaryNegativeUTCOffset(self): + self._test_imaginary_time(datetime(2011, 3, 13, 1, 30), + 'America/New_York', True) + + def testImaginaryPositiveUTCOffset(self): + self._test_imaginary_time(datetime(2012, 10, 7, 2, 30), + 'Australia/Sydney', False) + + def testNotImaginaryPositiveUTCOffset(self): + self._test_imaginary_time(datetime(2012, 10, 7, 1, 30), + 'Australia/Sydney', True) + + def testNotImaginaryFoldNegativeUTCOffset(self): + self._test_imaginary_time(datetime(2015, 11, 1, 1, 30), + 'America/New_York', True) + + def testNotImaginaryFoldPositiveUTCOffset(self): + self._test_imaginary_time(datetime(2012, 4, 1, 3, 30), + 'Australia/Sydney', True) + + @unittest.skip("Known failure in Python 3.6.") + def testEqualAmbiguousComparison(self): + tzname = self._get_tzname('Australia/Sydney') + + with self._gettz_context(tzname): + SYD0 = self.gettz(tzname) + SYD1 = self.gettz(tzname) + + t0_u = datetime(2012, 3, 31, 14, 30, tzinfo=tz.tzutc()) # AEST + + t0_syd0 = t0_u.astimezone(SYD0) + t0_syd1 = t0_u.astimezone(SYD1) + + # This is considered an "inter-zone comparison" because it's an + # ambiguous datetime. 
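+        # tz.datetime_ambiguous and tz.datetime_exists are the public
+        # helpers these mixin tests drive, and tz.enfold picks between the
+        # two candidate offsets of an ambiguous wall time. Sketch, assuming
+        # the vendored API:
+        #
+        #     from datetime import datetime
+        #     from dateutil import tz
+        #
+        #     NYC = tz.gettz('America/New_York')
+        #     dt = datetime(2015, 11, 1, 1, 30)    # repeated wall time
+        #     assert tz.datetime_ambiguous(dt, tz=NYC)
+        #     assert not tz.datetime_exists(datetime(2015, 3, 8, 2, 30),
+        #                                   tz=NYC)
+        #     late = tz.enfold(dt.replace(tzinfo=NYC), fold=1)  # EST side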
+ self.assertEqual(t0_syd0, t0_syd1) + + +class TzWinFoldMixin(object): + def get_args(self, tzname): + return (tzname, ) + + class context(object): + def __init__(*args, **kwargs): + pass + + def __enter__(*args, **kwargs): + pass + + def __exit__(*args, **kwargs): + pass + + def get_utc_transitions(self, tzi, year, gap): + dston, dstoff = tzi.transitions(year) + if gap: + t_n = dston - timedelta(minutes=30) + + t0_u = t_n.replace(tzinfo=tzi).astimezone(tz.tzutc()) + t1_u = t0_u + timedelta(hours=1) + else: + # Get 1 hour before the first ambiguous date + t_n = dstoff - timedelta(minutes=30) + + t0_u = t_n.replace(tzinfo=tzi).astimezone(tz.tzutc()) + t_n += timedelta(hours=1) # Naive ambiguous date + t0_u = t0_u + timedelta(hours=1) # First ambiguous date + t1_u = t0_u + timedelta(hours=1) # Second ambiguous date + + return t_n, t0_u, t1_u + + def testFoldPositiveUTCOffset(self): + # Test that we can resolve ambiguous times + tzname = 'AUS Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + # Calling fromutc() alters the tzfile object + SYD = self.tzclass(*args) + + # Get the transition time in UTC from the object, because + # Windows doesn't store historical info + t_n, t0_u, t1_u = self.get_utc_transitions(SYD, 2012, False) + + # Using fresh tzfiles + t0_syd = t0_u.astimezone(SYD) + t1_syd = t1_u.astimezone(SYD) + + self.assertEqual(t0_syd.replace(tzinfo=None), t_n) + + self.assertEqual(t1_syd.replace(tzinfo=None), t_n) + + self.assertEqual(t0_syd.utcoffset(), timedelta(hours=11)) + self.assertEqual(t1_syd.utcoffset(), timedelta(hours=10)) + self.assertNotEqual(t0_syd.tzname(), t1_syd.tzname()) + + def testGapPositiveUTCOffset(self): + # Test that we don't have a problem around gaps. + tzname = 'AUS Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + SYD = self.tzclass(*args) + + t_n, t0_u, t1_u = self.get_utc_transitions(SYD, 2012, True) + + t0 = t0_u.astimezone(SYD) + t1 = t1_u.astimezone(SYD) + + self.assertEqual(t0.replace(tzinfo=None), t_n) + + self.assertEqual(t1.replace(tzinfo=None), t_n + timedelta(hours=2)) + + self.assertEqual(t0.utcoffset(), timedelta(hours=10)) + self.assertEqual(t1.utcoffset(), timedelta(hours=11)) + + def testFoldNegativeUTCOffset(self): + # Test that we can resolve ambiguous times + tzname = 'Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + TOR = self.tzclass(*args) + + t_n, t0_u, t1_u = self.get_utc_transitions(TOR, 2011, False) + + t0_tor = t0_u.astimezone(TOR) + t1_tor = t1_u.astimezone(TOR) + + self.assertEqual(t0_tor.replace(tzinfo=None), t_n) + self.assertEqual(t1_tor.replace(tzinfo=None), t_n) + + self.assertNotEqual(t0_tor.tzname(), t1_tor.tzname()) + self.assertEqual(t0_tor.utcoffset(), timedelta(hours=-4.0)) + self.assertEqual(t1_tor.utcoffset(), timedelta(hours=-5.0)) + + def testGapNegativeUTCOffset(self): + # Test that we don't have a problem around gaps. 
+ tzname = 'Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + TOR = self.tzclass(*args) + + t_n, t0_u, t1_u = self.get_utc_transitions(TOR, 2011, True) + + t0 = t0_u.astimezone(TOR) + t1 = t1_u.astimezone(TOR) + + self.assertEqual(t0.replace(tzinfo=None), + t_n) + + self.assertEqual(t1.replace(tzinfo=None), + t_n + timedelta(hours=2)) + + self.assertNotEqual(t0.tzname(), t1.tzname()) + self.assertEqual(t0.utcoffset(), timedelta(hours=-5.0)) + self.assertEqual(t1.utcoffset(), timedelta(hours=-4.0)) + + def testFoldIndependence(self): + tzname = 'Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + NYC = self.tzclass(*args) + UTC = tz.tzutc() + hour = timedelta(hours=1) + + # Firmly 2015-11-01 0:30 EDT-4 + t_n, t0_u, t1_u = self.get_utc_transitions(NYC, 2015, False) + + pre_dst = (t_n - hour).replace(tzinfo=NYC) + + # Currently, there's no way around the fact that this resolves to an + # ambiguous date, which defaults to EST. I'm not hard-coding in the + # answer, though, because the preferred behavior would be that this + # results in a time on the EDT side. + + # Ambiguous between 2015-11-01 1:30 EDT-4 and 2015-11-01 1:30 EST-5 + in_dst = pre_dst + hour + in_dst_tzname_0 = in_dst.tzname() # Stash the tzname - EDT + + # Doing the arithmetic in UTC creates a date that is unambiguously + # 2015-11-01 1:30 EDT-5 + in_dst_via_utc = (pre_dst.astimezone(UTC) + 2*hour).astimezone(NYC) + + # Make sure we got the right folding behavior + self.assertNotEqual(in_dst_via_utc.tzname(), in_dst_tzname_0) + + # Now check to make sure in_dst's tzname hasn't changed + self.assertEqual(in_dst_tzname_0, in_dst.tzname()) + + def testInZoneFoldEquality(self): + # Two datetimes in the same zone are considered to be equal if their + # wall times are equal, even if they have different absolute times. 
+ tzname = 'Eastern Standard Time' + args = self.get_args(tzname) + + with self.context(tzname): + NYC = self.tzclass(*args) + UTC = tz.tzutc() + + t_n, t0_u, t1_u = self.get_utc_transitions(NYC, 2011, False) + + dt0 = t_n.replace(tzinfo=NYC) + dt1 = tz.enfold(dt0, fold=1) + + # Make sure these actually represent different times + self.assertNotEqual(dt0.astimezone(UTC), dt1.astimezone(UTC)) + + # Test that they compare equal + self.assertEqual(dt0, dt1) + +### +# Test Cases +class TzUTCTest(unittest.TestCase): + def testSingleton(self): + UTC_0 = tz.tzutc() + UTC_1 = tz.tzutc() + + self.assertIs(UTC_0, UTC_1) + + def testOffset(self): + ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc()) + + self.assertEqual(ct.utcoffset(), timedelta(seconds=0)) + + def testDst(self): + ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc()) + + self.assertEqual(ct.dst(), timedelta(seconds=0)) + + def testTzName(self): + ct = datetime(2009, 4, 1, 12, 11, 13, tzinfo=tz.tzutc()) + self.assertEqual(ct.tzname(), 'UTC') + + def testEquality(self): + UTC0 = tz.tzutc() + UTC1 = tz.tzutc() + + self.assertEqual(UTC0, UTC1) + + def testInequality(self): + UTC = tz.tzutc() + UTCp4 = tz.tzoffset('UTC+4', 14400) + + self.assertNotEqual(UTC, UTCp4) + + def testInequalityInteger(self): + self.assertFalse(tz.tzutc() == 7) + self.assertNotEqual(tz.tzutc(), 7) + + def testInequalityUnsupported(self): + self.assertEqual(tz.tzutc(), ComparesEqual) + + def testRepr(self): + UTC = tz.tzutc() + self.assertEqual(repr(UTC), 'tzutc()') + + def testTimeOnlyUTC(self): + # https://github.com/dateutil/dateutil/issues/132 + # tzutc doesn't care + tz_utc = tz.tzutc() + self.assertEqual(dt_time(13, 20, tzinfo=tz_utc).utcoffset(), + timedelta(0)) + + def testAmbiguity(self): + # Pick an arbitrary datetime, this should always return False. + dt = datetime(2011, 9, 1, 2, 30, tzinfo=tz.tzutc()) + + self.assertFalse(tz.datetime_ambiguous(dt)) + + +@pytest.mark.tzoffset +class TzOffsetTest(unittest.TestCase): + def testTimedeltaOffset(self): + est = tz.tzoffset('EST', timedelta(hours=-5)) + est_s = tz.tzoffset('EST', -18000) + + self.assertEqual(est, est_s) + + def testTzNameNone(self): + gmt5 = tz.tzoffset(None, -18000) # -5:00 + self.assertIs(datetime(2003, 10, 26, 0, 0, tzinfo=gmt5).tzname(), + None) + + def testTimeOnlyOffset(self): + # tzoffset doesn't care + tz_offset = tz.tzoffset('+3', 3600) + self.assertEqual(dt_time(13, 20, tzinfo=tz_offset).utcoffset(), + timedelta(seconds=3600)) + + def testTzOffsetRepr(self): + tname = 'EST' + tzo = tz.tzoffset(tname, -5 * 3600) + self.assertEqual(repr(tzo), "tzoffset(" + repr(tname) + ", -18000)") + + def testEquality(self): + utc = tz.tzoffset('UTC', 0) + gmt = tz.tzoffset('GMT', 0) + + self.assertEqual(utc, gmt) + + def testUTCEquality(self): + utc = tz.tzutc() + o_utc = tz.tzoffset('UTC', 0) + + self.assertEqual(utc, o_utc) + self.assertEqual(o_utc, utc) + + def testInequalityInvalid(self): + tzo = tz.tzoffset('-3', -3 * 3600) + self.assertFalse(tzo == -3) + self.assertNotEqual(tzo, -3) + + def testInequalityUnsupported(self): + tzo = tz.tzoffset('-5', -5 * 3600) + + self.assertTrue(tzo == ComparesEqual) + self.assertFalse(tzo != ComparesEqual) + self.assertEqual(tzo, ComparesEqual) + + def testAmbiguity(self): + # Pick an arbitrary datetime, this should always return False. 
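+        # A fixed-offset zone has no transitions, so no wall time can ever
+        # map to two distinct UTC instants.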
+ dt = datetime(2011, 9, 1, 2, 30, tzinfo=tz.tzoffset("EST", -5 * 3600)) + + self.assertFalse(tz.datetime_ambiguous(dt)) + + def testTzOffsetInstance(self): + tz1 = tz.tzoffset.instance('EST', timedelta(hours=-5)) + tz2 = tz.tzoffset.instance('EST', timedelta(hours=-5)) + + assert tz1 is not tz2 + + def testTzOffsetSingletonDifferent(self): + tz1 = tz.tzoffset('EST', timedelta(hours=-5)) + tz2 = tz.tzoffset('EST', -18000) + + assert tz1 is tz2 + +@pytest.mark.tzoffset +@pytest.mark.parametrize('args', [ + ('UTC', 0), + ('EST', -18000), + ('EST', timedelta(hours=-5)), + (None, timedelta(hours=3)), +]) +def test_tzoffset_singleton(args): + tz1 = tz.tzoffset(*args) + tz2 = tz.tzoffset(*args) + + assert tz1 is tz2 + +@pytest.mark.tzlocal +class TzLocalTest(unittest.TestCase): + def testEquality(self): + tz1 = tz.tzlocal() + tz2 = tz.tzlocal() + + # Explicitly calling == and != here to ensure the operators work + self.assertTrue(tz1 == tz2) + self.assertFalse(tz1 != tz2) + + def testInequalityFixedOffset(self): + tzl = tz.tzlocal() + tzos = tz.tzoffset('LST', tzl._std_offset.total_seconds()) + tzod = tz.tzoffset('LDT', tzl._std_offset.total_seconds()) + + self.assertFalse(tzl == tzos) + self.assertFalse(tzl == tzod) + self.assertTrue(tzl != tzos) + self.assertTrue(tzl != tzod) + + def testInequalityInvalid(self): + tzl = tz.tzlocal() + + self.assertTrue(tzl != 1) + self.assertFalse(tzl == 1) + + # TODO: Use some sort of universal local mocking so that it's clear + # that we're expecting tzlocal to *not* be Pacific/Kiritimati + LINT = tz.gettz('Pacific/Kiritimati') + self.assertTrue(tzl != LINT) + self.assertFalse(tzl == LINT) + + def testInequalityUnsupported(self): + tzl = tz.tzlocal() + + self.assertTrue(tzl == ComparesEqual) + self.assertFalse(tzl != ComparesEqual) + + def testRepr(self): + tzl = tz.tzlocal() + + self.assertEqual(repr(tzl), 'tzlocal()') + + +@pytest.mark.parametrize('args,kwargs', [ + (('EST', -18000), {}), + (('EST', timedelta(hours=-5)), {}), + (('EST',), {'offset': -18000}), + (('EST',), {'offset': timedelta(hours=-5)}), + (tuple(), {'name': 'EST', 'offset': -18000}) +]) +def test_tzoffset_is(args, kwargs): + tz_ref = tz.tzoffset('EST', -18000) + assert tz.tzoffset(*args, **kwargs) is tz_ref + + +def test_tzoffset_is_not(): + assert tz.tzoffset('EDT', -14400) is not tz.tzoffset('EST', -18000) + + +@pytest.mark.tzlocal +@unittest.skipIf(IS_WIN, "requires Unix") +@unittest.skipUnless(TZEnvContext.tz_change_allowed(), + TZEnvContext.tz_change_disallowed_message()) +class TzLocalNixTest(unittest.TestCase, TzFoldMixin): + # This is a set of tests for `tzlocal()` on *nix systems + + # POSIX string indicating change to summer time on the 2nd Sunday in March + # at 2AM, and ending the 1st Sunday in November at 2AM. 
(valid >= 2007) + TZ_EST = 'EST+5EDT,M3.2.0/2,M11.1.0/2' + + # POSIX string for AEST/AEDT (valid >= 2008) + TZ_AEST = 'AEST-10AEDT,M10.1.0/2,M4.1.0/3' + + # POSIX string for BST/GMT + TZ_LON = 'GMT0BST,M3.5.0,M10.5.0' + + # POSIX string for UTC + UTC = 'UTC' + + def gettz(self, tzname): + # Actual time zone changes are handled by the _gettz_context function + return tz.tzlocal() + + def _gettz_context(self, tzname): + tzname_map = {'Australia/Sydney': self.TZ_AEST, + 'America/Toronto': self.TZ_EST, + 'America/New_York': self.TZ_EST, + 'Europe/London': self.TZ_LON} + + return TZEnvContext(tzname_map.get(tzname, tzname)) + + def _testTzFunc(self, tzval, func, std_val, dst_val): + """ + This generates tests about how the behavior of a function ``func`` + changes between STD and DST (e.g. utcoffset, tzname, dst). + + It assume that DST starts the 2nd Sunday in March and ends the 1st + Sunday in November + """ + with TZEnvContext(tzval): + dt1 = datetime(2015, 2, 1, 12, 0, tzinfo=tz.tzlocal()) # STD + dt2 = datetime(2015, 5, 1, 12, 0, tzinfo=tz.tzlocal()) # DST + + self.assertEqual(func(dt1), std_val) + self.assertEqual(func(dt2), dst_val) + + def _testTzName(self, tzval, std_name, dst_name): + func = datetime.tzname + + self._testTzFunc(tzval, func, std_name, dst_name) + + def testTzNameDST(self): + # Test tzname in a zone with DST + self._testTzName(self.TZ_EST, 'EST', 'EDT') + + def testTzNameUTC(self): + # Test tzname in a zone without DST + self._testTzName(self.UTC, 'UTC', 'UTC') + + def _testOffset(self, tzval, std_off, dst_off): + func = datetime.utcoffset + + self._testTzFunc(tzval, func, std_off, dst_off) + + def testOffsetDST(self): + self._testOffset(self.TZ_EST, timedelta(hours=-5), timedelta(hours=-4)) + + def testOffsetUTC(self): + self._testOffset(self.UTC, timedelta(0), timedelta(0)) + + def _testDST(self, tzval, dst_dst): + func = datetime.dst + std_dst = timedelta(0) + + self._testTzFunc(tzval, func, std_dst, dst_dst) + + def testDSTDST(self): + self._testDST(self.TZ_EST, timedelta(hours=1)) + + def testDSTUTC(self): + self._testDST(self.UTC, timedelta(0)) + + def testTimeOnlyOffsetLocalUTC(self): + with TZEnvContext(self.UTC): + self.assertEqual(dt_time(13, 20, tzinfo=tz.tzlocal()).utcoffset(), + timedelta(0)) + + def testTimeOnlyOffsetLocalDST(self): + with TZEnvContext(self.TZ_EST): + self.assertIs(dt_time(13, 20, tzinfo=tz.tzlocal()).utcoffset(), + None) + + def testTimeOnlyDSTLocalUTC(self): + with TZEnvContext(self.UTC): + self.assertEqual(dt_time(13, 20, tzinfo=tz.tzlocal()).dst(), + timedelta(0)) + + def testTimeOnlyDSTLocalDST(self): + with TZEnvContext(self.TZ_EST): + self.assertIs(dt_time(13, 20, tzinfo=tz.tzlocal()).dst(), + None) + + def testUTCEquality(self): + with TZEnvContext(self.UTC): + assert tz.tzlocal() == tz.tzutc() + + +# TODO: Maybe a better hack than this? 
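+# mark_tzlocal_nix applies the same tzlocal / Unix-only / tz-change-allowed
+# marks to bare pytest functions that the decorators above apply to the
+# TzLocalNixTest class.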
+def mark_tzlocal_nix(f): + marks = [ + pytest.mark.tzlocal, + pytest.mark.skipif(IS_WIN, reason='requires Unix'), + pytest.mark.skipif(not TZEnvContext.tz_change_allowed, + reason=TZEnvContext.tz_change_disallowed_message()) + ] + + for mark in reversed(marks): + f = mark(f) + + return f + + +@mark_tzlocal_nix +@pytest.mark.parametrize('tzvar', ['UTC', 'GMT0', 'UTC0']) +def test_tzlocal_utc_equal(tzvar): + with TZEnvContext(tzvar): + assert tz.tzlocal() == tz.UTC + + +@mark_tzlocal_nix +@pytest.mark.parametrize('tzvar', [ + 'Europe/London', 'America/New_York', + 'GMT0BST', 'EST5EDT']) +def test_tzlocal_utc_unequal(tzvar): + with TZEnvContext(tzvar): + assert tz.tzlocal() != tz.UTC + + +@mark_tzlocal_nix +def test_tzlocal_local_time_trim_colon(): + with TZEnvContext(':/etc/localtime'): + assert tz.gettz() is not None + + +@mark_tzlocal_nix +@pytest.mark.parametrize('tzvar, tzoff', [ + ('EST5', tz.tzoffset('EST', -18000)), + ('GMT', tz.tzoffset('GMT', 0)), + ('YAKT-9', tz.tzoffset('YAKT', timedelta(hours=9))), + ('JST-9', tz.tzoffset('JST', timedelta(hours=9))), +]) +def test_tzlocal_offset_equal(tzvar, tzoff): + with TZEnvContext(tzvar): + # Including both to test both __eq__ and __ne__ + assert tz.tzlocal() == tzoff + assert not (tz.tzlocal() != tzoff) + + +@mark_tzlocal_nix +@pytest.mark.parametrize('tzvar, tzoff', [ + ('EST5EDT', tz.tzoffset('EST', -18000)), + ('GMT0BST', tz.tzoffset('GMT', 0)), + ('EST5', tz.tzoffset('EST', -14400)), + ('YAKT-9', tz.tzoffset('JST', timedelta(hours=9))), + ('JST-9', tz.tzoffset('YAKT', timedelta(hours=9))), +]) +def test_tzlocal_offset_unequal(tzvar, tzoff): + with TZEnvContext(tzvar): + # Including both to test both __eq__ and __ne__ + assert tz.tzlocal() != tzoff + assert not (tz.tzlocal() == tzoff) + + +@pytest.mark.gettz +class GettzTest(unittest.TestCase, TzFoldMixin): + gettz = staticmethod(tz.gettz) + + def testGettz(self): + # bug 892569 + str(self.gettz('UTC')) + + def testGetTzEquality(self): + self.assertEqual(self.gettz('UTC'), self.gettz('UTC')) + + def testTimeOnlyGettz(self): + # gettz returns None + tz_get = self.gettz('Europe/Minsk') + self.assertIs(dt_time(13, 20, tzinfo=tz_get).utcoffset(), None) + + def testTimeOnlyGettzDST(self): + # gettz returns None + tz_get = self.gettz('Europe/Minsk') + self.assertIs(dt_time(13, 20, tzinfo=tz_get).dst(), None) + + def testTimeOnlyGettzTzName(self): + tz_get = self.gettz('Europe/Minsk') + self.assertIs(dt_time(13, 20, tzinfo=tz_get).tzname(), None) + + def testTimeOnlyFormatZ(self): + tz_get = self.gettz('Europe/Minsk') + t = dt_time(13, 20, tzinfo=tz_get) + + self.assertEqual(t.strftime('%H%M%Z'), '1320') + + def testPortugalDST(self): + # In 1996, Portugal changed from CET to WET + PORTUGAL = self.gettz('Portugal') + + t_cet = datetime(1996, 3, 31, 1, 59, tzinfo=PORTUGAL) + + self.assertEqual(t_cet.tzname(), 'CET') + self.assertEqual(t_cet.utcoffset(), timedelta(hours=1)) + self.assertEqual(t_cet.dst(), timedelta(0)) + + t_west = datetime(1996, 3, 31, 2, 1, tzinfo=PORTUGAL) + + self.assertEqual(t_west.tzname(), 'WEST') + self.assertEqual(t_west.utcoffset(), timedelta(hours=1)) + self.assertEqual(t_west.dst(), timedelta(hours=1)) + + def testGettzCacheTzFile(self): + NYC1 = tz.gettz('America/New_York') + NYC2 = tz.gettz('America/New_York') + + assert NYC1 is NYC2 + + def testGettzCacheTzLocal(self): + local1 = tz.gettz() + local2 = tz.gettz() + + assert local1 is not local2 + +@pytest.mark.gettz +@pytest.mark.xfail(IS_WIN, reason='zoneinfo separately cached') +def test_gettz_cache_clear(): + 
NYC1 = tz.gettz('America/New_York') + tz.gettz.cache_clear() + + NYC2 = tz.gettz('America/New_York') + + assert NYC1 is not NYC2 + + +class ZoneInfoGettzTest(GettzTest, WarningTestMixin): + def gettz(self, name): + zoneinfo_file = zoneinfo.get_zonefile_instance() + return zoneinfo_file.get(name) + + def testZoneInfoFileStart1(self): + tz = self.gettz("EST5EDT") + self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tz).tzname(), "EST", + MISSING_TARBALL) + self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tz).tzname(), "EDT") + + def testZoneInfoFileEnd1(self): + tzc = self.gettz("EST5EDT") + self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(), + "EDT", MISSING_TARBALL) + + end_est = tz.enfold(datetime(2003, 10, 26, 1, 00, tzinfo=tzc), fold=1) + self.assertEqual(end_est.tzname(), "EST") + + def testZoneInfoOffsetSignal(self): + utc = self.gettz("UTC") + nyc = self.gettz("America/New_York") + self.assertNotEqual(utc, None, MISSING_TARBALL) + self.assertNotEqual(nyc, None) + t0 = datetime(2007, 11, 4, 0, 30, tzinfo=nyc) + t1 = t0.astimezone(utc) + t2 = t1.astimezone(nyc) + self.assertEqual(t0, t2) + self.assertEqual(nyc.dst(t0), timedelta(hours=1)) + + def testZoneInfoCopy(self): + # copy.copy() called on a ZoneInfo file was returning the same instance + CHI = self.gettz('America/Chicago') + CHI_COPY = copy.copy(CHI) + + self.assertIsNot(CHI, CHI_COPY) + self.assertEqual(CHI, CHI_COPY) + + def testZoneInfoDeepCopy(self): + CHI = self.gettz('America/Chicago') + CHI_COPY = copy.deepcopy(CHI) + + self.assertIsNot(CHI, CHI_COPY) + self.assertEqual(CHI, CHI_COPY) + + def testZoneInfoInstanceCaching(self): + zif_0 = zoneinfo.get_zonefile_instance() + zif_1 = zoneinfo.get_zonefile_instance() + + self.assertIs(zif_0, zif_1) + + def testZoneInfoNewInstance(self): + zif_0 = zoneinfo.get_zonefile_instance() + zif_1 = zoneinfo.get_zonefile_instance(new_instance=True) + zif_2 = zoneinfo.get_zonefile_instance() + + self.assertIsNot(zif_0, zif_1) + self.assertIs(zif_1, zif_2) + + def testZoneInfoDeprecated(self): + with self.assertWarns(DeprecationWarning): + zoneinfo.gettz('US/Eastern') + + def testZoneInfoMetadataDeprecated(self): + with self.assertWarns(DeprecationWarning): + zoneinfo.gettz_db_metadata() + + +class TZRangeTest(unittest.TestCase, TzFoldMixin): + TZ_EST = tz.tzrange('EST', timedelta(hours=-5), + 'EDT', timedelta(hours=-4), + start=relativedelta(month=3, day=1, hour=2, + weekday=SU(+2)), + end=relativedelta(month=11, day=1, hour=1, + weekday=SU(+1))) + + TZ_AEST = tz.tzrange('AEST', timedelta(hours=10), + 'AEDT', timedelta(hours=11), + start=relativedelta(month=10, day=1, hour=2, + weekday=SU(+1)), + end=relativedelta(month=4, day=1, hour=2, + weekday=SU(+1))) + + TZ_LON = tz.tzrange('GMT', timedelta(hours=0), + 'BST', timedelta(hours=1), + start=relativedelta(month=3, day=31, weekday=SU(-1), + hours=2), + end=relativedelta(month=10, day=31, weekday=SU(-1), + hours=1)) + # POSIX string for UTC + UTC = 'UTC' + + def gettz(self, tzname): + tzname_map = {'Australia/Sydney': self.TZ_AEST, + 'America/Toronto': self.TZ_EST, + 'America/New_York': self.TZ_EST, + 'Europe/London': self.TZ_LON} + + return tzname_map[tzname] + + def testRangeCmp1(self): + self.assertEqual(tz.tzstr("EST5EDT"), + tz.tzrange("EST", -18000, "EDT", -14400, + relativedelta(hours=+2, + month=4, day=1, + weekday=SU(+1)), + relativedelta(hours=+1, + month=10, day=31, + weekday=SU(-1)))) + + def testRangeCmp2(self): + self.assertEqual(tz.tzstr("EST5EDT"), + tz.tzrange("EST", -18000, "EDT")) + + def 
testRangeOffsets(self): + TZR = tz.tzrange('EST', -18000, 'EDT', -14400, + start=relativedelta(hours=2, month=4, day=1, + weekday=SU(+2)), + end=relativedelta(hours=1, month=10, day=31, + weekday=SU(-1))) + + dt_std = datetime(2014, 4, 11, 12, 0, tzinfo=TZR) # STD + dt_dst = datetime(2016, 4, 11, 12, 0, tzinfo=TZR) # DST + + dst_zero = timedelta(0) + dst_hour = timedelta(hours=1) + + std_offset = timedelta(hours=-5) + dst_offset = timedelta(hours=-4) + + # Check dst() + self.assertEqual(dt_std.dst(), dst_zero) + self.assertEqual(dt_dst.dst(), dst_hour) + + # Check utcoffset() + self.assertEqual(dt_std.utcoffset(), std_offset) + self.assertEqual(dt_dst.utcoffset(), dst_offset) + + # Check tzname + self.assertEqual(dt_std.tzname(), 'EST') + self.assertEqual(dt_dst.tzname(), 'EDT') + + def testTimeOnlyRangeFixed(self): + # This is a fixed-offset zone, so tzrange allows this + tz_range = tz.tzrange('dflt', stdoffset=timedelta(hours=-3)) + self.assertEqual(dt_time(13, 20, tzinfo=tz_range).utcoffset(), + timedelta(hours=-3)) + + def testTimeOnlyRange(self): + # tzrange returns None because this zone has DST + tz_range = tz.tzrange('EST', timedelta(hours=-5), + 'EDT', timedelta(hours=-4)) + self.assertIs(dt_time(13, 20, tzinfo=tz_range).utcoffset(), None) + + def testBrokenIsDstHandling(self): + # tzrange._isdst() was using a date() rather than a datetime(). + # Issue reported by Lennart Regebro. + dt = datetime(2007, 8, 6, 4, 10, tzinfo=tz.tzutc()) + self.assertEqual(dt.astimezone(tz=tz.gettz("GMT+2")), + datetime(2007, 8, 6, 6, 10, tzinfo=tz.tzstr("GMT+2"))) + + def testRangeTimeDelta(self): + # Test that tzrange can be specified with a timedelta instead of an int. + EST5EDT_td = tz.tzrange('EST', timedelta(hours=-5), + 'EDT', timedelta(hours=-4)) + + EST5EDT_sec = tz.tzrange('EST', -18000, + 'EDT', -14400) + + self.assertEqual(EST5EDT_td, EST5EDT_sec) + + def testRangeEquality(self): + TZR1 = tz.tzrange('EST', -18000, 'EDT', -14400) + + # Standard abbreviation different + TZR2 = tz.tzrange('ET', -18000, 'EDT', -14400) + self.assertNotEqual(TZR1, TZR2) + + # DST abbreviation different + TZR3 = tz.tzrange('EST', -18000, 'EMT', -14400) + self.assertNotEqual(TZR1, TZR3) + + # STD offset different + TZR4 = tz.tzrange('EST', -14000, 'EDT', -14400) + self.assertNotEqual(TZR1, TZR4) + + # DST offset different + TZR5 = tz.tzrange('EST', -18000, 'EDT', -18000) + self.assertNotEqual(TZR1, TZR5) + + # Start delta different + TZR6 = tz.tzrange('EST', -18000, 'EDT', -14400, + start=relativedelta(hours=+1, month=3, + day=1, weekday=SU(+2))) + self.assertNotEqual(TZR1, TZR6) + + # End delta different + TZR7 = tz.tzrange('EST', -18000, 'EDT', -14400, + end=relativedelta(hours=+1, month=11, + day=1, weekday=SU(+2))) + self.assertNotEqual(TZR1, TZR7) + + def testRangeInequalityUnsupported(self): + TZR = tz.tzrange('EST', -18000, 'EDT', -14400) + + self.assertFalse(TZR == 4) + self.assertTrue(TZR == ComparesEqual) + self.assertFalse(TZR != ComparesEqual) + + +@pytest.mark.tzstr +class TZStrTest(unittest.TestCase, TzFoldMixin): + # POSIX string indicating change to summer time on the 2nd Sunday in March + # at 2AM, and ending the 1st Sunday in November at 2AM. 
(valid >= 2007) + TZ_EST = 'EST+5EDT,M3.2.0/2,M11.1.0/2' + + # POSIX string for AEST/AEDT (valid >= 2008) + TZ_AEST = 'AEST-10AEDT,M10.1.0/2,M4.1.0/3' + + # POSIX string for GMT/BST + TZ_LON = 'GMT0BST,M3.5.0,M10.5.0' + + def gettz(self, tzname): + # Actual time zone changes are handled by the _gettz_context function + tzname_map = {'Australia/Sydney': self.TZ_AEST, + 'America/Toronto': self.TZ_EST, + 'America/New_York': self.TZ_EST, + 'Europe/London': self.TZ_LON} + + return tz.tzstr(tzname_map[tzname]) + + def testStrStr(self): + # Test that tz.tzstr() won't throw an error if given a str instead + # of a unicode literal. + self.assertEqual(datetime(2003, 4, 6, 1, 59, + tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EST") + self.assertEqual(datetime(2003, 4, 6, 2, 00, + tzinfo=tz.tzstr(str("EST5EDT"))).tzname(), "EDT") + + def testStrInequality(self): + TZS1 = tz.tzstr('EST5EDT4') + + # Standard abbreviation different + TZS2 = tz.tzstr('ET5EDT4') + self.assertNotEqual(TZS1, TZS2) + + # DST abbreviation different + TZS3 = tz.tzstr('EST5EMT') + self.assertNotEqual(TZS1, TZS3) + + # STD offset different + TZS4 = tz.tzstr('EST4EDT4') + self.assertNotEqual(TZS1, TZS4) + + # DST offset different + TZS5 = tz.tzstr('EST5EDT3') + self.assertNotEqual(TZS1, TZS5) + + def testStrInequalityStartEnd(self): + TZS1 = tz.tzstr('EST5EDT4') + + # Start delta different + TZS2 = tz.tzstr('EST5EDT4,M4.2.0/02:00:00,M10-5-0/02:00') + self.assertNotEqual(TZS1, TZS2) + + # End delta different + TZS3 = tz.tzstr('EST5EDT4,M4.2.0/02:00:00,M11-5-0/02:00') + self.assertNotEqual(TZS1, TZS3) + + def testPosixOffset(self): + TZ1 = tz.tzstr('UTC-3') + self.assertEqual(datetime(2015, 1, 1, tzinfo=TZ1).utcoffset(), + timedelta(hours=-3)) + + TZ2 = tz.tzstr('UTC-3', posix_offset=True) + self.assertEqual(datetime(2015, 1, 1, tzinfo=TZ2).utcoffset(), + timedelta(hours=+3)) + + def testStrInequalityUnsupported(self): + TZS = tz.tzstr('EST5EDT') + + self.assertFalse(TZS == 4) + self.assertTrue(TZS == ComparesEqual) + self.assertFalse(TZS != ComparesEqual) + + def testTzStrRepr(self): + TZS1 = tz.tzstr('EST5EDT4') + TZS2 = tz.tzstr('EST') + + self.assertEqual(repr(TZS1), "tzstr(" + repr('EST5EDT4') + ")") + self.assertEqual(repr(TZS2), "tzstr(" + repr('EST') + ")") + + def testTzStrFailure(self): + with self.assertRaises(ValueError): + tz.tzstr('InvalidString;439999') + + def testTzStrSingleton(self): + tz1 = tz.tzstr('EST5EDT') + tz2 = tz.tzstr('CST4CST') + tz3 = tz.tzstr('EST5EDT') + + self.assertIsNot(tz1, tz2) + self.assertIs(tz1, tz3) + + def testTzStrSingletonPosix(self): + tz_t1 = tz.tzstr('GMT+3', posix_offset=True) + tz_f1 = tz.tzstr('GMT+3', posix_offset=False) + + tz_t2 = tz.tzstr('GMT+3', posix_offset=True) + tz_f2 = tz.tzstr('GMT+3', posix_offset=False) + + self.assertIs(tz_t1, tz_t2) + self.assertIsNot(tz_t1, tz_f1) + + self.assertIs(tz_f1, tz_f2) + + def testTzStrInstance(self): + tz1 = tz.tzstr('EST5EDT') + tz2 = tz.tzstr.instance('EST5EDT') + tz3 = tz.tzstr.instance('EST5EDT') + + assert tz1 is not tz2 + assert tz2 is not tz3 + + # Ensure that these still are all the same zone + assert tz1 == tz2 == tz3 + +@pytest.mark.tzstr +@pytest.mark.parametrize('tz_str,expected', [ + # From https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + ('', tz.tzrange(None)), # TODO: Should change this so tz.tzrange('') works + ('EST+5EDT,M3.2.0/2,M11.1.0/12', + tz.tzrange('EST', -18000, 'EDT', -14400, + start=relativedelta(month=3, day=1, weekday=SU(2), hours=2), + end=relativedelta(month=11, day=1, weekday=SU(1), 
hours=11))), + ('WART4WARST,J1/0,J365/25', # This is DST all year, Western Argentina Summer Time + tz.tzrange('WART', timedelta(hours=-4), 'WARST', + start=relativedelta(month=1, day=1, hours=0), + end=relativedelta(month=12, day=31, days=1))), + ('IST-2IDT,M3.4.4/26,M10.5.0', # Israel Standard / Daylight Time + tz.tzrange('IST', timedelta(hours=2), 'IDT', + start=relativedelta(month=3, day=1, weekday=TH(4), days=1, hours=2), + end=relativedelta(month=10, day=31, weekday=SU(-1), hours=1))), + ('WGT3WGST,M3.5.0/2,M10.5.0/1', + tz.tzrange('WGT', timedelta(hours=-3), 'WGST', + start=relativedelta(month=3, day=31, weekday=SU(-1), hours=2), + end=relativedelta(month=10, day=31, weekday=SU(-1), hours=0))), + + # Different offset specifications + ('WGT0300WGST', + tz.tzrange('WGT', timedelta(hours=-3), 'WGST')), + ('WGT03:00WGST', + tz.tzrange('WGT', timedelta(hours=-3), 'WGST')), + ('AEST-1100AEDT', + tz.tzrange('AEST', timedelta(hours=11), 'AEDT')), + ('AEST-11:00AEDT', + tz.tzrange('AEST', timedelta(hours=11), 'AEDT')), + + # Different time formats + ('EST5EDT,M3.2.0/4:00,M11.1.0/3:00', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(month=3, day=1, weekday=SU(2), hours=4), + end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))), + ('EST5EDT,M3.2.0/04:00,M11.1.0/03:00', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(month=3, day=1, weekday=SU(2), hours=4), + end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))), + ('EST5EDT,M3.2.0/0400,M11.1.0/0300', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(month=3, day=1, weekday=SU(2), hours=4), + end=relativedelta(month=11, day=1, weekday=SU(1), hours=2))), +]) +def test_valid_GNU_tzstr(tz_str, expected): + tzi = tz.tzstr(tz_str) + + assert tzi == expected + + +@pytest.mark.tzstr +@pytest.mark.parametrize('tz_str, expected', [ + ('EST5EDT,5,4,0,7200,11,3,0,7200', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(month=5, day=1, weekday=SU(+4), hours=+2), + end=relativedelta(month=11, day=1, weekday=SU(+3), hours=+1))), + ('EST5EDT,5,-4,0,7200,11,3,0,7200', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(hours=+2, month=5, day=31, weekday=SU(-4)), + end=relativedelta(hours=+1, month=11, day=1, weekday=SU(+3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200,3600', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200,3600', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200,-3600', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', timedelta(hours=-6), + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=+3, month=11, day=31, weekday=SU(-3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200,+7200', + tz.tzrange('EST', timedelta(hours=-5), 'EDT', timedelta(hours=-3), + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=0, month=11, day=31, weekday=SU(-3)))), + ('EST5EDT,5,4,0,7200,11,-3,0,7200,+3600', + tz.tzrange('EST', timedelta(hours=-5), 
'EDT', + start=relativedelta(hours=+2, month=5, day=1, weekday=SU(+4)), + end=relativedelta(hours=+1, month=11, day=31, weekday=SU(-3)))), +]) +def test_valid_dateutil_format(tz_str, expected): + # This tests the dateutil-specific format that is used widely in the tests + # and examples. It is unclear where this format originated from. + with pytest.warns(tz.DeprecatedTzFormatWarning): + tzi = tz.tzstr.instance(tz_str) + + assert tzi == expected + + +@pytest.mark.tzstr +@pytest.mark.parametrize('tz_str', [ + 'hdfiughdfuig,dfughdfuigpu87ñ::', + ',dfughdfuigpu87ñ::', + '-1:WART4WARST,J1,J365/25', + 'WART4WARST,J1,J365/-25', + 'IST-2IDT,M3.4.-1/26,M10.5.0', + 'IST-2IDT,M3,2000,1/26,M10,5,0' +]) +def test_invalid_GNU_tzstr(tz_str): + with pytest.raises(ValueError): + tz.tzstr(tz_str) + + +# Different representations of the same default rule set +DEFAULT_TZSTR_RULES_EQUIV_2003 = [ + 'EST5EDT', + 'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00', + 'EST5EDT4,95/02:00:00,298/02:00', + 'EST5EDT4,J96/02:00:00,J299/02:00', + 'EST5EDT4,J96/02:00:00,J299/02' +] + + +@pytest.mark.tzstr +@pytest.mark.parametrize('tz_str', DEFAULT_TZSTR_RULES_EQUIV_2003) +def test_tzstr_default_start(tz_str): + tzi = tz.tzstr(tz_str) + dt_std = datetime(2003, 4, 6, 1, 59, tzinfo=tzi) + dt_dst = datetime(2003, 4, 6, 2, 00, tzinfo=tzi) + + assert get_timezone_tuple(dt_std) == EST_TUPLE + assert get_timezone_tuple(dt_dst) == EDT_TUPLE + + +@pytest.mark.tzstr +@pytest.mark.parametrize('tz_str', DEFAULT_TZSTR_RULES_EQUIV_2003) +def test_tzstr_default_end(tz_str): + tzi = tz.tzstr(tz_str) + dt_dst = datetime(2003, 10, 26, 0, 59, tzinfo=tzi) + dt_dst_ambig = datetime(2003, 10, 26, 1, 00, tzinfo=tzi) + dt_std_ambig = tz.enfold(dt_dst_ambig, fold=1) + dt_std = datetime(2003, 10, 26, 2, 00, tzinfo=tzi) + + assert get_timezone_tuple(dt_dst) == EDT_TUPLE + assert get_timezone_tuple(dt_dst_ambig) == EDT_TUPLE + assert get_timezone_tuple(dt_std_ambig) == EST_TUPLE + assert get_timezone_tuple(dt_std) == EST_TUPLE + + +@pytest.mark.tzstr +@pytest.mark.parametrize('tzstr_1', ['EST5EDT', + 'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00']) +@pytest.mark.parametrize('tzstr_2', ['EST5EDT', + 'EST5EDT4,M4.1.0/02:00:00,M10-5-0/02:00']) +def test_tzstr_default_cmp(tzstr_1, tzstr_2): + tz1 = tz.tzstr(tzstr_1) + tz2 = tz.tzstr(tzstr_2) + + assert tz1 == tz2 + +class TZICalTest(unittest.TestCase, TzFoldMixin): + def _gettz_str_tuple(self, tzname): + TZ_EST = ( + 'BEGIN:VTIMEZONE', + 'TZID:US-Eastern', + 'BEGIN:STANDARD', + 'DTSTART:19971029T020000', + 'RRULE:FREQ=YEARLY;BYDAY=+1SU;BYMONTH=11', + 'TZOFFSETFROM:-0400', + 'TZOFFSETTO:-0500', + 'TZNAME:EST', + 'END:STANDARD', + 'BEGIN:DAYLIGHT', + 'DTSTART:19980301T020000', + 'RRULE:FREQ=YEARLY;BYDAY=+2SU;BYMONTH=03', + 'TZOFFSETFROM:-0500', + 'TZOFFSETTO:-0400', + 'TZNAME:EDT', + 'END:DAYLIGHT', + 'END:VTIMEZONE' + ) + + TZ_PST = ( + 'BEGIN:VTIMEZONE', + 'TZID:US-Pacific', + 'BEGIN:STANDARD', + 'DTSTART:19971029T020000', + 'RRULE:FREQ=YEARLY;BYDAY=+1SU;BYMONTH=11', + 'TZOFFSETFROM:-0700', + 'TZOFFSETTO:-0800', + 'TZNAME:PST', + 'END:STANDARD', + 'BEGIN:DAYLIGHT', + 'DTSTART:19980301T020000', + 'RRULE:FREQ=YEARLY;BYDAY=+2SU;BYMONTH=03', + 'TZOFFSETFROM:-0800', + 'TZOFFSETTO:-0700', + 'TZNAME:PDT', + 'END:DAYLIGHT', + 'END:VTIMEZONE' + ) + + TZ_AEST = ( + 'BEGIN:VTIMEZONE', + 'TZID:Australia-Sydney', + 'BEGIN:STANDARD', + 'DTSTART:19980301T030000', + 'RRULE:FREQ=YEARLY;BYDAY=+1SU;BYMONTH=04', + 'TZOFFSETFROM:+1100', + 'TZOFFSETTO:+1000', + 'TZNAME:AEST', + 'END:STANDARD', + 'BEGIN:DAYLIGHT', + 
'DTSTART:19971029T020000', + 'RRULE:FREQ=YEARLY;BYDAY=+1SU;BYMONTH=10', + 'TZOFFSETFROM:+1000', + 'TZOFFSETTO:+1100', + 'TZNAME:AEDT', + 'END:DAYLIGHT', + 'END:VTIMEZONE' + ) + + TZ_LON = ( + 'BEGIN:VTIMEZONE', + 'TZID:Europe-London', + 'BEGIN:STANDARD', + 'DTSTART:19810301T030000', + 'RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10;BYHOUR=02', + 'TZOFFSETFROM:+0100', + 'TZOFFSETTO:+0000', + 'TZNAME:GMT', + 'END:STANDARD', + 'BEGIN:DAYLIGHT', + 'DTSTART:19961001T030000', + 'RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=03;BYHOUR=01', + 'TZOFFSETFROM:+0000', + 'TZOFFSETTO:+0100', + 'TZNAME:BST', + 'END:DAYLIGHT', + 'END:VTIMEZONE' + ) + + tzname_map = {'Australia/Sydney': TZ_AEST, + 'America/Toronto': TZ_EST, + 'America/New_York': TZ_EST, + 'America/Los_Angeles': TZ_PST, + 'Europe/London': TZ_LON} + + return tzname_map[tzname] + + def _gettz_str(self, tzname): + return '\n'.join(self._gettz_str_tuple(tzname)) + + def _tzstr_dtstart_with_params(self, tzname, param_str): + # Adds parameters to the DTSTART values of a given tzstr + tz_str_tuple = self._gettz_str_tuple(tzname) + + out_tz = [] + for line in tz_str_tuple: + if line.startswith('DTSTART'): + name, value = line.split(':', 1) + line = name + ';' + param_str + ':' + value + + out_tz.append(line) + + return '\n'.join(out_tz) + + def gettz(self, tzname): + tz_str = self._gettz_str(tzname) + + tzc = tz.tzical(StringIO(tz_str)).get() + + return tzc + + def testRepr(self): + instr = StringIO(TZICAL_PST8PDT) + instr.name = 'StringIO(PST8PDT)' + tzc = tz.tzical(instr) + + self.assertEqual(repr(tzc), "tzical(" + repr(instr.name) + ")") + + # Test performance + def _test_us_zone(self, tzc, func, values, start): + if start: + dt1 = datetime(2003, 3, 9, 1, 59) + dt2 = datetime(2003, 3, 9, 2, 00) + fold = [0, 0] + else: + dt1 = datetime(2003, 11, 2, 0, 59) + dt2 = datetime(2003, 11, 2, 1, 00) + fold = [0, 1] + + dts = (tz.enfold(dt.replace(tzinfo=tzc), fold=f) + for dt, f in zip((dt1, dt2), fold)) + + for value, dt in zip(values, dts): + self.assertEqual(func(dt), value) + + def _test_multi_zones(self, tzstrs, tzids, func, values, start): + tzic = tz.tzical(StringIO('\n'.join(tzstrs))) + for tzid, vals in zip(tzids, values): + tzc = tzic.get(tzid) + + self._test_us_zone(tzc, func, vals, start) + + def _prepare_EST(self): + tz_str = self._gettz_str('America/New_York') + return tz.tzical(StringIO(tz_str)).get() + + def _testEST(self, start, test_type, tzc=None): + if tzc is None: + tzc = self._prepare_EST() + + argdict = { + 'name': (datetime.tzname, ('EST', 'EDT')), + 'offset': (datetime.utcoffset, (timedelta(hours=-5), + timedelta(hours=-4))), + 'dst': (datetime.dst, (timedelta(hours=0), + timedelta(hours=1))) + } + + func, values = argdict[test_type] + + if not start: + values = reversed(values) + + self._test_us_zone(tzc, func, values, start=start) + + def testESTStartName(self): + self._testEST(start=True, test_type='name') + + def testESTEndName(self): + self._testEST(start=False, test_type='name') + + def testESTStartOffset(self): + self._testEST(start=True, test_type='offset') + + def testESTEndOffset(self): + self._testEST(start=False, test_type='offset') + + def testESTStartDST(self): + self._testEST(start=True, test_type='dst') + + def testESTEndDST(self): + self._testEST(start=False, test_type='dst') + + def testESTValueDatetime(self): + # Violating one-test-per-test rule because we're not set up to do + # parameterized tests and the manual proliferation is getting a bit + # out of hand. 
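+        # A DTSTART inside a VTIMEZONE component is always of type DATE-TIME,
+        # so an explicit VALUE=DATE-TIME parameter is redundant but legal and
+        # must parse the same as the bare form.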
+ tz_str = self._tzstr_dtstart_with_params('America/New_York', + 'VALUE=DATE-TIME') + + tzc = tz.tzical(StringIO(tz_str)).get() + + for start in (True, False): + for test_type in ('name', 'offset', 'dst'): + self._testEST(start=start, test_type=test_type, tzc=tzc) + + def _testMultizone(self, start, test_type): + tzstrs = (self._gettz_str('America/New_York'), + self._gettz_str('America/Los_Angeles')) + tzids = ('US-Eastern', 'US-Pacific') + + argdict = { + 'name': (datetime.tzname, (('EST', 'EDT'), + ('PST', 'PDT'))), + 'offset': (datetime.utcoffset, ((timedelta(hours=-5), + timedelta(hours=-4)), + (timedelta(hours=-8), + timedelta(hours=-7)))), + 'dst': (datetime.dst, ((timedelta(hours=0), + timedelta(hours=1)), + (timedelta(hours=0), + timedelta(hours=1)))) + } + + func, values = argdict[test_type] + + if not start: + values = map(reversed, values) + + self._test_multi_zones(tzstrs, tzids, func, values, start) + + def testMultiZoneStartName(self): + self._testMultizone(start=True, test_type='name') + + def testMultiZoneEndName(self): + self._testMultizone(start=False, test_type='name') + + def testMultiZoneStartOffset(self): + self._testMultizone(start=True, test_type='offset') + + def testMultiZoneEndOffset(self): + self._testMultizone(start=False, test_type='offset') + + def testMultiZoneStartDST(self): + self._testMultizone(start=True, test_type='dst') + + def testMultiZoneEndDST(self): + self._testMultizone(start=False, test_type='dst') + + def testMultiZoneKeys(self): + est_str = self._gettz_str('America/New_York') + pst_str = self._gettz_str('America/Los_Angeles') + tzic = tz.tzical(StringIO('\n'.join((est_str, pst_str)))) + + # Sort keys because they are in a random order, being dictionary keys + keys = sorted(tzic.keys()) + + self.assertEqual(keys, ['US-Eastern', 'US-Pacific']) + + # Test error conditions + def testEmptyString(self): + with self.assertRaises(ValueError): + tz.tzical(StringIO("")) + + def testMultiZoneGet(self): + tzic = tz.tzical(StringIO(TZICAL_EST5EDT + TZICAL_PST8PDT)) + + with self.assertRaises(ValueError): + tzic.get() + + def testDtstartDate(self): + tz_str = self._tzstr_dtstart_with_params('America/New_York', + 'VALUE=DATE') + with self.assertRaises(ValueError): + tz.tzical(StringIO(tz_str)) + + def testDtstartTzid(self): + tz_str = self._tzstr_dtstart_with_params('America/New_York', + 'TZID=UTC') + with self.assertRaises(ValueError): + tz.tzical(StringIO(tz_str)) + + def testDtstartBadParam(self): + tz_str = self._tzstr_dtstart_with_params('America/New_York', + 'FOO=BAR') + with self.assertRaises(ValueError): + tz.tzical(StringIO(tz_str)) + + # Test Parsing + def testGap(self): + tzic = tz.tzical(StringIO('\n'.join((TZICAL_EST5EDT, TZICAL_PST8PDT)))) + + keys = sorted(tzic.keys()) + self.assertEqual(keys, ['US-Eastern', 'US-Pacific']) + + +class TZTest(unittest.TestCase): + def testFileStart1(self): + tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT))) + self.assertEqual(datetime(2003, 4, 6, 1, 59, tzinfo=tzc).tzname(), "EST") + self.assertEqual(datetime(2003, 4, 6, 2, 00, tzinfo=tzc).tzname(), "EDT") + + def testFileEnd1(self): + tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT))) + self.assertEqual(datetime(2003, 10, 26, 0, 59, tzinfo=tzc).tzname(), + "EDT") + end_est = tz.enfold(datetime(2003, 10, 26, 1, 00, tzinfo=tzc)) + self.assertEqual(end_est.tzname(), "EST") + + def testFileLastTransition(self): + # After the last transition, it goes to standard time in perpetuity + tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT))) + 
self.assertEqual(datetime(2037, 10, 25, 0, 59, tzinfo=tzc).tzname(), + "EDT") + + last_date = tz.enfold(datetime(2037, 10, 25, 1, 00, tzinfo=tzc), fold=1) + self.assertEqual(last_date.tzname(), + "EST") + + self.assertEqual(datetime(2038, 5, 25, 12, 0, tzinfo=tzc).tzname(), + "EST") + + def testInvalidFile(self): + # Should throw a ValueError if an invalid file is passed + with self.assertRaises(ValueError): + tz.tzfile(BytesIO(b'BadFile')) + + def testFilestreamWithNameRepr(self): + # If fileobj is a filestream with a "name" attribute this name should + # be reflected in the tz object's repr + fileobj = BytesIO(base64.b64decode(TZFILE_EST5EDT)) + fileobj.name = 'foo' + tzc = tz.tzfile(fileobj) + self.assertEqual(repr(tzc), 'tzfile(' + repr('foo') + ')') + + def testRoundNonFullMinutes(self): + # This timezone has an offset of 5992 seconds in 1900-01-01. + tzc = tz.tzfile(BytesIO(base64.b64decode(EUROPE_HELSINKI))) + self.assertEqual(str(datetime(1900, 1, 1, 0, 0, tzinfo=tzc)), + "1900-01-01 00:00:00+01:40") + + def testLeapCountDecodesProperly(self): + # This timezone has leapcnt, and failed to decode until + # Eugene Oden notified about the issue. + + # As leap information is currently unused (and unstored) by tzfile() we + # can only indirectly test this: Take advantage of tzfile() not closing + # the input file if handed in as an opened file and assert that the + # full file content has been read by tzfile(). Note: For this test to + # work NEW_YORK must be in TZif version 1 format i.e. no more data + # after TZif v1 header + data has been read + fileobj = BytesIO(base64.b64decode(NEW_YORK)) + tz.tzfile(fileobj) + # we expect no remaining file content now, i.e. zero-length; if there's + # still data we haven't read the file format correctly + remaining_tzfile_content = fileobj.read() + self.assertEqual(len(remaining_tzfile_content), 0) + + def testIsStd(self): + # NEW_YORK tzfile contains this isstd information: + isstd_expected = (0, 0, 0, 1) + tzc = tz.tzfile(BytesIO(base64.b64decode(NEW_YORK))) + # gather the actual information as parsed by the tzfile class + isstd = [] + for ttinfo in tzc._ttinfo_list: + # ttinfo objects contain boolean values + isstd.append(int(ttinfo.isstd)) + # ttinfo list may contain more entries than isstd file content + isstd = tuple(isstd[:len(isstd_expected)]) + self.assertEqual( + isstd_expected, isstd, + "isstd UTC/local indicators parsed: %s != tzfile contents: %s" + % (isstd, isstd_expected)) + + def testGMTHasNoDaylight(self): + # tz.tzstr("GMT+2") improperly considered daylight saving time. + # Issue reported by Lennart Regebro. + dt = datetime(2007, 8, 6, 4, 10) + self.assertEqual(tz.gettz("GMT+2").dst(dt), timedelta(0)) + + def testGMTOffset(self): + # GMT and UTC offsets have inverted signal when compared to the + # usual TZ variable handling. 
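+        # e.g. under POSIX rules "EST5" means UTC-5, but tz.tzstr("GMT+2")
+        # below is UTC+2 and tz.gettz("UTC-2") is UTC-2 unless
+        # posix_offset=True is requested.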
+ dt = datetime(2007, 8, 6, 4, 10, tzinfo=tz.tzutc()) + self.assertEqual(dt.astimezone(tz=tz.tzstr("GMT+2")), + datetime(2007, 8, 6, 6, 10, tzinfo=tz.tzstr("GMT+2"))) + self.assertEqual(dt.astimezone(tz=tz.gettz("UTC-2")), + datetime(2007, 8, 6, 2, 10, tzinfo=tz.tzstr("UTC-2"))) + + @unittest.skipIf(IS_WIN, "requires Unix") + @unittest.skipUnless(TZEnvContext.tz_change_allowed(), + TZEnvContext.tz_change_disallowed_message()) + def testTZSetDoesntCorrupt(self): + # if we start in non-UTC then tzset UTC make sure parse doesn't get + # confused + with TZEnvContext('UTC'): + # this should parse to UTC timezone not the original timezone + dt = parse('2014-07-20T12:34:56+00:00') + self.assertEqual(str(dt), '2014-07-20 12:34:56+00:00') + + +@unittest.skipUnless(IS_WIN, "Requires Windows") +class TzWinTest(unittest.TestCase, TzWinFoldMixin): + def setUp(self): + self.tzclass = tzwin.tzwin + + def testTzResLoadName(self): + # This may not work right on non-US locales. + tzr = tzwin.tzres() + self.assertEqual(tzr.load_name(112), "Eastern Standard Time") + + def testTzResNameFromString(self): + tzr = tzwin.tzres() + self.assertEqual(tzr.name_from_string('@tzres.dll,-221'), + 'Alaskan Daylight Time') + + self.assertEqual(tzr.name_from_string('Samoa Daylight Time'), + 'Samoa Daylight Time') + + with self.assertRaises(ValueError): + tzr.name_from_string('@tzres.dll,100') + + def testIsdstZoneWithNoDaylightSaving(self): + tz = tzwin.tzwin("UTC") + dt = parse("2013-03-06 19:08:15") + self.assertFalse(tz._isdst(dt)) + + def testOffset(self): + tz = tzwin.tzwin("Cape Verde Standard Time") + self.assertEqual(tz.utcoffset(datetime(1995, 5, 21, 12, 9, 13)), + timedelta(-1, 82800)) + + def testTzwinName(self): + # https://github.com/dateutil/dateutil/issues/143 + tw = tz.tzwin('Eastern Standard Time') + + # Cover the transitions for at least two years. + ESTs = 'Eastern Standard Time' + EDTs = 'Eastern Daylight Time' + transition_dates = [(datetime(2015, 3, 8, 0, 59), ESTs), + (datetime(2015, 3, 8, 3, 1), EDTs), + (datetime(2015, 11, 1, 0, 59), EDTs), + (datetime(2015, 11, 1, 3, 1), ESTs), + (datetime(2016, 3, 13, 0, 59), ESTs), + (datetime(2016, 3, 13, 3, 1), EDTs), + (datetime(2016, 11, 6, 0, 59), EDTs), + (datetime(2016, 11, 6, 3, 1), ESTs)] + + for t_date, expected in transition_dates: + self.assertEqual(t_date.replace(tzinfo=tw).tzname(), expected) + + def testTzwinRepr(self): + tw = tz.tzwin('Yakutsk Standard Time') + self.assertEqual(repr(tw), 'tzwin(' + + repr('Yakutsk Standard Time') + ')') + + def testTzWinEquality(self): + # https://github.com/dateutil/dateutil/issues/151 + tzwin_names = ('Eastern Standard Time', + 'West Pacific Standard Time', + 'Yakutsk Standard Time', + 'Iran Standard Time', + 'UTC') + + for tzwin_name in tzwin_names: + # Get two different instances to compare + tw1 = tz.tzwin(tzwin_name) + tw2 = tz.tzwin(tzwin_name) + + self.assertEqual(tw1, tw2) + + def testTzWinInequality(self): + # https://github.com/dateutil/dateutil/issues/151 + # Note these last two currently differ only in their name. + tzwin_names = (('Eastern Standard Time', 'Yakutsk Standard Time'), + ('Greenwich Standard Time', 'GMT Standard Time'), + ('GMT Standard Time', 'UTC'), + ('E. 
South America Standard Time', + 'Argentina Standard Time')) + + for tzwn1, tzwn2 in tzwin_names: + # Get two different instances to compare + tw1 = tz.tzwin(tzwn1) + tw2 = tz.tzwin(tzwn2) + + self.assertNotEqual(tw1, tw2) + + def testTzWinEqualityInvalid(self): + # Compare to objects that do not implement comparison with this + # (should default to False) + UTC = tz.tzutc() + EST = tz.tzwin('Eastern Standard Time') + + self.assertFalse(EST == UTC) + self.assertFalse(EST == 1) + self.assertFalse(UTC == EST) + + self.assertTrue(EST != UTC) + self.assertTrue(EST != 1) + + def testTzWinInequalityUnsupported(self): + # Compare it to an object that is promiscuous about equality, but for + # which tzwin does not implement an equality operator. + EST = tz.tzwin('Eastern Standard Time') + self.assertTrue(EST == ComparesEqual) + self.assertFalse(EST != ComparesEqual) + + def testTzwinTimeOnlyDST(self): + # For zones with DST, .dst() should return None + tw_est = tz.tzwin('Eastern Standard Time') + self.assertIs(dt_time(14, 10, tzinfo=tw_est).dst(), None) + + # This zone has no DST, so .dst() can return 0 + tw_sast = tz.tzwin('South Africa Standard Time') + self.assertEqual(dt_time(14, 10, tzinfo=tw_sast).dst(), + timedelta(0)) + + def testTzwinTimeOnlyUTCOffset(self): + # For zones with DST, .utcoffset() should return None + tw_est = tz.tzwin('Eastern Standard Time') + self.assertIs(dt_time(14, 10, tzinfo=tw_est).utcoffset(), None) + + # This zone has no DST, so .utcoffset() returns standard offset + tw_sast = tz.tzwin('South Africa Standard Time') + self.assertEqual(dt_time(14, 10, tzinfo=tw_sast).utcoffset(), + timedelta(hours=2)) + + def testTzwinTimeOnlyTZName(self): + # For zones with DST, the name defaults to standard time + tw_est = tz.tzwin('Eastern Standard Time') + self.assertEqual(dt_time(14, 10, tzinfo=tw_est).tzname(), + 'Eastern Standard Time') + + # For zones with no DST, this should work normally. 
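+        # With no DST rule, the zone name does not depend on the date, so a
+        # time-only value can be resolved here.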
+ tw_sast = tz.tzwin('South Africa Standard Time') + self.assertEqual(dt_time(14, 10, tzinfo=tw_sast).tzname(), + 'South Africa Standard Time') + + +@unittest.skipUnless(IS_WIN, "Requires Windows") +@unittest.skipUnless(TZWinContext.tz_change_allowed(), + TZWinContext.tz_change_disallowed_message()) +class TzWinLocalTest(unittest.TestCase, TzWinFoldMixin): + + def setUp(self): + self.tzclass = tzwin.tzwinlocal + self.context = TZWinContext + + def get_args(self, tzname): + return () + + def testLocal(self): + # Not sure how to pin a local time zone, so for now we're just going + # to run this and make sure it doesn't raise an error + # See Github Issue #135: https://github.com/dateutil/dateutil/issues/135 + datetime.now(tzwin.tzwinlocal()) + + def testTzwinLocalUTCOffset(self): + with TZWinContext('Eastern Standard Time'): + tzwl = tzwin.tzwinlocal() + self.assertEqual(datetime(2014, 3, 11, tzinfo=tzwl).utcoffset(), + timedelta(hours=-4)) + + def testTzwinLocalName(self): + # https://github.com/dateutil/dateutil/issues/143 + ESTs = 'Eastern Standard Time' + EDTs = 'Eastern Daylight Time' + transition_dates = [(datetime(2015, 3, 8, 0, 59), ESTs), + (datetime(2015, 3, 8, 3, 1), EDTs), + (datetime(2015, 11, 1, 0, 59), EDTs), + (datetime(2015, 11, 1, 3, 1), ESTs), + (datetime(2016, 3, 13, 0, 59), ESTs), + (datetime(2016, 3, 13, 3, 1), EDTs), + (datetime(2016, 11, 6, 0, 59), EDTs), + (datetime(2016, 11, 6, 3, 1), ESTs)] + + with TZWinContext('Eastern Standard Time'): + tw = tz.tzwinlocal() + + for t_date, expected in transition_dates: + self.assertEqual(t_date.replace(tzinfo=tw).tzname(), expected) + + def testTzWinLocalRepr(self): + tw = tz.tzwinlocal() + self.assertEqual(repr(tw), 'tzwinlocal()') + + def testTzwinLocalRepr(self): + # https://github.com/dateutil/dateutil/issues/143 + with TZWinContext('Eastern Standard Time'): + tw = tz.tzwinlocal() + + self.assertEqual(str(tw), 'tzwinlocal(' + + repr('Eastern Standard Time') + ')') + + with TZWinContext('Pacific Standard Time'): + tw = tz.tzwinlocal() + + self.assertEqual(str(tw), 'tzwinlocal(' + + repr('Pacific Standard Time') + ')') + + def testTzwinLocalEquality(self): + tw_est = tz.tzwin('Eastern Standard Time') + tw_pst = tz.tzwin('Pacific Standard Time') + + with TZWinContext('Eastern Standard Time'): + twl1 = tz.tzwinlocal() + twl2 = tz.tzwinlocal() + + self.assertEqual(twl1, twl2) + self.assertEqual(twl1, tw_est) + self.assertNotEqual(twl1, tw_pst) + + with TZWinContext('Pacific Standard Time'): + twl1 = tz.tzwinlocal() + twl2 = tz.tzwinlocal() + tw = tz.tzwin('Pacific Standard Time') + + self.assertEqual(twl1, twl2) + self.assertEqual(twl1, tw) + self.assertEqual(twl1, tw_pst) + self.assertNotEqual(twl1, tw_est) + + def testTzwinLocalTimeOnlyDST(self): + # For zones with DST, .dst() should return None + with TZWinContext('Eastern Standard Time'): + twl = tz.tzwinlocal() + self.assertIs(dt_time(14, 10, tzinfo=twl).dst(), None) + + # This zone has no DST, so .dst() can return 0 + with TZWinContext('South Africa Standard Time'): + twl = tz.tzwinlocal() + self.assertEqual(dt_time(14, 10, tzinfo=twl).dst(), timedelta(0)) + + def testTzwinLocalTimeOnlyUTCOffset(self): + # For zones with DST, .utcoffset() should return None + with TZWinContext('Eastern Standard Time'): + twl = tz.tzwinlocal() + self.assertIs(dt_time(14, 10, tzinfo=twl).utcoffset(), None) + + # This zone has no DST, so .utcoffset() returns standard offset + with TZWinContext('South Africa Standard Time'): + twl = tz.tzwinlocal() + self.assertEqual(dt_time(14, 10, 
tzinfo=twl).utcoffset(), + timedelta(hours=2)) + + def testTzwinLocalTimeOnlyTZName(self): + # For zones with DST, the name defaults to standard time + with TZWinContext('Eastern Standard Time'): + twl = tz.tzwinlocal() + self.assertEqual(dt_time(14, 10, tzinfo=twl).tzname(), + 'Eastern Standard Time') + + # For zones with no DST, this should work normally. + with TZWinContext('South Africa Standard Time'): + twl = tz.tzwinlocal() + self.assertEqual(dt_time(14, 10, tzinfo=twl).tzname(), + 'South Africa Standard Time') + + +class TzPickleTest(PicklableMixin, unittest.TestCase): + _asfile = False + + def setUp(self): + self.assertPicklable = partial(self.assertPicklable, + asfile=self._asfile) + + def testPickleTzUTC(self): + self.assertPicklable(tz.tzutc(), singleton=True) + + def testPickleTzOffsetZero(self): + self.assertPicklable(tz.tzoffset('UTC', 0), singleton=True) + + def testPickleTzOffsetPos(self): + self.assertPicklable(tz.tzoffset('UTC+1', 3600), singleton=True) + + def testPickleTzOffsetNeg(self): + self.assertPicklable(tz.tzoffset('UTC-1', -3600), singleton=True) + + @pytest.mark.tzlocal + def testPickleTzLocal(self): + self.assertPicklable(tz.tzlocal()) + + def testPickleTzFileEST5EDT(self): + tzc = tz.tzfile(BytesIO(base64.b64decode(TZFILE_EST5EDT))) + self.assertPicklable(tzc) + + def testPickleTzFileEurope_Helsinki(self): + tzc = tz.tzfile(BytesIO(base64.b64decode(EUROPE_HELSINKI))) + self.assertPicklable(tzc) + + def testPickleTzFileNew_York(self): + tzc = tz.tzfile(BytesIO(base64.b64decode(NEW_YORK))) + self.assertPicklable(tzc) + + @unittest.skip("Known failure") + def testPickleTzICal(self): + tzc = tz.tzical(StringIO(TZICAL_EST5EDT)).get() + self.assertPicklable(tzc) + + def testPickleTzGettz(self): + self.assertPicklable(tz.gettz('America/New_York')) + + def testPickleZoneFileGettz(self): + zoneinfo_file = zoneinfo.get_zonefile_instance() + tzi = zoneinfo_file.get('America/New_York') + self.assertIsNot(tzi, None) + self.assertPicklable(tzi) + + +class TzPickleFileTest(TzPickleTest): + """ Run all the TzPickleTest tests, using a temporary file """ + _asfile = True + + +class DatetimeAmbiguousTest(unittest.TestCase): + """ Test the datetime_exists / datetime_ambiguous functions """ + + def testNoTzSpecified(self): + with self.assertRaises(ValueError): + tz.datetime_ambiguous(datetime(2016, 4, 1, 2, 9)) + + def _get_no_support_tzinfo_class(self, dt_start, dt_end, dst_only=False): + # Generates a class of tzinfo with no support for is_ambiguous + # where dates between dt_start and dt_end are ambiguous. 
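+        # Without an is_ambiguous() method, datetime_ambiguous() has to infer
+        # ambiguity by checking whether utcoffset()/dst() differ between the
+        # fold=0 and fold=1 readings of the same wall time.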
+
+        class FoldingTzInfo(tzinfo):
+            def utcoffset(self, dt):
+                if not dst_only:
+                    dt_n = dt.replace(tzinfo=None)
+
+                    if dt_start <= dt_n < dt_end and getattr(dt_n, 'fold', 0):
+                        return timedelta(hours=-1)
+
+                return timedelta(hours=0)
+
+            def dst(self, dt):
+                dt_n = dt.replace(tzinfo=None)
+
+                if dt_start <= dt_n < dt_end and getattr(dt_n, 'fold', 0):
+                    return timedelta(hours=1)
+                else:
+                    return timedelta(0)
+
+        return FoldingTzInfo
+
+    def _get_no_support_tzinfo(self, dt_start, dt_end, dst_only=False):
+        return self._get_no_support_tzinfo_class(dt_start, dt_end, dst_only)()
+
+    def testNoSupportAmbiguityFoldNaive(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end)
+
+        self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30),
+                                              tz=tzi))
+
+    def testNoSupportAmbiguityFoldAware(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end)
+
+        self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30,
+                                                       tzinfo=tzi)))
+
+    def testNoSupportAmbiguityUnambiguousNaive(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end)
+
+        self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30),
+                                               tz=tzi))
+
+    def testNoSupportAmbiguityUnambiguousAware(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end)
+
+        self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30,
+                                                        tzinfo=tzi)))
+
+    def testNoSupportAmbiguityFoldDSTOnly(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end, dst_only=True)
+
+        self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30),
+                                              tz=tzi))
+
+    def testNoSupportAmbiguityUnambiguousDSTOnly(self):
+        dt_start = datetime(2018, 9, 1, 1, 0)
+        dt_end = datetime(2018, 9, 1, 2, 0)
+
+        tzi = self._get_no_support_tzinfo(dt_start, dt_end, dst_only=True)
+
+        self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30),
+                                               tz=tzi))
+
+    def testSupportAmbiguityFoldNaive(self):
+        tzi = tz.gettz('US/Eastern')
+
+        dt = datetime(2011, 11, 6, 1, 30)
+
+        self.assertTrue(tz.datetime_ambiguous(dt, tz=tzi))
+
+    def testSupportAmbiguityFoldAware(self):
+        tzi = tz.gettz('US/Eastern')
+
+        dt = datetime(2011, 11, 6, 1, 30, tzinfo=tzi)
+
+        self.assertTrue(tz.datetime_ambiguous(dt))
+
+    def testSupportAmbiguityUnambiguousNaive(self):
+        tzi = tz.gettz('US/Eastern')
+
+        dt = datetime(2011, 11, 6, 4, 30)
+
+        self.assertFalse(tz.datetime_ambiguous(dt, tz=tzi))
+
+    def testSupportAmbiguityUnambiguousAware(self):
+        tzi = tz.gettz('US/Eastern')
+
+        dt = datetime(2011, 11, 6, 4, 30, tzinfo=tzi)
+
+        self.assertFalse(tz.datetime_ambiguous(dt))
+
+    def _get_ambig_error_tzinfo(self, dt_start, dt_end, dst_only=False):
+        cTzInfo = self._get_no_support_tzinfo_class(dt_start, dt_end, dst_only)
+
+        # Takes the wrong number of arguments and raises an error anyway.
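+        # datetime_ambiguous() should treat the resulting TypeError as
+        # "is_ambiguous() unusable" and fall back to the fold-based check
+        # exercised by the NoSupport tests above.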
+ class FoldTzInfoRaises(cTzInfo): + def is_ambiguous(self, dt, other_arg): + raise NotImplementedError('This is not implemented') + + return FoldTzInfoRaises() + + def testIncompatibleAmbiguityFoldNaive(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end) + + self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30), + tz=tzi)) + + def testIncompatibleAmbiguityFoldAware(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end) + + self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30, + tzinfo=tzi))) + + def testIncompatibleAmbiguityUnambiguousNaive(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end) + + self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30), + tz=tzi)) + + def testIncompatibleAmbiguityUnambiguousAware(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end) + + self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30, + tzinfo=tzi))) + + def testIncompatibleAmbiguityFoldDSTOnly(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end, dst_only=True) + + self.assertTrue(tz.datetime_ambiguous(datetime(2018, 9, 1, 1, 30), + tz=tzi)) + + def testIncompatibleAmbiguityUnambiguousDSTOnly(self): + dt_start = datetime(2018, 9, 1, 1, 0) + dt_end = datetime(2018, 9, 1, 2, 0) + + tzi = self._get_ambig_error_tzinfo(dt_start, dt_end, dst_only=True) + + self.assertFalse(tz.datetime_ambiguous(datetime(2018, 10, 1, 12, 30), + tz=tzi)) + + def testSpecifiedTzOverridesAttached(self): + # If a tz is specified, the datetime will be treated as naive. 
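+        # In other words, an explicit tz= argument wins over dt.tzinfo.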
+ + # This is not ambiguous in the local zone + dt = datetime(2011, 11, 6, 1, 30, tzinfo=tz.gettz('Australia/Sydney')) + + self.assertFalse(tz.datetime_ambiguous(dt)) + + tzi = tz.gettz('US/Eastern') + self.assertTrue(tz.datetime_ambiguous(dt, tz=tzi)) + + +class DatetimeExistsTest(unittest.TestCase): + def testNoTzSpecified(self): + with self.assertRaises(ValueError): + tz.datetime_exists(datetime(2016, 4, 1, 2, 9)) + + def testInGapNaive(self): + tzi = tz.gettz('Australia/Sydney') + + dt = datetime(2012, 10, 7, 2, 30) + + self.assertFalse(tz.datetime_exists(dt, tz=tzi)) + + def testInGapAware(self): + tzi = tz.gettz('Australia/Sydney') + + dt = datetime(2012, 10, 7, 2, 30, tzinfo=tzi) + + self.assertFalse(tz.datetime_exists(dt)) + + def testExistsNaive(self): + tzi = tz.gettz('Australia/Sydney') + + dt = datetime(2012, 10, 7, 10, 30) + + self.assertTrue(tz.datetime_exists(dt, tz=tzi)) + + def testExistsAware(self): + tzi = tz.gettz('Australia/Sydney') + + dt = datetime(2012, 10, 7, 10, 30, tzinfo=tzi) + + self.assertTrue(tz.datetime_exists(dt)) + + def testSpecifiedTzOverridesAttached(self): + EST = tz.gettz('US/Eastern') + AEST = tz.gettz('Australia/Sydney') + + dt = datetime(2012, 10, 7, 2, 30, tzinfo=EST) # This time exists + + self.assertFalse(tz.datetime_exists(dt, tz=AEST)) + + +class EnfoldTest(unittest.TestCase): + def testEnterFoldDefault(self): + dt = tz.enfold(datetime(2020, 1, 19, 3, 32)) + + self.assertEqual(dt.fold, 1) + + def testEnterFold(self): + dt = tz.enfold(datetime(2020, 1, 19, 3, 32), fold=1) + + self.assertEqual(dt.fold, 1) + + def testExitFold(self): + dt = tz.enfold(datetime(2020, 1, 19, 3, 32), fold=0) + + # Before Python 3.6, dt.fold won't exist if fold is 0. + self.assertEqual(getattr(dt, 'fold', 0), 0) + + +@pytest.mark.tz_resolve_imaginary +class ImaginaryDateTest(unittest.TestCase): + def testCanberraForward(self): + tzi = tz.gettz('Australia/Canberra') + dt = datetime(2018, 10, 7, 2, 30, tzinfo=tzi) + dt_act = tz.resolve_imaginary(dt) + dt_exp = datetime(2018, 10, 7, 3, 30, tzinfo=tzi) + self.assertEqual(dt_act, dt_exp) + + def testLondonForward(self): + tzi = tz.gettz('Europe/London') + dt = datetime(2018, 3, 25, 1, 30, tzinfo=tzi) + dt_act = tz.resolve_imaginary(dt) + dt_exp = datetime(2018, 3, 25, 2, 30, tzinfo=tzi) + self.assertEqual(dt_act, dt_exp) + + def testKeivForward(self): + tzi = tz.gettz('Europe/Kiev') + dt = datetime(2018, 3, 25, 3, 30, tzinfo=tzi) + dt_act = tz.resolve_imaginary(dt) + dt_exp = datetime(2018, 3, 25, 4, 30, tzinfo=tzi) + self.assertEqual(dt_act, dt_exp) + + +@pytest.mark.tz_resolve_imaginary +@pytest.mark.parametrize('dt', [ + datetime(2017, 11, 5, 1, 30, tzinfo=tz.gettz('America/New_York')), + datetime(2018, 10, 28, 1, 30, tzinfo=tz.gettz('Europe/London')), + datetime(2017, 4, 2, 2, 30, tzinfo=tz.gettz('Australia/Sydney')), +]) +def test_resolve_imaginary_ambiguous(dt): + assert tz.resolve_imaginary(dt) is dt + + dt_f = tz.enfold(dt) + assert dt is not dt_f + assert tz.resolve_imaginary(dt_f) is dt_f + + +@pytest.mark.tz_resolve_imaginary +@pytest.mark.parametrize('dt', [ + datetime(2017, 6, 2, 12, 30, tzinfo=tz.gettz('America/New_York')), + datetime(2018, 4, 2, 9, 30, tzinfo=tz.gettz('Europe/London')), + datetime(2017, 2, 2, 16, 30, tzinfo=tz.gettz('Australia/Sydney')), + datetime(2017, 12, 2, 12, 30, tzinfo=tz.gettz('America/New_York')), + datetime(2018, 12, 2, 9, 30, tzinfo=tz.gettz('Europe/London')), + datetime(2017, 6, 2, 16, 30, tzinfo=tz.gettz('Australia/Sydney')), + datetime(2025, 9, 25, 1, 17, tzinfo=tz.tzutc()), + 
datetime(2025, 9, 25, 1, 17, tzinfo=tz.tzoffset('EST', -18000)), + datetime(2019, 3, 4, tzinfo=None) +]) +def test_resolve_imaginary_existing(dt): + assert tz.resolve_imaginary(dt) is dt + + +def __get_kiritimati_resolve_imaginary_test(): + # In the 2018d release of the IANA database, the Kiritimati "imaginary day" + # data was corrected, so if the system zoneinfo is older than 2018d, the + # Kiritimati test will fail. + + tzi = tz.gettz('Pacific/Kiritimati') + new_version = False + if not tz.datetime_exists(datetime(1995, 1, 1, 12, 30), tzi): + zif = zoneinfo.get_zonefile_instance() + if zif.metadata is not None: + new_version = zif.metadata['tzversion'] >= '2018d' + + if new_version: + tzi = zif.get('Pacific/Kiritimati') + else: + new_version = True + + if new_version: + dates = (datetime(1994, 12, 31, 12, 30), datetime(1995, 1, 1, 12, 30)) + else: + dates = (datetime(1995, 1, 1, 12, 30), datetime(1995, 1, 2, 12, 30)) + + return (tzi, ) + dates + + +@pytest.mark.tz_resolve_imaginary +@pytest.mark.parametrize('tzi, dt, dt_exp', [ + (tz.gettz('Europe/London'), + datetime(2018, 3, 25, 1, 30), datetime(2018, 3, 25, 2, 30)), + (tz.gettz('America/New_York'), + datetime(2017, 3, 12, 2, 30), datetime(2017, 3, 12, 3, 30)), + (tz.gettz('Australia/Sydney'), + datetime(2014, 10, 5, 2, 0), datetime(2014, 10, 5, 3, 0)), + __get_kiritimati_resolve_imaginary_test(), +]) +def test_resolve_imaginary(tzi, dt, dt_exp): + dt = dt.replace(tzinfo=tzi) + dt_exp = dt_exp.replace(tzinfo=tzi) + + dt_r = tz.resolve_imaginary(dt) + assert dt_r == dt_exp + assert dt_r.tzname() == dt_exp.tzname() + assert dt_r.utcoffset() == dt_exp.utcoffset() + + +@pytest.mark.xfail +@pytest.mark.tz_resolve_imaginary +def test_resolve_imaginary_monrovia(): + # See GH #582 - When that is resolved, move this into test_resolve_imaginary + tzi = tz.gettz('Africa/Monrovia') + dt = datetime(1972, 1, 7, hour=0, minute=30, second=0, tzinfo=tzi) + dt_exp = datetime(1972, 1, 7, hour=1, minute=14, second=30, tzinfo=tzi) + + dt_r = tz.resolve_imaginary(dt) + assert dt_r == dt_exp + assert dt_r.tzname() == dt_exp.tzname() + assert dt_r.utcoffset() == dt_exp.utcoffset() diff --git a/libraries/dateutil/test/test_utils.py b/libraries/dateutil/test/test_utils.py new file mode 100644 index 00000000..fcdec1a5 --- /dev/null +++ b/libraries/dateutil/test/test_utils.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from datetime import timedelta, datetime + +import unittest + +from dateutil import tz +from dateutil import utils +from dateutil.utils import within_delta + +from freezegun import freeze_time + +UTC = tz.tzutc() +NYC = tz.gettz("America/New_York") + + +class UtilsTest(unittest.TestCase): + @freeze_time(datetime(2014, 12, 15, 1, 21, 33, 4003)) + def testToday(self): + self.assertEqual(utils.today(), datetime(2014, 12, 15, 0, 0, 0)) + + @freeze_time(datetime(2014, 12, 15, 12), tz_offset=5) + def testTodayTzInfo(self): + self.assertEqual(utils.today(NYC), + datetime(2014, 12, 15, 0, 0, 0, tzinfo=NYC)) + + @freeze_time(datetime(2014, 12, 15, 23), tz_offset=5) + def testTodayTzInfoDifferentDay(self): + self.assertEqual(utils.today(UTC), + datetime(2014, 12, 16, 0, 0, 0, tzinfo=UTC)) + + def testDefaultTZInfoNaive(self): + dt = datetime(2014, 9, 14, 9, 30) + self.assertIs(utils.default_tzinfo(dt, NYC).tzinfo, + NYC) + + def testDefaultTZInfoAware(self): + dt = datetime(2014, 9, 14, 9, 30, tzinfo=UTC) + self.assertIs(utils.default_tzinfo(dt, NYC).tzinfo, + UTC) + + def testWithinDelta(self): + d1 = datetime(2016, 
1, 1, 12, 14, 1, 9) + d2 = d1.replace(microsecond=15) + + self.assertTrue(within_delta(d1, d2, timedelta(seconds=1))) + self.assertFalse(within_delta(d1, d2, timedelta(microseconds=1))) + + def testWithinDeltaWithNegativeDelta(self): + d1 = datetime(2016, 1, 1) + d2 = datetime(2015, 12, 31) + + self.assertTrue(within_delta(d2, d1, timedelta(days=-1))) diff --git a/libraries/dateutil/tz/__init__.py b/libraries/dateutil/tz/__init__.py new file mode 100644 index 00000000..5a2d9cd6 --- /dev/null +++ b/libraries/dateutil/tz/__init__.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +from .tz import * +from .tz import __doc__ + +#: Convenience constant providing a :class:`tzutc()` instance +#: +#: .. versionadded:: 2.7.0 +UTC = tzutc() + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", "datetime_ambiguous", "datetime_exists", + "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] + + +class DeprecatedTzFormatWarning(Warning): + """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/libraries/dateutil/tz/_common.py b/libraries/dateutil/tz/_common.py new file mode 100644 index 00000000..ccabb7da --- /dev/null +++ b/libraries/dateutil/tz/_common.py @@ -0,0 +1,415 @@ +from six import PY3 + +from functools import wraps + +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None and not PY3: + name = name.encode() + + return name + + return adjust_encoding + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. versionadded:: 2.6.0 + """ + __slots__ = () + + def replace(self, *args, **kwargs): + """ + Return a datetime with the same attributes, except for those + attributes given new values by whichever keyword arguments are + specified. Note that tzinfo=None can be specified to create a naive + datetime from an aware datetime with no conversion of date and time + data. + + This is reimplemented in ``_DatetimeWithFold`` because pypy3 will + return a ``datetime.datetime`` even if ``fold`` is unchanged. 
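+
+            (Illustrative note: without this override, a call such as
+            ``enfold(dt).replace(minute=0)`` could come back as a plain
+            ``datetime`` and silently report ``fold == 0`` on pre-3.6
+            Pythons.)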
+ """ + argnames = ( + 'year', 'month', 'day', 'hour', 'minute', 'second', + 'microsecond', 'tzinfo' + ) + + for arg, argname in zip(args, argnames): + if argname in kwargs: + raise TypeError('Duplicate argument: {}'.format(argname)) + + kwargs[argname] = arg + + for argname in argnames: + if argname not in kwargs: + kwargs[argname] = getattr(self, argname) + + dt_class = self.__class__ if kwargs.get('fold', 1) else datetime + + return dt_class(**kwargs) + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. + """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. + """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. 
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+
+        # Re-implement the algorithm from Python's datetime.py
+        dtoff = dt.utcoffset()
+        if dtoff is None:
+            raise ValueError("fromutc() requires a non-None utcoffset() "
+                             "result")
+
+        # The original datetime.py code assumes that `dst()` defaults to
+        # zero during ambiguous times. PEP 495 inverts this presumption, so
+        # for pre-PEP 495 versions of python, we need to tweak the algorithm.
+        dtdst = dt.dst()
+        if dtdst is None:
+            raise ValueError("fromutc() requires a non-None dst() result")
+        delta = dtoff - dtdst
+
+        dt += delta
+        # Set fold=1 so we can default to being in the fold for
+        # ambiguous dates.
+        dtdst = enfold(dt, fold=1).dst()
+        if dtdst is None:
+            raise ValueError("fromutc(): dt.dst gave inconsistent "
+                             "results; cannot convert")
+        return dt + dtdst
+
+    @_validate_fromutc_inputs
+    def fromutc(self, dt):
+        """
+        Given a timezone-aware datetime in a given timezone, calculates a
+        timezone-aware datetime in a new timezone.
+
+        Since this is the one time that we *know* we have an unambiguous
+        datetime object, we take this opportunity to determine whether the
+        datetime is ambiguous and in a "fold" state (e.g. if it's the first
+        occurrence, chronologically, of the ambiguous datetime).
+
+        :param dt:
+            A timezone-aware :class:`datetime.datetime` object.
+        """
+        dt_wall = self._fromutc(dt)
+
+        # Calculate the fold status given the two datetimes.
+        _fold = self._fold_status(dt, dt_wall)
+
+        # Set the default fold value for ambiguous dates
+        return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+    """
+    This is an abstract base class for time zones represented by an annual
+    transition into and out of DST. Child classes should implement the following
+    methods:
+
+        * ``__init__(self, *args, **kwargs)``
+        * ``transitions(self, year)`` - this is expected to return a tuple of
+          datetimes representing the DST on and off transitions in standard
+          time.
+
+    A fully initialized ``tzrangebase`` subclass should also provide the
+    following attributes:
+        * ``hasdst``: Boolean whether or not the zone uses DST.
+        * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+          representing the respective UTC offsets.
+        * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+          abbreviations in DST and STD, respectively.
+        * ``_hasdst``: Whether or not the zone has DST.
+
+    ..
versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ diff --git a/libraries/dateutil/tz/_factories.py b/libraries/dateutil/tz/_factories.py new file mode 100644 index 00000000..de2e0c1d --- /dev/null +++ b/libraries/dateutil/tz/_factories.py @@ -0,0 +1,49 @@ +from datetime import timedelta + + +class _TzSingleton(type): + def __init__(cls, *args, **kwargs): + cls.__instance = None + super(_TzSingleton, cls).__init__(*args, **kwargs) + + def __call__(cls): + if cls.__instance is None: + cls.__instance = super(_TzSingleton, cls).__call__() + return cls.__instance + +class _TzFactory(type): + def instance(cls, *args, **kwargs): + """Alternate constructor that returns a fresh instance""" + return type.__call__(cls, *args, **kwargs) + + +class _TzOffsetFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = {} + + def 
__call__(cls, name, offset):
+        if isinstance(offset, timedelta):
+            key = (name, offset.total_seconds())
+        else:
+            key = (name, offset)
+
+        instance = cls.__instances.get(key, None)
+        if instance is None:
+            instance = cls.__instances.setdefault(key,
+                                                  cls.instance(name, offset))
+        return instance
+
+
+class _TzStrFactory(_TzFactory):
+    def __init__(cls, *args, **kwargs):
+        cls.__instances = {}
+
+    def __call__(cls, s, posix_offset=False):
+        key = (s, posix_offset)
+        instance = cls.__instances.get(key, None)
+
+        if instance is None:
+            instance = cls.__instances.setdefault(key,
+                                                  cls.instance(s, posix_offset))
+        return instance
+
diff --git a/libraries/dateutil/tz/tz.py b/libraries/dateutil/tz/tz.py
new file mode 100644
index 00000000..6fcfce86
--- /dev/null
+++ b/libraries/dateutil/tz/tz.py
@@ -0,0 +1,1785 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers timezone implementations subclassing the abstract
+:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
+files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
+etc.), TZ environment string (in all known formats), given ranges (with help
+from relative deltas), local machine timezone, fixed offset timezone, and UTC
+timezone.
+"""
+import datetime
+import struct
+import time
+import sys
+import os
+import bisect
+
+import six
+from six import string_types
+from six.moves import _thread
+from ._common import tzname_in_python2, _tzinfo
+from ._common import tzrangebase, enfold
+from ._common import _validate_fromutc_inputs
+
+from ._factories import _TzSingleton, _TzOffsetFactory
+from ._factories import _TzStrFactory
+try:
+    from .win import tzwin, tzwinlocal
+except ImportError:
+    tzwin = tzwinlocal = None
+
+ZERO = datetime.timedelta(0)
+EPOCH = datetime.datetime.utcfromtimestamp(0)
+EPOCHORDINAL = EPOCH.toordinal()
+
+
+@six.add_metaclass(_TzSingleton)
+class tzutc(datetime.tzinfo):
+    """
+    This is a tzinfo object that represents the UTC time zone.
+
+    **Examples:**
+
+    .. doctest::
+
+        >>> from datetime import *
+        >>> from dateutil.tz import *
+
+        >>> datetime.now()
+        datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)
+
+        >>> datetime.now(tzutc())
+        datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())
+
+        >>> datetime.now(tzutc()).tzname()
+        'UTC'
+
+    .. versionchanged:: 2.7.0
+        ``tzutc()`` is now a singleton, so calling ``tzutc()`` will always
+        return the same object.
+
+        .. doctest::
+
+            >>> from dateutil.tz import tzutc, UTC
+            >>> tzutc() is tzutc()
+            True
+            >>> tzutc() is UTC
+            True
+    """
+    def utcoffset(self, dt):
+        return ZERO
+
+    def dst(self, dt):
+        return ZERO
+
+    @tzname_in_python2
+    def tzname(self, dt):
+        return "UTC"
+
+    def is_ambiguous(self, dt):
+        """
+        Whether or not the "wall time" of a given datetime is ambiguous in this
+        zone.
+
+        :param dt:
+            A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+        :return:
+            Returns ``True`` if ambiguous, ``False`` otherwise.
+
+        .. versionadded:: 2.6.0
+        """
+        return False
+
+    @_validate_fromutc_inputs
+    def fromutc(self, dt):
+        """
+        Fast track version of fromutc() returns the original ``dt`` object for
+        any valid :py:class:`datetime.datetime` object.
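+
+        For example (an illustrative doctest, relying on the singleton
+        behavior documented above):
+
+        .. doctest::
+
+            >>> from datetime import datetime
+            >>> from dateutil.tz import tzutc
+            >>> dt = datetime(2018, 1, 1, tzinfo=tzutc())
+            >>> tzutc().fromutc(dt) is dt
+            True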
+ """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +@six.add_metaclass(_TzOffsetFactory) +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object). + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = offset.total_seconds() + except (TypeError, AttributeError): + pass + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(self._offset.total_seconds())) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. + """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + self._tznames = tuple(time.tzname) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._tznames[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. 
+ # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if isinstance(other, tzlocal): + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + elif isinstance(other, tzutc): + return (not self._hasdst and + self._tznames[0] in {'UTC', 'GMT'} and + self._std_offset == ZERO) + elif isinstance(other, tzoffset): + return (not self._hasdst and + self._tznames[0] == other._name and + self._std_offset == other._offset) + else: + return NotImplemented + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. 
If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + <https://data.iana.org/time-zones/tz-link.html>`_ for more information. + Time zone files can be compiled from the `IANA Time Zone database files + <https://www.iana.org/time-zones>`_ with the `zic time zone compiler + <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_ + + .. note:: + + Only construct a ``tzfile`` directly if you have a specific timezone + file on disk that you want to read into a Python ``tzinfo`` object. + If you want to get a ``tzfile`` representing a specific IANA zone, + (e.g. ``'America/New_York'``), you should call + :func:`dateutil.tz.gettz` with the zone identifier. + + + **Examples:** + + Using the US Eastern time zone as an example, we can see that a ``tzfile`` + provides time zone information for the standard Daylight Saving offsets: + + .. testsetup:: tzfile + + from dateutil.tz import gettz + from datetime import datetime + + .. doctest:: tzfile + + >>> NYC = gettz('America/New_York') + >>> NYC + tzfile('/usr/share/zoneinfo/America/New_York') + + >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST + 2016-01-03 00:00:00-05:00 + + >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT + 2016-07-07 00:00:00-04:00 + + + The ``tzfile`` structure contains a fully history of the time zone, + so historical dates will also have the right offsets. For example, before + the adoption of the UTC standards, New York used local solar mean time: + + .. doctest:: tzfile + + >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT + 1901-04-12 00:00:00-04:56 + + And during World War II, New York was on "Eastern War Time", which was a + state of permanent daylight saving time: + + .. doctest:: tzfile + + >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT + 1944-02-07 00:00:00-04:00 + + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _ContextWrapper(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. 
+ leapcnt, + + # The number of "transition times" for which data + # is stored in the file. + timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). + typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. 
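+            # (Illustrative: New York's LMT offset of -17762 seconds rounds
+            # to -17760 seconds, i.e. the -04:56 shown in the docstring
+            # above.)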
+ gmtoff = 60 * ((gmtoff + 30) // 60) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = None + out.trans_list = [] + for i, tti in enumerate(out.trans_idx): + if not tti.isdst: + offset = tti.offset + laststdoffset = offset + else: + if laststdoffset is not None: + # Store the DST offset as well and update it in the list + tti.dstoffset = tti.offset - laststdoffset + out.trans_idx[i] = tti + + offset = laststdoffset or 0 + + out.trans_list.append(out.trans_list_utc[i] + offset) + + # In case we missed any DST offsets on the way in for some reason, make + # a second pass over the list, looking for the /next/ DST offset. + laststdoffset = None + for i in reversed(range(len(out.trans_idx))): + tti = out.trans_idx[i] + if tti.isdst: + if not (tti.dstoffset or laststdoffset is None): + tti.dstoffset = tti.offset - laststdoffset + else: + laststdoffset = tti.offset + + if not isinstance(tti.dstoffset, datetime.timedelta): + tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset) + + out.trans_idx[i] = tti + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. 
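+        # (bisect_right places an exact match to the right of any equal
+        # entries, which is what puts a transition instant under the new
+        # rules.)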
+ trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. + """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. + idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. 
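+        # (tti.dstoffset was precomputed in _read_tzfile as this DST offset
+        # minus the preceding standard offset, so it is the amount of DST
+        # rather than the full UTC offset.)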
+ return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To + specify, for example, that DST starts at 2AM on the 2nd Sunday in + March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent + representing the time and time of year that daylight savings time + ends, with the same specification method as in ``start``. One note is + that this should point to the first time in the *standard* zone, so if + a transition occurs at 2AM in the DST zone and the clocks are set back + 1 hour to 1AM, set the ``hours`` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = stdoffset.total_seconds() + except (TypeError, AttributeError): + pass + + try: + dstoffset = dstoffset.total_seconds() + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. + """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +@six.add_metaclass(_TzStrFactory) +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: + :class:`unicode`) or a stream emitting unicode characters + (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. 
If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. caution:: + + Prior to version 2.7.0, this function also supported time zones + in the format: + + * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` + * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` + + This format is non-standard and has been deprecated; this function + will raise a :class:`DeprecatedTZFormatWarning` until + support is removed in a future version. + + .. _`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil.parser import _parser as parser + + self._s = s + + res = parser._parsetz(s) + if res is None or res.any_unused_tokens: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. 
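+            # (For a typical one-hour DST zone this turns an end rule of
+            # 2 AM DST wall time into 1 AM standard time.)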
+ delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + self._cache_lock = _thread.allocate_lock() + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + with self._cache_lock: + return self._cachecomp[self._cachedate.index( + (dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. + for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + with self._cache_lock: + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "<tzicalvtz %s>" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _ContextWrapper(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. 
Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. + """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + # DTSTART in VTIMEZONE takes a subset of valid RRULE + # values under RFC 5545. 
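+                    # (After validating its parameters below, the DTSTART
+                    # line is kept in rrulelines so that rrulestr() can use
+                    # it as the recurrence anchor.)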
+ for parm in parms: + if parm != 'VALUE=DATE-TIME': + msg = ('Unsupported DTSTART param in ' + + 'VTIMEZONE: ' + parm) + raise ValueError(msg) + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def __get_gettz(): + tzlocal_classes = (tzlocal,) + if tzwinlocal is not None: + tzlocal_classes += (tzwinlocal,) + + class GettzFunc(object): + """ + Retrieve a time zone object from a string representation + + This function is intended to retrieve the :py:class:`tzinfo` subclass + that best represents the time zone that would be used if a POSIX + `TZ variable`_ were set to the same value. + + If no argument or an empty string is passed to ``gettz``, local time + is returned: + + .. code-block:: python3 + + >>> gettz() + tzfile('/etc/localtime') + + This function is also the preferred way to map IANA tz database keys + to :class:`tzfile` objects: + + .. code-block:: python3 + + >>> gettz('Pacific/Kiritimati') + tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') + + On Windows, the standard is extended to include the Windows-specific + zone names provided by the operating system: + + .. code-block:: python3 + + >>> gettz('Egypt Standard Time') + tzwin('Egypt Standard Time') + + Passing a GNU ``TZ`` style string time zone specification returns a + :class:`tzstr` object: + + .. code-block:: python3 + + >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + + :param name: + A time zone name (IANA, or, on Windows, Windows keys), location of + a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone + specifier. An empty string, no argument or ``None`` is interpreted + as local time. + + :return: + Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` + subclasses. + + .. versionchanged:: 2.7.0 + + After version 2.7.0, any two calls to ``gettz`` using the same + input strings will return the same object: + + .. code-block:: python3 + + >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') + True + + In addition to improving performance, this ensures that + `"same zone" semantics`_ are used for datetimes in the same zone. + + + .. _`TZ variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + + .. 
_`"same zone" semantics`:
+           https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
+        """
+        def __init__(self):
+
+            self.__instances = {}
+            self._cache_lock = _thread.allocate_lock()
+
+        def __call__(self, name=None):
+            with self._cache_lock:
+                rv = self.__instances.get(name, None)
+
+                if rv is None:
+                    rv = self.nocache(name=name)
+                    if not (name is None or isinstance(rv, tzlocal_classes)):
+                        # tzlocal is slightly more complicated than the other
+                        # time zone providers because it depends on environment
+                        # at construction time, so don't cache that.
+                        self.__instances[name] = rv
+
+            return rv
+
+        def cache_clear(self):
+            with self._cache_lock:
+                self.__instances = {}
+
+        @staticmethod
+        def nocache(name=None):
+            """A non-cached version of gettz"""
+            tz = None
+            if not name:
+                try:
+                    name = os.environ["TZ"]
+                except KeyError:
+                    pass
+            if name is None or name == ":":
+                for filepath in TZFILES:
+                    if not os.path.isabs(filepath):
+                        filename = filepath
+                        for path in TZPATHS:
+                            filepath = os.path.join(path, filename)
+                            if os.path.isfile(filepath):
+                                break
+                        else:
+                            continue
+                    if os.path.isfile(filepath):
+                        try:
+                            tz = tzfile(filepath)
+                            break
+                        except (IOError, OSError, ValueError):
+                            pass
+                else:
+                    tz = tzlocal()
+            else:
+                if name.startswith(":"):
+                    name = name[1:]
+                if os.path.isabs(name):
+                    if os.path.isfile(name):
+                        tz = tzfile(name)
+                    else:
+                        tz = None
+                else:
+                    for path in TZPATHS:
+                        filepath = os.path.join(path, name)
+                        if not os.path.isfile(filepath):
+                            filepath = filepath.replace(' ', '_')
+                            if not os.path.isfile(filepath):
+                                continue
+                        try:
+                            tz = tzfile(filepath)
+                            break
+                        except (IOError, OSError, ValueError):
+                            pass
+                    else:
+                        tz = None
+                        if tzwin is not None:
+                            try:
+                                tz = tzwin(name)
+                            except WindowsError:
+                                tz = None
+
+                        if not tz:
+                            from dateutil.zoneinfo import get_zonefile_instance
+                            tz = get_zonefile_instance().get(name)
+
+                        if not tz:
+                            for c in name:
+                                # name is not a tzstr unless it has at least
+                                # one offset. For short values of "name", an
+                                # explicit for loop seems to be the fastest
+                                # way to determine if a string contains a digit
+                                if c in "0123456789":
+                                    try:
+                                        tz = tzstr(name)
+                                    except ValueError:
+                                        pass
+                                    break
+                            else:
+                                if name in ("GMT", "UTC"):
+                                    tz = tzutc()
+                                elif name in time.tzname:
+                                    tz = tzlocal()
+            return tz
+
+    return GettzFunc()
+
+
+gettz = __get_gettz()
+del __get_gettz
+
+
+def datetime_exists(dt, tz=None):
+    """
+    Given a datetime and a time zone, determine whether or not that datetime
+    would fall in a gap.
+
+    :param dt:
+        A :class:`datetime.datetime` (whose time zone will be ignored if
+        ``tz`` is provided).
+
+    :param tz:
+        A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+        ``None`` or not provided, the datetime's own time zone will be used.
+
+    :return:
+        Returns a boolean indicating whether or not the "wall time" exists in
+        ``tz``.
+
+    .. versionadded:: 2.7.0
+    """
+    if tz is None:
+        if dt.tzinfo is None:
+            raise ValueError('Datetime is naive and no time zone provided.')
+        tz = dt.tzinfo
+
+    dt = dt.replace(tzinfo=None)
+
+    # This is essentially a test of whether or not the datetime can survive
+    # a round trip to UTC.
+    dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz)
+    dt_rt = dt_rt.replace(tzinfo=None)
+
+    return dt == dt_rt
+
+
+def datetime_ambiguous(dt, tz=None):
+    """
+    Given a datetime and a time zone, determine whether or not that datetime
+    is ambiguous (i.e. if there are two times differentiated only by their DST
+    status).
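+
+    For example, the hour repeated when US daylight saving time ends is
+    ambiguous, while an ordinary afternoon is not (a sketch assuming the
+    standard IANA rules for America/New_York):
+
+    .. doctest::
+
+        >>> from datetime import datetime
+        >>> from dateutil import tz
+        >>> NYC = tz.gettz('America/New_York')
+        >>> tz.datetime_ambiguous(datetime(2017, 11, 5, 1, 30), tz=NYC)
+        True
+        >>> tz.datetime_ambiguous(datetime(2017, 11, 5, 12, 30), tz=NYC)
+        False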
+ + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. + is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except Exception: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def resolve_imaginary(dt): + """ + Given a datetime that may be imaginary, return an existing datetime. + + This function assumes that an imaginary datetime represents what the + wall time would be in a zone had the offset transition not occurred, so + it will always fall forward by the transition's change in offset. + + .. doctest:: + + >>> from dateutil import tz + >>> from datetime import datetime + >>> NYC = tz.gettz('America/New_York') + >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) + 2017-03-12 03:30:00-04:00 + + >>> KIR = tz.gettz('Pacific/Kiritimati') + >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) + 1995-01-02 12:30:00+14:00 + + As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, + existing datetime, so a round-trip to and from UTC is sufficient to get + an extant datetime, however, this generally "falls back" to an earlier time + rather than falling forward to the STD side (though no guarantees are made + about this behavior). + + :param dt: + A :class:`datetime.datetime` which may or may not exist. + + :return: + Returns an existing :class:`datetime.datetime`. If ``dt`` was not + imaginary, the datetime returned is guaranteed to be the same object + passed to the function. + + .. versionadded:: 2.7.0 + """ + if dt.tzinfo is not None and not datetime_exists(dt): + + curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() + old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() + + dt += curr_offset - old_offset + + return dt + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in + seconds since January 1, 1970, ignoring the time zone. + """ + return (dt.replace(tzinfo=None) - EPOCH).total_seconds() + + +class _ContextWrapper(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. + """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/libraries/dateutil/tz/win.py b/libraries/dateutil/tz/win.py new file mode 100644 index 00000000..def4353a --- /dev/null +++ b/libraries/dateutil/tz/win.py @@ -0,0 +1,331 @@ +# This code was originally contributed by Jeffrey Harris. 
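+#
+# The classes below build tzinfo objects from the time zone definitions in
+# the Windows registry rather than from tzfile(5) zoneinfo data.  A minimal
+# usage sketch (assuming a Windows host; the display string comes from the
+# local registry, so the exact value may vary between Windows versions):
+#
+#     >>> from dateutil.tz.win import tzwin
+#     >>> tz = tzwin('Eastern Standard Time')
+#     >>> tz.display()
+#     '(UTC-05:00) Eastern Time (US & Canada)'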
+import datetime
+import struct
+
+from six.moves import winreg
+from six import text_type
+
+try:
+    import ctypes
+    from ctypes import wintypes
+except ValueError:
+    # ValueError is raised on non-Windows systems for some horrible reason.
+    raise ImportError("Running tzwin on non-Windows system")
+
+from ._common import tzrangebase
+
+__all__ = ["tzwin", "tzwinlocal", "tzres"]
+
+ONEWEEK = datetime.timedelta(7)
+
+TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
+TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
+TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
+
+
+def _settzkeyname():
+    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+    try:
+        winreg.OpenKey(handle, TZKEYNAMENT).Close()
+        TZKEYNAME = TZKEYNAMENT
+    except WindowsError:
+        TZKEYNAME = TZKEYNAME9X
+    handle.Close()
+    return TZKEYNAME
+
+
+TZKEYNAME = _settzkeyname()
+
+
+class tzres(object):
+    """
+    Class for accessing `tzres.dll`, which contains timezone name related
+    resources.
+
+    .. versionadded:: 2.5.0
+    """
+    p_wchar = ctypes.POINTER(wintypes.WCHAR)  # Pointer to a wide char
+
+    def __init__(self, tzres_loc='tzres.dll'):
+        # Load the user32 DLL so we can load strings from tzres
+        user32 = ctypes.WinDLL('user32')
+
+        # Specify the LoadStringW function
+        user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
+                                       wintypes.UINT,
+                                       wintypes.LPWSTR,
+                                       ctypes.c_int)
+
+        self.LoadStringW = user32.LoadStringW
+        self._tzres = ctypes.WinDLL(tzres_loc)
+        self.tzres_loc = tzres_loc
+
+    def load_name(self, offset):
+        """
+        Load a timezone name from a DLL offset (integer).
+
+        >>> from dateutil.tzwin import tzres
+        >>> tzr = tzres()
+        >>> print(tzr.load_name(112))
+        'Eastern Standard Time'
+
+        :param offset:
+            A positive integer value referring to a string from the tzres dll.
+
+        .. note::
+            Offsets found in the registry are generally of the form
+            `@tzres.dll,-114`. The offset in this case is 114, not -114.
+
+        """
+        resource = self.p_wchar()
+        lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
+        nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
+        return resource[:nchar]
+
+    def name_from_string(self, tzname_str):
+        """
+        Parse strings as returned from the Windows registry into the time zone
+        name as defined in the registry.
+
+        >>> from dateutil.tzwin import tzres
+        >>> tzr = tzres()
+        >>> print(tzr.name_from_string('@tzres.dll,-251'))
+        'Dateline Daylight Time'
+        >>> print(tzr.name_from_string('Eastern Standard Time'))
+        'Eastern Standard Time'
+
+        :param tzname_str:
+            A timezone name string as returned from a Windows registry key.
+
+        :return:
+            Returns the localized timezone string from tzres.dll if the string
+            is of the form `@tzres.dll,-offset`, else returns the input string.
+        """
+        if not tzname_str.startswith('@'):
+            return tzname_str
+
+        name_splt = tzname_str.split(',-')
+        try:
+            offset = int(name_splt[1])
+        except (IndexError, ValueError):
+            raise ValueError("Malformed timezone string.")
+
+        return self.load_name(offset)
+
+
+class tzwinbase(tzrangebase):
+    """tzinfo class based on win32's timezones available in the registry."""
+    def __init__(self):
+        raise NotImplementedError('tzwinbase is an abstract base class')
+
+    def __eq__(self, other):
+        # Compare on all relevant dimensions, including name.
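+        # Equality is structural: every transition-rule field and both
+        # abbreviations must match, so two instances built from the same
+        # registry data compare equal.  Returning NotImplemented (rather
+        # than False) lets Python try the reflected comparison on `other`.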
+        if not isinstance(other, tzwinbase):
+            return NotImplemented
+
+        return (self._std_offset == other._std_offset and
+                self._dst_offset == other._dst_offset and
+                self._stddayofweek == other._stddayofweek and
+                self._dstdayofweek == other._dstdayofweek and
+                self._stdweeknumber == other._stdweeknumber and
+                self._dstweeknumber == other._dstweeknumber and
+                self._stdhour == other._stdhour and
+                self._dsthour == other._dsthour and
+                self._stdminute == other._stdminute and
+                self._dstminute == other._dstminute and
+                self._std_abbr == other._std_abbr and
+                self._dst_abbr == other._dst_abbr)
+
+    @staticmethod
+    def list():
+        """Return a list of all time zones known to the system."""
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+            with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
+                result = [winreg.EnumKey(tzkey, i)
+                          for i in range(winreg.QueryInfoKey(tzkey)[0])]
+        return result
+
+    def display(self):
+        return self._display
+
+    def transitions(self, year):
+        """
+        For a given year, get the DST on and off transition times, expressed
+        always on the standard time side. For zones with no transitions, this
+        function returns ``None``.
+
+        :param year:
+            The year whose transitions you would like to query.
+
+        :return:
+            Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+            ``(dston, dstoff)`` for zones with an annual DST transition, or
+            ``None`` for fixed offset zones.
+        """
+
+        if not self.hasdst:
+            return None
+
+        dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
+                               self._dsthour, self._dstminute,
+                               self._dstweeknumber)
+
+        dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
+                                self._stdhour, self._stdminute,
+                                self._stdweeknumber)
+
+        # Ambiguous dates default to the STD side
+        dstoff -= self._dst_base_offset
+
+        return dston, dstoff
+
+    def _get_hasdst(self):
+        return self._dstmonth != 0
+
+    @property
+    def _dst_base_offset(self):
+        return self._dst_base_offset_
+
+
+class tzwin(tzwinbase):
+
+    def __init__(self, name):
+        self._name = name
+
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+            tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
+            with winreg.OpenKey(handle, tzkeyname) as tzkey:
+                keydict = valuestodict(tzkey)
+
+        self._std_abbr = keydict["Std"]
+        self._dst_abbr = keydict["Dlt"]
+
+        self._display = keydict["Display"]
+
+        # See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
+        tup = struct.unpack("=3l16h", keydict["TZI"])
+        stdoffset = -tup[0]-tup[1]   # Bias + StandardBias * -1
+        dstoffset = stdoffset-tup[2]  # + DaylightBias * -1
+        self._std_offset = datetime.timedelta(minutes=stdoffset)
+        self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+        # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
+        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
+        (self._stdmonth,
+         self._stddayofweek,   # Sunday = 0
+         self._stdweeknumber,  # Last = 5
+         self._stdhour,
+         self._stdminute) = tup[4:9]
+
+        (self._dstmonth,
+         self._dstdayofweek,   # Sunday = 0
+         self._dstweeknumber,  # Last = 5
+         self._dsthour,
+         self._dstminute) = tup[12:17]
+
+        self._dst_base_offset_ = self._dst_offset - self._std_offset
+        self.hasdst = self._get_hasdst()
+
+    def __repr__(self):
+        return "tzwin(%s)" % repr(self._name)
+
+    def __reduce__(self):
+        return (self.__class__, (self._name,))
+
+
+class tzwinlocal(tzwinbase):
+    def __init__(self):
+        with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+            with winreg.OpenKey(handle, TZLOCALKEYNAME) as
tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. + return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/libraries/dateutil/tzwin.py b/libraries/dateutil/tzwin.py new file mode 100644 index 00000000..cebc673e --- /dev/null +++ b/libraries/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/libraries/dateutil/utils.py b/libraries/dateutil/utils.py new file mode 100644 index 00000000..ebcce6aa --- /dev/null +++ b/libraries/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. 
versionadded:: 2.7.0
+"""
+from __future__ import unicode_literals
+
+from datetime import datetime, time
+
+
+def today(tzinfo=None):
+    """
+    Returns a :py:class:`datetime` representing the current day at midnight
+
+    :param tzinfo:
+        The time zone to attach (also used to determine the current day).
+
+    :return:
+        A :py:class:`datetime.datetime` object representing the current day
+        at midnight.
+    """
+
+    dt = datetime.now(tzinfo)
+    return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
+
+
+def default_tzinfo(dt, tzinfo):
+    """
+    Sets the ``tzinfo`` parameter on naive datetimes only
+
+    This is useful for example when you are provided a datetime that may have
+    either an implicit or explicit time zone, such as when parsing a time zone
+    string.
+
+    .. doctest::
+
+        >>> from dateutil.tz import tzoffset
+        >>> from dateutil.parser import parse
+        >>> from dateutil.utils import default_tzinfo
+        >>> dflt_tz = tzoffset("EST", -18000)
+        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
+        2014-01-01 12:30:00+00:00
+        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
+        2014-01-01 12:30:00-05:00
+
+    :param dt:
+        The datetime on which to replace the time zone
+
+    :param tzinfo:
+        The :py:class:`datetime.tzinfo` subclass instance to assign to
+        ``dt`` if (and only if) it is naive.
+
+    :return:
+        Returns an aware :py:class:`datetime.datetime`.
+    """
+    if dt.tzinfo is not None:
+        return dt
+    else:
+        return dt.replace(tzinfo=tzinfo)
+
+
+def within_delta(dt1, dt2, delta):
+    """
+    Useful for comparing two datetimes that may have a negligible difference
+    and still be considered equal.
+    """
+    delta = abs(delta)
+    difference = dt1 - dt2
+    return -delta <= difference <= delta
diff --git a/libraries/dateutil/zoneinfo/__init__.py b/libraries/dateutil/zoneinfo/__init__.py
new file mode 100644
index 00000000..34f11ad6
--- /dev/null
+++ b/libraries/dateutil/zoneinfo/__init__.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+import warnings
+import json
+
+from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+
+from dateutil.tz import tzfile as _tzfile
+
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
+
+
+class tzfile(_tzfile):
+    def __reduce__(self):
+        return (gettz, (self._filename,))
+
+
+def getzoneinfofile_stream():
+    try:
+        return BytesIO(get_data(__name__, ZONEFILENAME))
+    except IOError as e:  # TODO switch to FileNotFoundError?
+        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+        return None
+
+
+class ZoneInfoFile(object):
+    def __init__(self, zonefile_stream=None):
+        if zonefile_stream is not None:
+            with TarFile.open(fileobj=zonefile_stream) as tf:
+                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
+                              for zf in tf.getmembers()
+                              if zf.isfile() and zf.name != METADATA_FN}
+                # deal with links: They'll point to their parent object. Less
+                # waste of memory
+                links = {zl.name: self.zones[zl.linkname]
+                         for zl in tf.getmembers() if
+                         zl.islnk() or zl.issym()}
+                self.zones.update(links)
+                try:
+                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+                    metadata_str = metadata_json.read().decode('UTF-8')
+                    self.metadata = json.loads(metadata_str)
+                except KeyError:
+                    # no metadata in tar file
+                    self.metadata = None
+        else:
+            self.zones = {}
+            self.metadata = None
+
+    def get(self, name, default=None):
+        """
+        Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
+        for retrieving zones from the zone dictionary.
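+
+        A minimal sketch of the intended lookup flow (the zone names shown
+        are illustrative; availability depends on the bundled tarball):
+
+        .. code-block:: python3
+
+            >>> from dateutil.zoneinfo import get_zonefile_instance
+            >>> zif = get_zonefile_instance()
+            >>> nyc = zif.get('America/New_York')
+            >>> zif.get('Not/A_Zone') is None
+            True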
+
+        :param name:
+            The name of the zone to retrieve. (Generally IANA zone names)
+
+        :param default:
+            The value to return in the event of a missing key.
+
+        .. versionadded:: 2.6.0
+
+        """
+        return self.zones.get(name, default)
+
+
+# The current API has gettz as a module function, although in fact it taps into
+# a stateful class. So as a workaround for now, without changing the API, we
+# will create a new "global" class instance the first time a user requests a
+# timezone. Ugly, but adheres to the api.
+#
+# TODO: Remove after deprecation period.
+_CLASS_ZONE_INSTANCE = []
+
+
+def get_zonefile_instance(new_instance=False):
+    """
+    This is a convenience function which provides a :class:`ZoneInfoFile`
+    instance using the data provided by the ``dateutil`` package. By default,
+    it caches a single instance of the ZoneInfoFile object and returns that.
+
+    :param new_instance:
+        If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated
+        and used as the cached instance for the next call. Otherwise, new
+        instances are created only as necessary.
+
+    :return:
+        Returns a :class:`ZoneInfoFile` object.
+
+    .. versionadded:: 2.6
+    """
+    if new_instance:
+        zif = None
+    else:
+        zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+    if zif is None:
+        zif = ZoneInfoFile(getzoneinfofile_stream())
+
+        get_zonefile_instance._cached_instance = zif
+
+    return zif
+
+
+def gettz(name):
+    """
+    This retrieves a time zone from the local zoneinfo tarball that is packaged
+    with dateutil.
+
+    :param name:
+        An IANA-style time zone name, as found in the zoneinfo file.
+
+    :return:
+        Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+    .. warning::
+        It is generally inadvisable to use this function, and it is only
+        provided for API compatibility with earlier versions. This is *not*
+        equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+        time zone based on the inputs, favoring system zoneinfo. This is ONLY
+        for accessing the dateutil-specific zoneinfo (which may be out of
+        date compared to the system zoneinfo).
+
+    .. deprecated:: 2.6
+        If you need to use a specific zoneinfofile over the system zoneinfo,
+        instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
+        :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
+
+        Use :func:`get_zonefile_instance` to retrieve an instance of the
+        dateutil-provided zoneinfo.
+    """
+    warnings.warn("zoneinfo.gettz() will be removed in future versions, "
+                  "to use the dateutil-provided zoneinfo files, instantiate a "
+                  "ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+                  "instead. See the documentation for details.",
+                  DeprecationWarning)
+
+    if len(_CLASS_ZONE_INSTANCE) == 0:
+        _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+    return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+    """ Get the zonefile metadata
+
+    See `zonefile_metadata`_
+
+    :returns:
+        A dictionary with the database metadata
+
+    .. deprecated:: 2.6
+        See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+        query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
+    """
+    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+                  "versions, to use the dateutil-provided zoneinfo files, "
+                  "instantiate a ZoneInfoFile object and query the "
+                  "'metadata' attribute instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/libraries/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/libraries/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 00000000..e86b54fe Binary files /dev/null and b/libraries/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ diff --git a/libraries/dateutil/zoneinfo/rebuild.py b/libraries/dateutil/zoneinfo/rebuild.py new file mode 100644 index 00000000..78f0d1a0 --- /dev/null +++ b/libraries/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,53 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call +from tarfile import TarFile + +from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ``ftp.iana.org/tz``. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with TarFile.open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with TarFile.open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. 
Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/resources/lib/libraries/requests/__init__.py b/libraries/requests/__init__.py similarity index 100% rename from resources/lib/libraries/requests/__init__.py rename to libraries/requests/__init__.py diff --git a/resources/lib/libraries/requests/adapters.py b/libraries/requests/adapters.py similarity index 100% rename from resources/lib/libraries/requests/adapters.py rename to libraries/requests/adapters.py diff --git a/resources/lib/libraries/requests/api.py b/libraries/requests/api.py similarity index 100% rename from resources/lib/libraries/requests/api.py rename to libraries/requests/api.py diff --git a/resources/lib/libraries/requests/auth.py b/libraries/requests/auth.py similarity index 100% rename from resources/lib/libraries/requests/auth.py rename to libraries/requests/auth.py diff --git a/resources/lib/libraries/requests/cacert.pem b/libraries/requests/cacert.pem similarity index 100% rename from resources/lib/libraries/requests/cacert.pem rename to libraries/requests/cacert.pem diff --git a/resources/lib/libraries/requests/certs.py b/libraries/requests/certs.py similarity index 100% rename from resources/lib/libraries/requests/certs.py rename to libraries/requests/certs.py diff --git a/resources/lib/libraries/requests/compat.py b/libraries/requests/compat.py similarity index 100% rename from resources/lib/libraries/requests/compat.py rename to libraries/requests/compat.py diff --git a/resources/lib/libraries/requests/cookies.py b/libraries/requests/cookies.py similarity index 100% rename from resources/lib/libraries/requests/cookies.py rename to libraries/requests/cookies.py diff --git a/resources/lib/libraries/requests/exceptions.py b/libraries/requests/exceptions.py similarity index 100% rename from resources/lib/libraries/requests/exceptions.py rename to libraries/requests/exceptions.py diff --git a/resources/lib/libraries/requests/hooks.py b/libraries/requests/hooks.py similarity index 100% rename from resources/lib/libraries/requests/hooks.py rename to libraries/requests/hooks.py diff --git a/resources/lib/libraries/requests/models.py b/libraries/requests/models.py similarity index 100% rename from resources/lib/libraries/requests/models.py rename to libraries/requests/models.py diff --git a/resources/lib/libraries/requests/packages/README.rst b/libraries/requests/packages/README.rst similarity index 100% rename from resources/lib/libraries/requests/packages/README.rst rename to libraries/requests/packages/README.rst diff --git a/resources/lib/libraries/requests/packages/__init__.py b/libraries/requests/packages/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/__init__.py rename to libraries/requests/packages/__init__.py diff --git a/resources/lib/libraries/requests/packages/chardet/__init__.py b/libraries/requests/packages/chardet/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/__init__.py rename to libraries/requests/packages/chardet/__init__.py diff --git a/resources/lib/libraries/requests/packages/chardet/big5freq.py b/libraries/requests/packages/chardet/big5freq.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/big5freq.py rename to libraries/requests/packages/chardet/big5freq.py diff --git a/resources/lib/libraries/requests/packages/chardet/big5prober.py b/libraries/requests/packages/chardet/big5prober.py similarity index 
100% rename from resources/lib/libraries/requests/packages/chardet/big5prober.py rename to libraries/requests/packages/chardet/big5prober.py diff --git a/resources/lib/libraries/requests/packages/chardet/chardetect.py b/libraries/requests/packages/chardet/chardetect.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/chardetect.py rename to libraries/requests/packages/chardet/chardetect.py diff --git a/resources/lib/libraries/requests/packages/chardet/chardistribution.py b/libraries/requests/packages/chardet/chardistribution.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/chardistribution.py rename to libraries/requests/packages/chardet/chardistribution.py diff --git a/resources/lib/libraries/requests/packages/chardet/charsetgroupprober.py b/libraries/requests/packages/chardet/charsetgroupprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/charsetgroupprober.py rename to libraries/requests/packages/chardet/charsetgroupprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/charsetprober.py b/libraries/requests/packages/chardet/charsetprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/charsetprober.py rename to libraries/requests/packages/chardet/charsetprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/codingstatemachine.py b/libraries/requests/packages/chardet/codingstatemachine.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/codingstatemachine.py rename to libraries/requests/packages/chardet/codingstatemachine.py diff --git a/resources/lib/libraries/requests/packages/chardet/compat.py b/libraries/requests/packages/chardet/compat.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/compat.py rename to libraries/requests/packages/chardet/compat.py diff --git a/resources/lib/libraries/requests/packages/chardet/constants.py b/libraries/requests/packages/chardet/constants.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/constants.py rename to libraries/requests/packages/chardet/constants.py diff --git a/resources/lib/libraries/requests/packages/chardet/cp949prober.py b/libraries/requests/packages/chardet/cp949prober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/cp949prober.py rename to libraries/requests/packages/chardet/cp949prober.py diff --git a/resources/lib/libraries/requests/packages/chardet/escprober.py b/libraries/requests/packages/chardet/escprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/escprober.py rename to libraries/requests/packages/chardet/escprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/escsm.py b/libraries/requests/packages/chardet/escsm.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/escsm.py rename to libraries/requests/packages/chardet/escsm.py diff --git a/resources/lib/libraries/requests/packages/chardet/eucjpprober.py b/libraries/requests/packages/chardet/eucjpprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/eucjpprober.py rename to libraries/requests/packages/chardet/eucjpprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/euckrfreq.py b/libraries/requests/packages/chardet/euckrfreq.py similarity index 100% rename from 
resources/lib/libraries/requests/packages/chardet/euckrfreq.py rename to libraries/requests/packages/chardet/euckrfreq.py diff --git a/resources/lib/libraries/requests/packages/chardet/euckrprober.py b/libraries/requests/packages/chardet/euckrprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/euckrprober.py rename to libraries/requests/packages/chardet/euckrprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/euctwfreq.py b/libraries/requests/packages/chardet/euctwfreq.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/euctwfreq.py rename to libraries/requests/packages/chardet/euctwfreq.py diff --git a/resources/lib/libraries/requests/packages/chardet/euctwprober.py b/libraries/requests/packages/chardet/euctwprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/euctwprober.py rename to libraries/requests/packages/chardet/euctwprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/gb2312freq.py b/libraries/requests/packages/chardet/gb2312freq.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/gb2312freq.py rename to libraries/requests/packages/chardet/gb2312freq.py diff --git a/resources/lib/libraries/requests/packages/chardet/gb2312prober.py b/libraries/requests/packages/chardet/gb2312prober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/gb2312prober.py rename to libraries/requests/packages/chardet/gb2312prober.py diff --git a/resources/lib/libraries/requests/packages/chardet/hebrewprober.py b/libraries/requests/packages/chardet/hebrewprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/hebrewprober.py rename to libraries/requests/packages/chardet/hebrewprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/jisfreq.py b/libraries/requests/packages/chardet/jisfreq.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/jisfreq.py rename to libraries/requests/packages/chardet/jisfreq.py diff --git a/resources/lib/libraries/requests/packages/chardet/jpcntx.py b/libraries/requests/packages/chardet/jpcntx.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/jpcntx.py rename to libraries/requests/packages/chardet/jpcntx.py diff --git a/resources/lib/libraries/requests/packages/chardet/langbulgarianmodel.py b/libraries/requests/packages/chardet/langbulgarianmodel.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/langbulgarianmodel.py rename to libraries/requests/packages/chardet/langbulgarianmodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/langcyrillicmodel.py b/libraries/requests/packages/chardet/langcyrillicmodel.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/langcyrillicmodel.py rename to libraries/requests/packages/chardet/langcyrillicmodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/langgreekmodel.py b/libraries/requests/packages/chardet/langgreekmodel.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/langgreekmodel.py rename to libraries/requests/packages/chardet/langgreekmodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/langhebrewmodel.py b/libraries/requests/packages/chardet/langhebrewmodel.py similarity index 100% rename from 
resources/lib/libraries/requests/packages/chardet/langhebrewmodel.py rename to libraries/requests/packages/chardet/langhebrewmodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/langhungarianmodel.py b/libraries/requests/packages/chardet/langhungarianmodel.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/langhungarianmodel.py rename to libraries/requests/packages/chardet/langhungarianmodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/langthaimodel.py b/libraries/requests/packages/chardet/langthaimodel.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/langthaimodel.py rename to libraries/requests/packages/chardet/langthaimodel.py diff --git a/resources/lib/libraries/requests/packages/chardet/latin1prober.py b/libraries/requests/packages/chardet/latin1prober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/latin1prober.py rename to libraries/requests/packages/chardet/latin1prober.py diff --git a/resources/lib/libraries/requests/packages/chardet/mbcharsetprober.py b/libraries/requests/packages/chardet/mbcharsetprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/mbcharsetprober.py rename to libraries/requests/packages/chardet/mbcharsetprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/mbcsgroupprober.py b/libraries/requests/packages/chardet/mbcsgroupprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/mbcsgroupprober.py rename to libraries/requests/packages/chardet/mbcsgroupprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/mbcssm.py b/libraries/requests/packages/chardet/mbcssm.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/mbcssm.py rename to libraries/requests/packages/chardet/mbcssm.py diff --git a/resources/lib/libraries/requests/packages/chardet/sbcharsetprober.py b/libraries/requests/packages/chardet/sbcharsetprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/sbcharsetprober.py rename to libraries/requests/packages/chardet/sbcharsetprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/sbcsgroupprober.py b/libraries/requests/packages/chardet/sbcsgroupprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/sbcsgroupprober.py rename to libraries/requests/packages/chardet/sbcsgroupprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/sjisprober.py b/libraries/requests/packages/chardet/sjisprober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/sjisprober.py rename to libraries/requests/packages/chardet/sjisprober.py diff --git a/resources/lib/libraries/requests/packages/chardet/universaldetector.py b/libraries/requests/packages/chardet/universaldetector.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/universaldetector.py rename to libraries/requests/packages/chardet/universaldetector.py diff --git a/resources/lib/libraries/requests/packages/chardet/utf8prober.py b/libraries/requests/packages/chardet/utf8prober.py similarity index 100% rename from resources/lib/libraries/requests/packages/chardet/utf8prober.py rename to libraries/requests/packages/chardet/utf8prober.py diff --git a/resources/lib/libraries/requests/packages/urllib3/__init__.py 
b/libraries/requests/packages/urllib3/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/__init__.py rename to libraries/requests/packages/urllib3/__init__.py diff --git a/resources/lib/libraries/requests/packages/urllib3/_collections.py b/libraries/requests/packages/urllib3/_collections.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/_collections.py rename to libraries/requests/packages/urllib3/_collections.py diff --git a/resources/lib/libraries/requests/packages/urllib3/connection.py b/libraries/requests/packages/urllib3/connection.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/connection.py rename to libraries/requests/packages/urllib3/connection.py diff --git a/resources/lib/libraries/requests/packages/urllib3/connectionpool.py b/libraries/requests/packages/urllib3/connectionpool.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/connectionpool.py rename to libraries/requests/packages/urllib3/connectionpool.py diff --git a/libraries/requests/packages/urllib3/contrib/__init__.py b/libraries/requests/packages/urllib3/contrib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/resources/lib/libraries/requests/packages/urllib3/contrib/appengine.py b/libraries/requests/packages/urllib3/contrib/appengine.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/contrib/appengine.py rename to libraries/requests/packages/urllib3/contrib/appengine.py diff --git a/resources/lib/libraries/requests/packages/urllib3/contrib/ntlmpool.py b/libraries/requests/packages/urllib3/contrib/ntlmpool.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/contrib/ntlmpool.py rename to libraries/requests/packages/urllib3/contrib/ntlmpool.py diff --git a/resources/lib/libraries/requests/packages/urllib3/contrib/pyopenssl.py b/libraries/requests/packages/urllib3/contrib/pyopenssl.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/contrib/pyopenssl.py rename to libraries/requests/packages/urllib3/contrib/pyopenssl.py diff --git a/resources/lib/libraries/requests/packages/urllib3/exceptions.py b/libraries/requests/packages/urllib3/exceptions.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/exceptions.py rename to libraries/requests/packages/urllib3/exceptions.py diff --git a/resources/lib/libraries/requests/packages/urllib3/fields.py b/libraries/requests/packages/urllib3/fields.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/fields.py rename to libraries/requests/packages/urllib3/fields.py diff --git a/resources/lib/libraries/requests/packages/urllib3/filepost.py b/libraries/requests/packages/urllib3/filepost.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/filepost.py rename to libraries/requests/packages/urllib3/filepost.py diff --git a/resources/lib/libraries/requests/packages/urllib3/packages/__init__.py b/libraries/requests/packages/urllib3/packages/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/packages/__init__.py rename to libraries/requests/packages/urllib3/packages/__init__.py diff --git a/resources/lib/libraries/requests/packages/urllib3/packages/ordered_dict.py b/libraries/requests/packages/urllib3/packages/ordered_dict.py similarity index 100% rename from 
resources/lib/libraries/requests/packages/urllib3/packages/ordered_dict.py rename to libraries/requests/packages/urllib3/packages/ordered_dict.py diff --git a/resources/lib/libraries/requests/packages/urllib3/packages/six.py b/libraries/requests/packages/urllib3/packages/six.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/packages/six.py rename to libraries/requests/packages/urllib3/packages/six.py diff --git a/resources/lib/libraries/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py b/libraries/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py rename to libraries/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py diff --git a/resources/lib/libraries/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py b/libraries/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py rename to libraries/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py diff --git a/resources/lib/libraries/requests/packages/urllib3/poolmanager.py b/libraries/requests/packages/urllib3/poolmanager.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/poolmanager.py rename to libraries/requests/packages/urllib3/poolmanager.py diff --git a/resources/lib/libraries/requests/packages/urllib3/request.py b/libraries/requests/packages/urllib3/request.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/request.py rename to libraries/requests/packages/urllib3/request.py diff --git a/resources/lib/libraries/requests/packages/urllib3/response.py b/libraries/requests/packages/urllib3/response.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/response.py rename to libraries/requests/packages/urllib3/response.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/__init__.py b/libraries/requests/packages/urllib3/util/__init__.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/__init__.py rename to libraries/requests/packages/urllib3/util/__init__.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/connection.py b/libraries/requests/packages/urllib3/util/connection.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/connection.py rename to libraries/requests/packages/urllib3/util/connection.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/request.py b/libraries/requests/packages/urllib3/util/request.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/request.py rename to libraries/requests/packages/urllib3/util/request.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/response.py b/libraries/requests/packages/urllib3/util/response.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/response.py rename to libraries/requests/packages/urllib3/util/response.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/retry.py b/libraries/requests/packages/urllib3/util/retry.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/retry.py rename to 
libraries/requests/packages/urllib3/util/retry.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/ssl_.py b/libraries/requests/packages/urllib3/util/ssl_.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/ssl_.py rename to libraries/requests/packages/urllib3/util/ssl_.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/timeout.py b/libraries/requests/packages/urllib3/util/timeout.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/timeout.py rename to libraries/requests/packages/urllib3/util/timeout.py diff --git a/resources/lib/libraries/requests/packages/urllib3/util/url.py b/libraries/requests/packages/urllib3/util/url.py similarity index 100% rename from resources/lib/libraries/requests/packages/urllib3/util/url.py rename to libraries/requests/packages/urllib3/util/url.py diff --git a/resources/lib/libraries/requests/sessions.py b/libraries/requests/sessions.py similarity index 100% rename from resources/lib/libraries/requests/sessions.py rename to libraries/requests/sessions.py diff --git a/resources/lib/libraries/requests/status_codes.py b/libraries/requests/status_codes.py similarity index 100% rename from resources/lib/libraries/requests/status_codes.py rename to libraries/requests/status_codes.py diff --git a/resources/lib/libraries/requests/structures.py b/libraries/requests/structures.py similarity index 100% rename from resources/lib/libraries/requests/structures.py rename to libraries/requests/structures.py diff --git a/resources/lib/libraries/requests/utils.py b/libraries/requests/utils.py similarity index 100% rename from resources/lib/libraries/requests/utils.py rename to libraries/requests/utils.py diff --git a/libraries/six.py b/libraries/six.py new file mode 100644 index 00000000..6bf4fd38 --- /dev/null +++ b/libraries/six.py @@ -0,0 +1,891 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson <benjamin@python.org>" +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. 
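+# For example, compatibility shims elsewhere in this add-on can branch on
+# these flags instead of probing sys.version_info directly (an illustrative
+# sketch, not code from six itself):
+#
+#     if PY2:
+#         text = raw.decode('utf-8')
+#     else:
+#         text = raw  # already text (str) on Python 3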
+PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." 
+ fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. + + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", 
"http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." 
+ attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", 
"urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + +class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class 
Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over 
the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+            if (isinstance(fp, file) and
+                    isinstance(data, unicode) and
+                    fp.encoding is not None):
+                errors = getattr(fp, "errors", None)
+                if errors is None:
+                    errors = "strict"
+                data = data.encode(fp.encoding, errors)
+            fp.write(data)
+        want_unicode = False
+        sep = kwargs.pop("sep", None)
+        if sep is not None:
+            if isinstance(sep, unicode):
+                want_unicode = True
+            elif not isinstance(sep, str):
+                raise TypeError("sep must be None or a string")
+        end = kwargs.pop("end", None)
+        if end is not None:
+            if isinstance(end, unicode):
+                want_unicode = True
+            elif not isinstance(end, str):
+                raise TypeError("end must be None or a string")
+        if kwargs:
+            raise TypeError("invalid keyword arguments to print()")
+        if not want_unicode:
+            for arg in args:
+                if isinstance(arg, unicode):
+                    want_unicode = True
+                    break
+        if want_unicode:
+            newline = unicode("\n")
+            space = unicode(" ")
+        else:
+            newline = "\n"
+            space = " "
+        if sep is None:
+            sep = space
+        if end is None:
+            end = newline
+        for i, arg in enumerate(args):
+            if i:
+                write(sep)
+            write(arg)
+        write(end)
+if sys.version_info[:2] < (3, 3):
+    _print = print_
+
+    def print_(*args, **kwargs):
+        fp = kwargs.get("file", sys.stdout)
+        flush = kwargs.pop("flush", False)
+        _print(*args, **kwargs)
+        if flush and fp is not None:
+            fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+              updated=functools.WRAPPER_UPDATES):
+        def wrapper(f):
+            f = functools.wraps(wrapped, assigned, updated)(f)
+            f.__wrapped__ = wrapped
+            return f
+        return wrapper
+else:
+    wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+    """Create a base class with a metaclass."""
+    # This requires a bit of explanation: the basic idea is to make a dummy
+    # metaclass for one level of class instantiation that replaces itself with
+    # the actual metaclass.
+    class metaclass(type):
+
+        def __new__(cls, name, this_bases, d):
+            return meta(name, bases, d)
+
+        @classmethod
+        def __prepare__(cls, name, this_bases):
+            return meta.__prepare__(name, bases)
+    return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+    """Class decorator for creating a class with a metaclass."""
+    def wrapper(cls):
+        orig_vars = cls.__dict__.copy()
+        slots = orig_vars.get('__slots__')
+        if slots is not None:
+            if isinstance(slots, str):
+                slots = [slots]
+            for slots_var in slots:
+                orig_vars.pop(slots_var)
+        orig_vars.pop('__dict__', None)
+        orig_vars.pop('__weakref__', None)
+        return metaclass(cls.__name__, cls.__bases__, orig_vars)
+    return wrapper
+
+
+def python_2_unicode_compatible(klass):
+    """
+    A decorator that defines __unicode__ and __str__ methods under Python 2.
+    Under Python 3 it does nothing.
+
+    To support Python 2 and 3 with a single code base, define a __str__ method
+    returning text and apply this decorator to the class.
+    """
+    if PY2:
+        if '__str__' not in klass.__dict__:
+            raise ValueError("@python_2_unicode_compatible cannot be applied "
+                             "to %s because it doesn't define __str__()." %
+                             klass.__name__)
+        klass.__unicode__ = klass.__str__
+        klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+    return klass
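The helpers above are the part of this vendored six module that downstream add-on code leans on for writing a single Python 2/3 code base. A minimal usage sketch, assuming this file is importable as `six`; illustrative only, not part of the patch:

```python
# -*- coding: utf-8 -*-
# Minimal sketch of the six metaclass/unicode helpers defined above.
import six


class Meta(type):
    """Example metaclass that tags every class it creates."""

    def __new__(mcs, name, bases, attrs):
        attrs['tagged'] = True
        return super(Meta, mcs).__new__(mcs, name, bases, attrs)


@six.python_2_unicode_compatible
class Tagged(six.with_metaclass(Meta, object)):
    def __str__(self):
        return u'Tagged instance'  # return text on both Python 2 and 3


@six.add_metaclass(Meta)
class AlsoTagged(object):
    pass


assert Tagged.tagged and AlsoTagged.tagged
assert isinstance(str(Tagged()), str)  # bytes str on PY2, text str on PY3
```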
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = []  # required for PEP 302 and PEP 451
+__package__ = __name__  # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+    for i, importer in enumerate(sys.meta_path):
+        # Here's some real nastiness: Another "instance" of the six module might
+        # be floating around. Therefore, we can't use isinstance() to check for
+        # the six meta path importer, since the other six instance will have
+        # inserted an importer with different class.
+        if (type(importer).__name__ == "_SixMetaPathImporter" and
+                importer.name == __name__):
+            del sys.meta_path[i]
+            break
+    del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/resources/language/resource.language.de_de/strings.po b/resources/language/resource.language.de_de/strings.po
index 1225328f..34c9eb7c 100644
--- a/resources/language/resource.language.de_de/strings.po
+++ b/resources/language/resource.language.de_de/strings.po
@@ -4,14 +4,15 @@
 # Addon Provider: angelblue05
 # Translators:
 # Wolfgang Petri <horstepipe@googlemail.com>, 2018
-# Benni <semool@secure-mail.biz>, 2018
 # sualfred <su4lfred@gmail.com>, 2018
+# Benni <semool@secure-mail.biz>, 2019
+#
 msgid ""
 msgstr ""
 "Project-Id-Version: Emby for Kodi\n"
 "POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: sualfred <su4lfred@gmail.com>, 2018\n"
+"PO-Revision-Date: 2018-09-07 20:10+0000\n"
+"Last-Translator: Benni <semool@secure-mail.biz>, 2019\n"
 "Language-Team: German (https://www.transifex.com/emby-for-kodi/teams/91090/de/)\n"
 "MIME-Version: 1.0\n"
 "Content-Type: text/plain; charset=UTF-8\n"
@@ -460,10 +461,9 @@ msgstr ""
 "Fortfahren?"
 
 msgctxt "#33023"
-msgid "Emby for Kodi will work correctly until the database is reset."
+msgid "Emby for Kodi will not work correctly until the database is reset."
 msgstr ""
-"'Emby for Kodi' wird einwandfrei funktionieren, bis zum zurücksetzen der "
-"Datenbank."
+"'Emby for Kodi' wird ohne ein Zurücksetzen der Datenbank nicht richtig funktionieren."
 
 msgctxt "#33025"
 msgid "Completed in:"
@@ -1010,3 +1010,73 @@ msgstr "Updates werden verarbeitet"
 msgctxt "#33179"
 msgid "Force transcode"
 msgstr "Transkodierung erzwingen"
+
+msgctxt "#33180"
+msgid "Restart Emby for Kodi"
+msgstr "'Emby for Kodi' neu starten"
+
+msgctxt "#33181"
+msgid "Restarting to apply the patch"
+msgstr "Neustart zum Anwenden des Patches"
+
+msgctxt "#33182"
+msgid "Play with cinema mode"
+msgstr "Abspielen im Kino-Modus"
+
+msgctxt "#33183"
+msgid "Enable the option to play with cinema mode"
+msgstr "Aktiviere diese Option zum Abspielen im Kino-Modus"
+
+msgctxt "#33184"
+msgid "Remove libraries"
+msgstr "Datenbanken entfernen"
+
+msgctxt "#33185"
+msgid "Enable sync during playback (may cause some lag)"
+msgstr ""
+"Aktiviere Synchronisierung während der Wiedergabe (kann Aussetzer "
+"verursachen)"
+
+msgctxt "#33186"
+msgid ""
+"The Kodi companion speeds up the start up sync. Other syncs are triggered by"
+" server events."
+msgstr ""
+"'Kodi Companion' beschleunigt die erste Synchronisierung. Weitere Syncs "
+"werden durch Server-Ereignisse ausgelöst."
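For context on the translation hunks in this changeset: Kodi add-ons resolve these numbered `msgctxt` entries by id at runtime, so the `msgid` text is informational. A rough sketch of the lookup, assuming Kodi's `xbmcaddon` API is available; not part of the patch:

```python
# -*- coding: utf-8 -*-
# Sketch: resolving a numbered msgctxt id from these strings.po files.
import xbmcaddon

ADDON = xbmcaddon.Addon('plugin.video.emby')


def _(string_id):
    """Return the translated string for a msgctxt id such as 33180."""
    return ADDON.getLocalizedString(string_id)

# With a German locale active this would yield
# "'Emby for Kodi' neu starten" for msgctxt "#33180".
label = _(33180)
```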
+ +msgctxt "#33187" +msgid "Sync Rotten Tomatoes ratings" +msgstr "Rotten Tomatoes Bewertungen syncronisieren" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "Sollen Rotten Tomatoes Bewertungen syncronisiert werden?" + +msgctxt "#33189" +msgid "" +"The database version detected is unsupported. Syncing may not work, proceed " +"anyway?" +msgstr "" +"Die erkannte Datenbankversion wird nicht unterstützt. Die Syncronisierung " +"wird nicht funktionieren. Trotzdem fortfahren?" + +msgctxt "#33190" +msgid "Enable Kodi database discovery" +msgstr "'Kodi Database discovery' aktivieren?" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "'Emby for Kodi' Neustarten zum anwenden der Änderung?" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "'Emby for Kodi' neustarten" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "Neustart..." + +msgctxt "#33194" +msgid "Manage libraries" +msgstr "Datenbanken verwalten" diff --git a/resources/language/resource.language.en_gb/strings.po b/resources/language/resource.language.en_gb/strings.po index a5c088bd..897396a3 100644 --- a/resources/language/resource.language.en_gb/strings.po +++ b/resources/language/resource.language.en_gb/strings.po @@ -223,7 +223,7 @@ msgid "Verify connection" msgstr "" msgctxt "#30504" -msgid "Use altername device name" +msgid "Use alternate device name" msgstr "" msgctxt "#30506" @@ -451,7 +451,7 @@ msgid "Detected the database needs to be recreated for this version of Emby for msgstr "" msgctxt "#33023" -msgid "Emby for Kodi will work correctly until the database is reset." +msgid "Emby for Kodi will not work correctly until the database is reset." msgstr "" msgctxt "#33025" @@ -915,5 +915,37 @@ msgid "The Kodi companion speeds up the start up sync. Other syncs are triggered msgstr "" msgctxt "#33187" -msgid "Sync rotten tomatoes ratings" +msgid "Sync Rotten Tomatoes ratings" +msgstr "" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "" + +msgctxt "#33194" +msgid "Manage libraries" +msgstr "" + +msgctxt "#33195" +msgid "Enable Emby for Kodi" +msgstr "" + +msgctxt "#33196" +msgid "Advanced options" +msgstr "" + +msgctxt "#33197" +msgid "A sync is already running, please wait until it completes and try again." msgstr "" diff --git a/resources/language/resource.language.fr_fr/strings.po b/resources/language/resource.language.fr_fr/strings.po index 45685f1c..356133c1 100644 --- a/resources/language/resource.language.fr_fr/strings.po +++ b/resources/language/resource.language.fr_fr/strings.po @@ -4,11 +4,12 @@ # Addon Provider: angelblue05 # Translators: # Jean Fontaine <balayop@yahoo.fr>, 2018 +# msgid "" msgstr "" "Project-Id-Version: Emby for Kodi\n" "POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n" -"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"PO-Revision-Date: 2018-09-07 20:10+0000\n" "Last-Translator: Jean Fontaine <balayop@yahoo.fr>, 2018\n" "Language-Team: French (https://www.transifex.com/emby-for-kodi/teams/91090/fr/)\n" "MIME-Version: 1.0\n" @@ -459,10 +460,8 @@ msgstr "" "Continuer ?" msgctxt "#33023" -msgid "Emby for Kodi will work correctly until the database is reset." +msgid "Emby for Kodi will not work correctly until the database is reset." 
msgstr "" -"Emby pour Kodi fonctionnera normalement jusqu'à ce que la base de données " -"soit réinitialisée." msgctxt "#33025" msgid "Completed in:" @@ -1015,3 +1014,53 @@ msgstr "Traitement des mises à jour" msgctxt "#33179" msgid "Force transcode" msgstr "Forcer le transcodage" + +msgctxt "#33180" +msgid "Restart Emby for Kodi" +msgstr "" + +msgctxt "#33181" +msgid "Restarting to apply the patch" +msgstr "" + +msgctxt "#33182" +msgid "Play with cinema mode" +msgstr "" + +msgctxt "#33183" +msgid "Enable the option to play with cinema mode" +msgstr "" + +msgctxt "#33184" +msgid "Remove libraries" +msgstr "" + +msgctxt "#33185" +msgid "Enable sync during playback (may cause some lag)" +msgstr "" + +msgctxt "#33186" +msgid "" +"The Kodi companion speeds up the start up sync. Other syncs are triggered by" +" server events." +msgstr "" + +msgctxt "#33187" +msgid "Sync Rotten Tomatoes ratings" +msgstr "" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "" diff --git a/resources/language/resource.language.it_it/strings.po b/resources/language/resource.language.it_it/strings.po index e628d856..e94cf631 100644 --- a/resources/language/resource.language.it_it/strings.po +++ b/resources/language/resource.language.it_it/strings.po @@ -3,14 +3,14 @@ # Addon id: plugin.video.emby # Addon Provider: angelblue05 # Translators: -# EffeF, 2018 +# EffeF, 2019 # msgid "" msgstr "" "Project-Id-Version: Emby for Kodi\n" "POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n" "PO-Revision-Date: 2018-09-07 20:10+0000\n" -"Last-Translator: EffeF, 2018\n" +"Last-Translator: EffeF, 2019\n" "Language-Team: Italian (https://www.transifex.com/emby-for-kodi/teams/91090/it/)\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" @@ -459,9 +459,9 @@ msgstr "" "per Kodi. Procedere?" msgctxt "#33023" -msgid "Emby for Kodi will work correctly until the database is reset." +msgid "Emby for Kodi will not work correctly until the database is reset." msgstr "" -"Emby for Kodi funzionerà correttamente fino al ripristino del database." +"Emby per Kodi non funzionerà correttamente fino al ripristino del database." msgctxt "#33025" msgid "Completed in:" @@ -1011,3 +1011,65 @@ msgstr "Riavvia Emby per Kodi" msgctxt "#33181" msgid "Restarting to apply the patch" msgstr "Riavvia per applicare la patch" + +msgctxt "#33182" +msgid "Play with cinema mode" +msgstr "Riproduci con la modalita cinema" + +msgctxt "#33183" +msgid "Enable the option to play with cinema mode" +msgstr "Abilita l'opzione per la riproduzione con la modalità cinema" + +msgctxt "#33184" +msgid "Remove libraries" +msgstr "Rimuovi librerie" + +msgctxt "#33185" +msgid "Enable sync during playback (may cause some lag)" +msgstr "" +"Abilita la sincronizzazione durante la riproduzione (potrebbe causare alcuni" +" lag)" + +msgctxt "#33186" +msgid "" +"The Kodi companion speeds up the start up sync. Other syncs are triggered by" +" server events." +msgstr "" +"Kodi companion accelera la sincronizzazione all'avvio. Altre " +"sincronizzazioni sono attivate da eventi del server." + +msgctxt "#33187" +msgid "Sync Rotten Tomatoes ratings" +msgstr "Sincronizza valutazioni di Rotten Tomatoes" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "Vorresti sincronizzare le valutazioni di Rotten Tomatoes?" 
+ +msgctxt "#33189" +msgid "" +"The database version detected is unsupported. Syncing may not work, proceed " +"anyway?" +msgstr "" +"La versione del database rilevata non è supportata. La sincronizzazione " +"potrebbe non funzionare, procedere comunque?" + +msgctxt "#33190" +msgid "Enable Kodi database discovery" +msgstr "Abilita il rilevamento del database Kodi" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "Riavvia Emby per Kodi per applicare questo cambiamento?" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "Riavvia Emby per Kodi" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "Riavvio..." + +msgctxt "#33194" +msgid "Manage libraries" +msgstr "Gestisci le librerie" diff --git a/resources/language/resource.language.nl_nl/strings.po b/resources/language/resource.language.nl_nl/strings.po new file mode 100644 index 00000000..724b877c --- /dev/null +++ b/resources/language/resource.language.nl_nl/strings.po @@ -0,0 +1,1070 @@ +# Emby for Kodi language file +# Addon Name: Emby for Kodi +# Addon id: plugin.video.emby +# Addon Provider: angelblue05 +# Translators: +# 63ac71fcbd0581bb567b1f0d798c7970, 2019 +# +msgid "" +msgstr "" +"Project-Id-Version: Emby for Kodi\n" +"POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n" +"PO-Revision-Date: 2018-09-07 20:10+0000\n" +"Last-Translator: 63ac71fcbd0581bb567b1f0d798c7970, 2019\n" +"Language-Team: Dutch (https://www.transifex.com/emby-for-kodi/teams/91090/nl/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: nl\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +msgctxt "#29999" +msgid "Emby for Kodi" +msgstr "Emby voor Kodi" + +msgctxt "#30000" +msgid "Server address" +msgstr "Server adres" + +msgctxt "#30001" +msgid "Server name" +msgstr "Server naam" + +msgctxt "#30002" +msgid "Force HTTP playback" +msgstr "Afspelen via HTTP forceren" + +msgctxt "#30003" +msgid "Login method" +msgstr "Login methode" + +msgctxt "#30004" +msgid "Log level" +msgstr "Log niveau" + +msgctxt "#30016" +msgid "Device name" +msgstr "Apparaat naam" + +msgctxt "#30022" +msgid "Advanced" +msgstr "Geavanceerd" + +msgctxt "#30024" +msgid "Username" +msgstr "Gebruikersnaam" + +msgctxt "#30030" +msgid "Port number" +msgstr "Poort nummer" + +msgctxt "#30091" +msgid "Confirm file deletion" +msgstr "Bestand verwijderen bevestigen" + +msgctxt "#30114" +msgid "Offer delete after playback" +msgstr "Na afspelen verwijderen aanbieden" + +msgctxt "#30115" +msgid "For Episodes" +msgstr "Voor afleveringen" + +msgctxt "#30116" +msgid "For Movies" +msgstr "Voor films" + +msgctxt "#30157" +msgid "Enable enhanced artwork (i.e. 
cover art)" +msgstr "Geevanceerde artwork inschakelen (bv cover art)" + +msgctxt "#30160" +msgid "Video quality" +msgstr "Video qualiteit" + +msgctxt "#30170" +msgid "Recently Added TV Shows" +msgstr "Recent toegevoegde TV series" + +msgctxt "#30171" +msgid "In Progress TV Shows" +msgstr "Wordt uitgevoerd TV serien" + +msgctxt "#30174" +msgid "Recently Added Movies" +msgstr "Recent toegevoegde films" + +msgctxt "#30175" +msgid "Recently Added Episodes" +msgstr "Recent toegevoegde afleveringen" + +msgctxt "#30177" +msgid "In Progress Movies" +msgstr "Wordt uitgevoerd films" + +msgctxt "#30178" +msgid "In Progress Episodes" +msgstr "Word uitgevoerd afleveringen" + +msgctxt "#30179" +msgid "Next Episodes" +msgstr "Volgende afleveringen" + +msgctxt "#30180" +msgid "Favorite Movies" +msgstr "Favoriete films" + +msgctxt "#30181" +msgid "Favorite Shows" +msgstr "Favoriete series" + +msgctxt "#30182" +msgid "Favorite Episodes" +msgstr "Favoriete afleveringen" + +msgctxt "#30185" +msgid "Boxsets" +msgstr "Boxsets" + +msgctxt "#30189" +msgid "Unwatched Movies" +msgstr "Niet bekeken films" + +msgctxt "#30229" +msgid "Random Items" +msgstr "Willekeurige items" + +msgctxt "#30230" +msgid "Recommended Items" +msgstr "Aanbevolen items" + +msgctxt "#30235" +msgid "Interface" +msgstr "Interface" + +msgctxt "#30239" +msgid "Reset local Kodi database" +msgstr "Reset lokale Kodi databank" + +msgctxt "#30249" +msgid "Enable welcome message" +msgstr "Welkomstbericht inschakelen" + +msgctxt "#30251" +msgid "Recently added Home Videos" +msgstr "Recent toegevoegde Video's" + +msgctxt "#30252" +msgid "Recently added Photos" +msgstr "Recent toegevoegde foto's" + +msgctxt "#30253" +msgid "Favourite Home Videos" +msgstr "Favoriete video's" + +msgctxt "#30254" +msgid "Favourite Photos" +msgstr "Favoriete foto's" + +msgctxt "#30255" +msgid "Favourite Albums" +msgstr "Favoriete albums" + +msgctxt "#30256" +msgid "Recently added Music videos" +msgstr "Recent toegevoegde muziek video's" + +msgctxt "#30257" +msgid "In progress Music videos" +msgstr "Wordt uitgevoerd muziek video's" + +msgctxt "#30258" +msgid "Unwatched Music videos" +msgstr "Niet bekeken muziek video's" + +msgctxt "#30302" +msgid "Movies" +msgstr "Films" + +msgctxt "#30305" +msgid "TV Shows" +msgstr "TV series" + +msgctxt "#30401" +msgid "Emby options" +msgstr "Emby opties" + +msgctxt "#30402" +msgid "Emby transcode" +msgstr "Emby transcode" + +msgctxt "#30405" +msgid "Add to favorites" +msgstr "Aan favorieten toevoegen" + +msgctxt "#30406" +msgid "Remove from favorites" +msgstr "Verwijderen uit favorieten" + +msgctxt "#30408" +msgid "Settings" +msgstr "Instellingen" + +msgctxt "#30409" +msgid "Delete from Emby" +msgstr "Uit Emby verwijderen" + +msgctxt "#30410" +msgid "Refresh this item" +msgstr "Dit item vernieuwen" + +msgctxt "#30412" +msgid "Transcode" +msgstr "Transcoderen" + +msgctxt "#30500" +msgid "Verify connection" +msgstr "Verbinding controleren" + +msgctxt "#30504" +msgid "Use alternate device name" +msgstr "Alternatieve apparaat naam gebruiken" + +msgctxt "#30506" +msgid "Sync" +msgstr "Synchroniseren" + +msgctxt "#30507" +msgid "Enable notification if update count is greater than" +msgstr "Melding inschakelen als er meer updates zijn dan" + +msgctxt "#30508" +msgid "Sync empty shows" +msgstr "Synchroniseer lege shows" + +msgctxt "#30509" +msgid "Enable music library" +msgstr "Muziek bibliotheek inschakelen" + +msgctxt "#30511" +msgid "Playback mode" +msgstr "Afspeel modus" + +msgctxt "#30512" +msgid "Enable artwork caching" +msgstr "Artwork 
caching inschakelen" + +msgctxt "#30515" +msgid "Paging - max items requested (default: 15)" +msgstr "Paging - max items aangevraagd (standaard: 15)" + +msgctxt "#30516" +msgid "Playback" +msgstr "Afspelen" + +msgctxt "#30517" +msgid "Network credentials" +msgstr "Netwerk login gegevens" + +msgctxt "#30518" +msgid "Enable cinema mode" +msgstr "Cinema modus inschakelen" + +msgctxt "#30519" +msgid "Ask to play trailers" +msgstr "Vragen om trailers af te spelen" + +msgctxt "#30520" +msgid "Skip the delete confirmation (use at your own risk)" +msgstr "Bevestigen verwijderen overslaan (gebruik op eigen risico)" + +msgctxt "#30521" +msgid "Jump back on resume (in seconds)" +msgstr "Bij hervatten kort terug spoelen (in seconden)" + +msgctxt "#30522" +msgid "Transcode H265/HEVC" +msgstr "Transcoderen H265/HEVC" + +msgctxt "#30527" +msgid "Ignore specials in next episodes" +msgstr "Specials in volgende afleveringen negeren" + +msgctxt "#30528" +msgid "Permanent users" +msgstr "Permanente gebruikers" + +msgctxt "#30529" +msgid "Startup delay (in seconds)" +msgstr "Start vertraging (in seconden)" + +msgctxt "#30530" +msgid "Enable server restart message" +msgstr "Server herstart melding inschakelen" + +msgctxt "#30531" +msgid "Enable new content" +msgstr "Nieuwe inhoud inschakelen" + +msgctxt "#30532" +msgid "Duration of the video library pop up" +msgstr "Video looptijd pop-up" + +msgctxt "#30533" +msgid "Duration of the music library pop up" +msgstr "Muziek looptijd pop-up" + +msgctxt "#30534" +msgid "Notifications (in seconds)" +msgstr "Meldingen (in seconden)" + +msgctxt "#30535" +msgid "Generate a new device Id" +msgstr "Maak een nieuwe apparaat id aan" + +msgctxt "#30536" +msgid "Allow the screensaver during syncs" +msgstr "Schermbeveiliging tijdens synchronisatie toestaan" + +msgctxt "#30537" +msgid "Transcode Hi10P" +msgstr "Transcoderen Hi10P" + +msgctxt "#30539" +msgid "Login" +msgstr "Login" + +msgctxt "#30540" +msgid "Manual login" +msgstr "Handmatige login" + +msgctxt "#30543" +msgid "Username or email" +msgstr "Gebruikersnaam of e-mail" + +msgctxt "#30545" +msgid "Enable server offline" +msgstr "Server offline inschakelen" + +msgctxt "#30547" +msgid "Display message" +msgstr "Melding tonen" + +msgctxt "#30600" +msgid "Sign in with Emby Connect" +msgstr "Aanmelden met Emby Connect" + +msgctxt "#30602" +msgid "Password" +msgstr "Wachtwoord" + +msgctxt "#30605" +msgid "Sign in" +msgstr "Aanmelden" + +msgctxt "#30606" +msgid "Cancel" +msgstr "Annuleren" + +msgctxt "#30607" +msgid "Select main server" +msgstr "Hoofd server selecteren" + +msgctxt "#30608" +msgid "Username or password cannot be empty" +msgstr "Gebruikersnaam of wachtwoord kan niet leeg zijn" + +msgctxt "#30609" +msgid "Unable to connect to the selected server" +msgstr "Niet in staat om met de geselecteerde server verbinding te maken" + +msgctxt "#30610" +msgid "Connect to" +msgstr "Verbinden met" + +msgctxt "#30611" +msgid "Manually add server" +msgstr "Handmatig server toevoegen" + +msgctxt "#30612" +msgid "Please sign in" +msgstr "Aanmelden" + +msgctxt "#30613" +msgid "Change Emby Connect user" +msgstr "Emby Connect gebruiker wijzigen" + +msgctxt "#30614" +msgid "Connect to server" +msgstr "Verbinden met server" + +msgctxt "#30615" +msgid "Host" +msgstr "Host" + +msgctxt "#30616" +msgid "Connect" +msgstr "Verbinden" + +msgctxt "#30617" +msgid "Server or port cannot be empty" +msgstr "Server of poort kan niet leeg zijn" + +msgctxt "#30618" +msgid "Change Emby Connect user" +msgstr "Emby Connect gebruiker wijzigen" + +msgctxt 
"#33000" +msgid "Welcome" +msgstr "Welkom" + +msgctxt "#33006" +msgid "Server is restarting" +msgstr "Server start opnieuw op" + +msgctxt "#33009" +msgid "Invalid username or password" +msgstr "Ongeldige gebruikersnaam of wachtwoord" + +msgctxt "#33013" +msgid "Choose the audio stream" +msgstr "Kies audio stream" + +msgctxt "#33014" +msgid "Choose the subtitles stream" +msgstr "Kies ondertitel stream" + +msgctxt "#33015" +msgid "Delete file from Emby?" +msgstr "Bestand uit Emby verwijderen?" + +msgctxt "#33016" +msgid "Play trailers?" +msgstr "Trailers afspelen?" + +msgctxt "#33018" +msgid "Gathering boxsets" +msgstr "Boxsets verzamelen" + +msgctxt "#33021" +msgid "Gathering:" +msgstr "Verzamelen:" + +msgctxt "#33022" +msgid "" +"Detected the database needs to be recreated for this version of Emby for " +"Kodi. Proceed?" +msgstr "" +"De databank moet opnieuw gemaakt worden voor deze versie van Emby for Kodi. " +"Verder gaan?" + +msgctxt "#33023" +msgid "Emby for Kodi will not work correctly until the database is reset." +msgstr "Emby for Kodi werkt niet correct voor dat de databank gereset is." + +msgctxt "#33025" +msgid "Completed in:" +msgstr "Voltooid in:" + +msgctxt "#33033" +msgid "A new device Id has been generated. Kodi will now restart." +msgstr "Een nieuw apparaat id is aangemaakt. Kodi start nu opnieuw." + +msgctxt "#33035" +msgid "" +"Caution! If you choose Native mode, certain Emby features will be missing, " +"such as: Emby cinema mode, direct stream/transcode options and parental " +"access schedule." +msgstr "" +"Opgepast! Bij het kiezen van Native mode, zullen bepaalde Emby mogelijkheden" +" ontbreken zoals: Emby Cinema modus, direct stream/transcode opties and " +"ouderlijke toegang planning." + +msgctxt "#33036" +msgid "Add-on (default)" +msgstr "Add-on (standaard)" + +msgctxt "#33037" +msgid "Native (direct paths)" +msgstr "Native (direct paths)" + +msgctxt "#33039" +msgid "Enable music library?" +msgstr "Muziek bibliotheek inschakelen?" + +msgctxt "#33047" +msgid "Kodi can't locate file:" +msgstr "Kodi kan volgende bestand niet vinden:" + +msgctxt "#33048" +msgid "" +"You may need to verify your network credentials in the add-on settings or " +"use the Emby path substitution to format your path correctly (Emby dashboard" +" > library). Stop syncing?" +msgstr "" +"Misschien is het nodig uw netwerk gegevens na te kijken in de add-on " +"instellingen of gebruik de Emby path vervanging om uw folder correct te " +"formateren (Emby dashboard > bibliotheek). Synchroniseren stoppen?" + +msgctxt "#33049" +msgid "New" +msgstr "Nieuw" + +msgctxt "#33054" +msgid "Add user to session" +msgstr "Gebruiker aan sessie toevoegen" + +msgctxt "#33058" +msgid "Perform local database reset" +msgstr "Lokale databank reset doorvoeren" + +msgctxt "#33060" +msgid "Sync theme media" +msgstr "Synchroniseer thema media" + +msgctxt "#33061" +msgid "Add/Remove user from the session" +msgstr "Gebruiker aan sessie toevoegen of verwijderen" + +msgctxt "#33062" +msgid "Add user" +msgstr "Gebruiker toevoegen" + +msgctxt "#33063" +msgid "Remove user" +msgstr "Gebruiker verwijderen" + +msgctxt "#33064" +msgid "Remove user from the session" +msgstr "Gebruiker uit sessie verwijderen" + +msgctxt "#33074" +msgid "Are you sure you want to reset your local Kodi database?" +msgstr "Weet u zeker dat u de lokale Kodi databank resetten wilt?" + +msgctxt "#33086" +msgid "Remove all cached artwork?" +msgstr "Cached artwork verwijderen?" + +msgctxt "#33087" +msgid "Reset all Emby add-on settings?" 
+msgstr "Alle Emby add-on instellingen resetten?" + +msgctxt "#33088" +msgid "" +"Database reset has completed, Kodi will now restart to apply the changes." +msgstr "" +"Databank reset compleet, Kodi start opnieuw op om de veranderingen toe te " +"passen." + +msgctxt "#33089" +msgid "Enter folder name for backup" +msgstr "Vul de mapnaam in voor de backup" + +msgctxt "#33090" +msgid "Replace existing backup?" +msgstr "Bestaande backup vervangen?" + +msgctxt "#33091" +msgid "Created backup at:" +msgstr "Backup aangemaakt op:" + +msgctxt "#33092" +msgid "Create a backup" +msgstr "Backup aanmaken" + +msgctxt "#33093" +msgid "Backup folder" +msgstr "Backup folder" + +msgctxt "#33097" +msgid "" +"Important, cleanonupdate was removed in your advanced settings to prevent " +"conflict with Emby for Kodi. Kodi will restart now." +msgstr "" +"Belangrijd, cleanonupdate is verwijderen uit de geavanceerde instellingen om" +" een conflict met Emby for Kodi te voorkomen. Kodi start nu opnieuw op." + +msgctxt "#33098" +msgid "Refresh boxsets" +msgstr "Boxsets vernieuwen" + +msgctxt "#33099" +msgid "" +"Install the server plugin Kodi companion to automatically apply emby library" +" updates at startup. This setting can be found in the add-on settings > sync" +" options > Enable Kodi Companion." +msgstr "" +"Installeer de server plugin Kodi companion om Emby bibliotheek updates " +"automatisch te laten lopen bij het opstarten. Deze instellingen kan gevonden" +" worden in de add-on instellingen > synchroniseer opties > Kodi Companion " +"inschakelen." + +msgctxt "#33100" +msgid "Would you like to sync empty shows?" +msgstr "Lege shows synchroniseren?" + +msgctxt "#33101" +msgid "" +"Since you are using native playback mode with music enabled, do you want to " +"import music rating from files?" +msgstr "" +"Aangezien u native afspeel modus gebruikt met muziek ingeschakeld, wilt u de" +" muziek rating van bestanden importeren?" + +msgctxt "#33102" +msgid "Resume the previous sync?" +msgstr "De vorige synchronisatoe hervatten?" + +msgctxt "#33103" +msgid "" +"Enable the webserver service in the Kodi settings to allow artwork caching." +msgstr "" +"Schakel de webserver service in in de Kodi instellingen om artwork caching " +"toe te staan." + +msgctxt "#33104" +msgid "Find more info in the github wiki/Create-and-restore-from-backup." +msgstr "" +"Vind meer informatie in de github wiki/Create-and-restore-from-backup." + +msgctxt "#33105" +msgid "Enable the context menu" +msgstr "Context menu inschakelen" + +msgctxt "#33106" +msgid "Enable the option to transcode" +msgstr "Schakel de optie om te transkoderen in" + +msgctxt "#33107" +msgid "" +"Users added to the session (no space between users). (eg username,username2)" +msgstr "" +"Voeg gebruikers toe aan de sessie (geen spatie tussen gebruikers). (bv " +"gebruikersnaam,gebruikersnaam2)" + +msgctxt "#33108" +msgid "Notifications are delayed during video playback (except live tv)." +msgstr "" +"Meldingen worden vertraagd tijdens het afspelen van video's. (met " +"uitzondering van live tv)" + +msgctxt "#33109" +msgid "Plugin" +msgstr "Plugin" + +msgctxt "#33110" +msgid "Restart Kodi to take effect." +msgstr "Start Kodi opnieuw op." + +msgctxt "#33111" +msgid "Reset the local database to apply the playback mode change." +msgstr "" +"Reset de lokale databank om de wissel van afspeel modus toe te passen." 
+ +msgctxt "#33112" +msgid "Applies to Native and Add-on playback mode" +msgstr "Is van toepassing op Native en add-on afspeel modus" + +msgctxt "#33113" +msgid "Applies to Add-on playback mode only" +msgstr "Wordt alleen toegepast op Add-on afspeel modus" + +msgctxt "#33114" +msgid "Enable external subtitles" +msgstr "Schakel externe ondertitels in" + +msgctxt "#33115" +msgid "Adjust for remote connection" +msgstr "Pas aan voor verbindingen op afstand" + +msgctxt "#33116" +msgid "Compress artwork (reduces quality)" +msgstr "Artwork comprimeren (vermindert kwaliteit)" + +msgctxt "#33117" +msgid "" +"Enable artwork caching? If not, Kodi will still cache your artwork at a " +"slower pace." +msgstr "" +"Artwork caching inschakelen? Uitgeschakeld cached Kodi uw artwork langzamer." + +msgctxt "#33118" +msgid "" +"You've change the playback mode. Kodi needs to be reset to apply the change," +" would you like to do this now?" +msgstr "" +"u heeft de afspeel modus gewijzigd. Kodi opnieuw opstarten om de verandering" +" toe te passen. Wilt u nu opnieuw opstarten?" + +msgctxt "#33119" +msgid "" +"Something went wrong during the sync. You'll be able to restore progress " +"when restarting Kodi. If the problem persists, please report on the Emby for" +" Kodi forums, with your Kodi log." +msgstr "" +"Er is iets mis gegaan tijdens de synchronisatie. U kunt de voortgang " +"herstellen als u Kodi opnieuw opstart. Als het probleem blijft, meld dit " +"probleem dan op de Emby for Kodi forums, met uw Kodi log." + +msgctxt "#33120" +msgid "Select the libraries to add" +msgstr "Selecteeer de bibliotheken die u wilt toevoegen" + +msgctxt "#33121" +msgid "All" +msgstr "Alle" + +msgctxt "#33122" +msgid "Restart Kodi to resume where you left off." +msgstr "Start Kodi opnieuw op om verder te gaan waar u gebleven was." + +msgctxt "#33123" +msgid "Sync library to Kodi" +msgstr "Synchroniseer bibliotheek naar Kodi" + +msgctxt "#33124" +msgid "Include people (slow)" +msgstr "Mensen meerekenen (langzaam)" + +msgctxt "#33125" +msgid "" +"Choose the Emby views to sync to Kodi. You can optionally sync libraries at " +"a later time." +msgstr "" +"Kies de Emby zichten om naar Kodi te synchroniseren. U kunt later de " +"bibliotheken optioneel synchroniseren." + +msgctxt "#33126" +msgid "Sync later" +msgstr "Later synchroniseren" + +msgctxt "#33127" +msgid "Proceed" +msgstr "Doorgaan" + +msgctxt "#33128" +msgid "" +"Failed to retrieve latest content updates. No content updates will be " +"applied until Kodi is restarted. If this issue persists, please report on " +"the Emby for Kodi forums, with your Kodi log." +msgstr "" +"Het is mislukt om de laatste inhoud updates te verkrijgen. Er worden geen " +"inhoud updates toe gepast totdat Kodi opnieuw is opgestart. Als deze fout " +"blijft komen, meld u dan op de Emby for Kodi forums met uw Kodi log." + +msgctxt "#33129" +msgid "You can sync libraries by launching the Emby add-on > Add libraries." +msgstr "" +"U kunt bibliotheken synchroniseren door de Emby add-on > bibliotheken " +"toevoegen te starten." + +msgctxt "#33130" +msgid "Select the source" +msgstr "Bron selecteren" + +msgctxt "#33131" +msgid "Refreshing boxsets" +msgstr "Boxsets vernieuwen" + +msgctxt "#33132" +msgid "Repair library" +msgstr "Bibliotheek repareren" + +msgctxt "#33133" +msgid "Remove library from Kodi" +msgstr "Bibliotheek uit Kodi verwijderen" + +msgctxt "#33134" +msgid "Add server" +msgstr "Server toevoegen" + +msgctxt "#33135" +msgid "Kodi will now restart to apply a small patch for your Kodi version." 
+msgstr "" +"Kodi start opnieuw op om een kleine update toe te passen voor uw Kodi " +"versie." + +msgctxt "#33136" +msgid "Update library" +msgstr "Bibliotheek updaten" + +msgctxt "#33137" +msgid "Enable Kodi companion" +msgstr "Kodi companion inschakelen" + +msgctxt "#33138" +msgid "" +"You can update your library manually rather than rely on the server plugin " +"Kodi companion. Launch the add-on and update libraries (or per library). To " +"remove content, you'll need to repair the library." +msgstr "" +"U kunt uw bibliotheek manueel updaten in plaats van op de server plugin Kodi" +" companion te vertrouwen. Start de add-on en update bibliotheken (of per " +"bibliotheek). Om inhoud te verwijderen moet u de blibliotheek repareren." + +msgctxt "#33139" +msgid "Update libraries" +msgstr "Bibliotheken updaten" + +msgctxt "#33140" +msgid "Repair libraries" +msgstr "Bibliotheken repareren" + +msgctxt "#33141" +msgid "Remove server" +msgstr "Server verwijderen" + +msgctxt "#33142" +msgid "Something went wrong. Try again later." +msgstr "Er is iets fout gegaan. Probeer het later nog eens." + +msgctxt "#33143" +msgid "Enable the option to delete" +msgstr "Schakel de optie om te verwijderen in" + +msgctxt "#33144" +msgid "Removing library" +msgstr "Bibliotheek wordt verwijderen" + +msgctxt "#33145" +msgid "" +"Please make sure your Samba (smb) share of your Emby server is accessible to" +" your Kodi installation and that you have path substitution configured on " +"your server. Otherwise, Kodi may fail to locate your files." +msgstr "" +"Controleer of uw Samba (smb) gedeelde map van uw Emby server toegankelijk is" +" voor uw Kodi installatie en dat path substitution op uw server is " +"ingesteld. anders kan voorkomen dat het Kodi mislukt uw bestanden te vinden." + +msgctxt "#33146" +msgid "Unable to connect to Emby." +msgstr "Niet in staat om met Emby te verbinden." + +msgctxt "#33147" +msgid "Your access to Emby is restricted." +msgstr "Uw toegang tot Emby is beperkt." + +msgctxt "#33148" +msgid "Your access to this server is restricted." +msgstr "Uw toegang tot deze server is beperkt." + +msgctxt "#33149" +msgid "Unable to connect to this server." +msgstr "Niet in staat om met deze server te verbinden." + +msgctxt "#33150" +msgid "Update server information" +msgstr "Server information updaten" + +msgctxt "#33151" +msgid "" +"Reconnect to the same server that was previously loaded. If you want to use " +"a different server, reset your local database, including your user " +"information." +msgstr "" +"Verbind opnieuw met de server die voorheen geladen was. als u een andere " +"server gebruiken wilt, reset dan de lokale databank inclusief uw gebruikers " +"informatie." + +msgctxt "#33152" +msgid "Unable to locate TV Tunes in Kodi." +msgstr "Niet in staat om TV Tunes in Kodi te vinden." + +msgctxt "#33153" +msgid "Your Emby theme media has been synced to Kodi" +msgstr "Uw Emby thema media is gesynchroniseerd met Kodi" + +msgctxt "#33154" +msgid "Add libraries" +msgstr "Bibliotheken toevoegen" + +msgctxt "#33155" +msgid "" +"The currently applied patch for Emby for Kodi is corrupted! Please post to " +"the Emby for Kodi forums if this issue persists. This will need to be fixed " +"as soon as possible." +msgstr "" +"De huidige toegepaste patch voor Emby is beschadigd! Maak alstublieft een " +"melding op de Emby voor Kodi forums als het probleem blijft. Dit moet yo " +"snel als mogelijk verholpen worden." + +msgctxt "#33156" +msgid "A patch has been applied!" +msgstr "Een patch is toegepast!" 
+ +msgctxt "#33157" +msgid "Audio only" +msgstr "Alleen audio" + +msgctxt "#33158" +msgid "Subtitles only" +msgstr "Alleen ondertitels" + +msgctxt "#33159" +msgid "Enable audio/subtitles selection" +msgstr "Schaken audio/ondertitel keuze in" + +msgctxt "#33160" +msgid "To avoid errors, please update Emby for Kodi to version: " +msgstr "Om problemen voorkomen, update Emby for Kodi naar versie:" + +msgctxt "#33161" +msgid "Check for updates" +msgstr "Naar updates zoeken" + +msgctxt "#33162" +msgid "Reset the music library?" +msgstr "Muziek bibliotheek resetten?" + +msgctxt "#33163" +msgid "Support this project" +msgstr "Dit project ondersteunen" + +msgctxt "#33164" +msgid "Mask sensitive information in log (does not apply to kodi logging)" +msgstr "" +"Gevoelige informatie in log verbergen (wordt niet toegepast op Kodi log)" + +msgctxt "#33165" +msgid "Failed to create backup" +msgstr "Het maken van een backup is mislukt" + +msgctxt "#33166" +msgid "(dynamic)" +msgstr "(dynamisch)" + +msgctxt "#33167" +msgid "Recently added" +msgstr "Recent toegevoegd" + +msgctxt "#33168" +msgid "Favourites" +msgstr "Favorieten" + +msgctxt "#33169" +msgid "In Progress" +msgstr "In bewerking" + +msgctxt "#33170" +msgid "Unwatched" +msgstr "Niet bekeken" + +msgctxt "#33171" +msgid "By first letter" +msgstr "Op eerste letter" + +msgctxt "#33172" +msgid "" +"You have {number} updates pending. This may take a little while before " +"seeing new content. It might be faster to update your libraries via " +"launching the Emby add-on > update libraries. Proceed anyway?" +msgstr "" +"U heeft {number} wachtende updates. Het kan misschien even during voordat u " +"nieuwe inhoud ziet. Misschien gaat het updaten van uw bibliotheken sneller " +"door de Emby add-on bibliotheken updaten te starten. Toch doorgaan?" + +msgctxt "#33173" +msgid "Forget about the previous sync? This is not recommended." +msgstr "Vorige synchronisatie vergeten? Dit is niet aanbevolen." + +msgctxt "#33174" +msgid "Paging - download threads (default: 3)" +msgstr "Paging - download threads (standaard: 3)" + +msgctxt "#33175" +msgid "" +"Paging tip: Each download thread requests your max items value from Emby at " +"the same time." +msgstr "" +"Paging tip: Elke download thread vraagt naar uw maximum items waarde by Emby" +" op hetzelfde moment." + +msgctxt "#33176" +msgid "Update or repair your libraries to apply the changes below." +msgstr "" +"Update of herstel uw bibliotheken om onderstaande veranderingen toe te " +"passen." + +msgctxt "#33177" +msgid "Display the progress bar if update count greater than" +msgstr "Toon de voortgangsbalk als het aantal updates groter is dan" + +msgctxt "#33178" +msgid "Processing updates" +msgstr "Updates verwerken" + +msgctxt "#33179" +msgid "Force transcode" +msgstr "Transkodering dwingen" + +msgctxt "#33180" +msgid "Restart Emby for Kodi" +msgstr "Emby for Kodi opnieuw starten" + +msgctxt "#33181" +msgid "Restarting to apply the patch" +msgstr "Er wordt opnieuw opgestart om de patch toe te passen" + +msgctxt "#33182" +msgid "Play with cinema mode" +msgstr "Afspelen met Cinema modus" + +msgctxt "#33183" +msgid "Enable the option to play with cinema mode" +msgstr "Cinema modus optie inschakelen" + +msgctxt "#33184" +msgid "Remove libraries" +msgstr "Bibliotheken verwijderen" + +msgctxt "#33185" +msgid "Enable sync during playback (may cause some lag)" +msgstr "Synchronisatie tijdens afspelen toestaan (kan vertraging veroorzaken)" + +msgctxt "#33186" +msgid "" +"The Kodi companion speeds up the start up sync. 
Other syncs are triggered by" +" server events." +msgstr "" +"De Kodi companion maakt het opstarten van de synchronisatie sneller. Andere " +"synchronisaties worden aangeroepen door server voorvallen." + +msgctxt "#33187" +msgid "Sync Rotten Tomatoes ratings" +msgstr "Rotten Tomatoes ratings synchroniseren" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "Rotten Tomatoes ratings synchroniseren?" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "Emby for Kodi opnieuw opstarten om de wijziging toe te passen?" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "Emby for Kodi opnieuw starten" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "Opnieuw opstarten..." + +msgctxt "#33194" +msgid "Manage libraries" +msgstr "Bibliotheken beheren" + +msgctxt "#33195" +msgid "Enable Emby for Kodi" +msgstr "Emby for Kodi inschakelen" + +msgctxt "#33196" +msgid "Advanced options" +msgstr "Geavanceerde opties" + +msgctxt "#33197" +msgid "" +"A sync is already running, please wait until it completes and try again." +msgstr "" +"Synchronisatie is al in voortgang, wacht tot deze compleet is en probeer het" +" nog eens." diff --git a/resources/language/resource.language.pl_pl/strings.po b/resources/language/resource.language.pl_pl/strings.po new file mode 100644 index 00000000..7877ad59 --- /dev/null +++ b/resources/language/resource.language.pl_pl/strings.po @@ -0,0 +1,1058 @@ +# Emby for Kodi language file +# Addon Name: Emby for Kodi +# Addon id: plugin.video.emby +# Addon Provider: angelblue05 +# Translators: +# Michał Sawicz <michal@sawicz.net>, 2019 +# +msgid "" +msgstr "" +"Project-Id-Version: Emby for Kodi\n" +"POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE\n" +"PO-Revision-Date: 2018-09-07 20:10+0000\n" +"Last-Translator: Michał Sawicz <michal@sawicz.net>, 2019\n" +"Language-Team: Polish (https://www.transifex.com/emby-for-kodi/teams/91090/pl/)\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Language: pl\n" +"Plural-Forms: nplurals=4; plural=(n==1 ? 0 : (n%10>=2 && n%10<=4) && (n%100<12 || n%100>14) ? 1 : n!=1 && (n%10>=0 && n%10<=1) || (n%10>=5 && n%10<=9) || (n%100>=12 && n%100<=14) ? 2 : 3);\n" + +msgctxt "#29999" +msgid "Emby for Kodi" +msgstr "Emby dla Kodi" + +msgctxt "#30000" +msgid "Server address" +msgstr "Adres serwera" + +msgctxt "#30001" +msgid "Server name" +msgstr "Nazwa serwera" + +msgctxt "#30002" +msgid "Force HTTP playback" +msgstr "Wymuś odtwarzanie HTTP" + +msgctxt "#30003" +msgid "Login method" +msgstr "Metoda logowania" + +msgctxt "#30004" +msgid "Log level" +msgstr "Poziom dziennika zdarzeń" + +msgctxt "#30016" +msgid "Device name" +msgstr "Nazwa urządzenia" + +msgctxt "#30022" +msgid "Advanced" +msgstr "Zaawansowane" + +msgctxt "#30024" +msgid "Username" +msgstr "Nazwa użytkownika" + +msgctxt "#30030" +msgid "Port number" +msgstr "Port" + +msgctxt "#30091" +msgid "Confirm file deletion" +msgstr "Potwierdzanie usuwania plików" + +msgctxt "#30114" +msgid "Offer delete after playback" +msgstr "Zapytaj o usunięcie po odtwarzaniu" + +msgctxt "#30115" +msgid "For Episodes" +msgstr "Dla odcinków" + +msgctxt "#30116" +msgid "For Movies" +msgstr "Dla filmów" + +msgctxt "#30157" +msgid "Enable enhanced artwork (i.e. cover art)" +msgstr "Włącz rozszerzone grafiki (np. 
okładka)" + +msgctxt "#30160" +msgid "Video quality" +msgstr "Jakość wideo" + +msgctxt "#30170" +msgid "Recently Added TV Shows" +msgstr "Niedawno dodane seriale" + +msgctxt "#30171" +msgid "In Progress TV Shows" +msgstr "Seriale w trakcie oglądania" + +msgctxt "#30174" +msgid "Recently Added Movies" +msgstr "Niedawno dodane filmy" + +msgctxt "#30175" +msgid "Recently Added Episodes" +msgstr "Niedawno dodane odcinki" + +msgctxt "#30177" +msgid "In Progress Movies" +msgstr "Filmy w trakcie ogladania" + +msgctxt "#30178" +msgid "In Progress Episodes" +msgstr "Odcinki w trakcie oglądania" + +msgctxt "#30179" +msgid "Next Episodes" +msgstr "Następne odcinki" + +msgctxt "#30180" +msgid "Favorite Movies" +msgstr "Ulubione filmy" + +msgctxt "#30181" +msgid "Favorite Shows" +msgstr "Ulubione seriale" + +msgctxt "#30182" +msgid "Favorite Episodes" +msgstr "Ulubione odcinki" + +msgctxt "#30185" +msgid "Boxsets" +msgstr "Kolekcje" + +msgctxt "#30189" +msgid "Unwatched Movies" +msgstr "Nieobejrzane filmy" + +msgctxt "#30229" +msgid "Random Items" +msgstr "Losowe pozycje" + +msgctxt "#30230" +msgid "Recommended Items" +msgstr "Pozycje rekomendowane" + +msgctxt "#30235" +msgid "Interface" +msgstr "Interfejs" + +msgctxt "#30239" +msgid "Reset local Kodi database" +msgstr "Wyczyść lokalną bazę Kodi" + +msgctxt "#30249" +msgid "Enable welcome message" +msgstr "Włącz wiadomość powitalną" + +msgctxt "#30251" +msgid "Recently added Home Videos" +msgstr "Niedawno dodane wideo" + +msgctxt "#30252" +msgid "Recently added Photos" +msgstr "Niedawno dodane zdjęcia" + +msgctxt "#30253" +msgid "Favourite Home Videos" +msgstr "Ulubione wideo" + +msgctxt "#30254" +msgid "Favourite Photos" +msgstr "Ulubione zdjęcia" + +msgctxt "#30255" +msgid "Favourite Albums" +msgstr "Ulubione albumy" + +msgctxt "#30256" +msgid "Recently added Music videos" +msgstr "Niedawno dodane teledyski" + +msgctxt "#30257" +msgid "In progress Music videos" +msgstr "Teledyski w trakcie odtwarzania" + +msgctxt "#30258" +msgid "Unwatched Music videos" +msgstr "Nieobejrzane teledyski" + +msgctxt "#30302" +msgid "Movies" +msgstr "Filmy" + +msgctxt "#30305" +msgid "TV Shows" +msgstr "Seriale" + +msgctxt "#30401" +msgid "Emby options" +msgstr "Opcje Emby" + +msgctxt "#30402" +msgid "Emby transcode" +msgstr "Transkodowanie Emby" + +msgctxt "#30405" +msgid "Add to favorites" +msgstr "Dodaj do ulubionych" + +msgctxt "#30406" +msgid "Remove from favorites" +msgstr "Usuń z ulubionych" + +msgctxt "#30408" +msgid "Settings" +msgstr "Ustawienia" + +msgctxt "#30409" +msgid "Delete from Emby" +msgstr "Usuń z Emby" + +msgctxt "#30410" +msgid "Refresh this item" +msgstr "Odśwież tę pozycję" + +msgctxt "#30412" +msgid "Transcode" +msgstr "Transkoduj" + +msgctxt "#30500" +msgid "Verify connection" +msgstr "Sprawdź połączenie" + +msgctxt "#30504" +msgid "Use altername device name" +msgstr "Użyj własnej nazwy urządzenia" + +msgctxt "#30506" +msgid "Sync" +msgstr "Synchronizacja" + +msgctxt "#30507" +msgid "Enable notification if update count is greater than" +msgstr "Wyświetl powiadomienie jeśli ilość aktualizacji przekroczy" + +msgctxt "#30508" +msgid "Sync empty shows" +msgstr "Synchronizuj puste seriale" + +msgctxt "#30509" +msgid "Enable music library" +msgstr "Włącz bibliotekę muzyki" + +msgctxt "#30511" +msgid "Playback mode" +msgstr "Tryb odtwarzania" + +msgctxt "#30512" +msgid "Enable artwork caching" +msgstr "Włącz pobieranie grafiki" + +msgctxt "#30515" +msgid "Paging - max items requested (default: 15)" +msgstr "Paginacja - maksymalna ilość pobieranych 
pozycji (domyślnie: 15)" + +msgctxt "#30516" +msgid "Playback" +msgstr "Odtwarzanie" + +msgctxt "#30517" +msgid "Network credentials" +msgstr "Dane uwierzytelniania" + +msgctxt "#30518" +msgid "Enable cinema mode" +msgstr "Włącz tryb kinowy" + +msgctxt "#30519" +msgid "Ask to play trailers" +msgstr "Zapytaj o odtwarzanie zwiastunów" + +msgctxt "#30520" +msgid "Skip the delete confirmation (use at your own risk)" +msgstr "Pomiń potwierdzenie usunięcia (niebezpieczne)" + +msgctxt "#30521" +msgid "Jump back on resume (in seconds)" +msgstr "Cofnij przy wznawianiu odtwarzania (w sekundach)" + +msgctxt "#30522" +msgid "Transcode H265/HEVC" +msgstr "Transkoduj H265/HEVC" + +msgctxt "#30527" +msgid "Ignore specials in next episodes" +msgstr "Ignoruj odcinki specjalne w następnych odcinkach" + +msgctxt "#30528" +msgid "Permanent users" +msgstr "Stali użytkownicy" + +msgctxt "#30529" +msgid "Startup delay (in seconds)" +msgstr "Opóźnienie uruchomienia (w sekundach)" + +msgctxt "#30530" +msgid "Enable server restart message" +msgstr "Włącz powiadomienie o restarcie serwera" + +msgctxt "#30531" +msgid "Enable new content" +msgstr "Włącz powiadomienie o nowej zawartości" + +msgctxt "#30532" +msgid "Duration of the video library pop up" +msgstr "Czas wyświetlania powiadomienia o bibliotece wideo" + +msgctxt "#30533" +msgid "Duration of the music library pop up" +msgstr "Czas wyświetlania powiadomienia o bibliotece muzyki" + +msgctxt "#30534" +msgid "Notifications (in seconds)" +msgstr "Powiadomienia (w sekundach)" + +msgctxt "#30535" +msgid "Generate a new device Id" +msgstr "Wygeneruj nowy identyfikator urządzenia" + +msgctxt "#30536" +msgid "Allow the screensaver during syncs" +msgstr "Pozwól na wygaszenie ekranu podczas synchronizacji" + +msgctxt "#30537" +msgid "Transcode Hi10P" +msgstr "Transkoduj Hi10P" + +msgctxt "#30539" +msgid "Login" +msgstr "Logowanie" + +msgctxt "#30540" +msgid "Manual login" +msgstr "Logowanie ręczne" + +msgctxt "#30543" +msgid "Username or email" +msgstr "Nazwa użytkownika lub adres e-mail" + +msgctxt "#30545" +msgid "Enable server offline" +msgstr "Włącz powiadomienie o niedostępności serwera" + +msgctxt "#30547" +msgid "Display message" +msgstr "Wyświetl wiadomość" + +msgctxt "#30600" +msgid "Sign in with Emby Connect" +msgstr "Zaloguj z Emby Connect" + +msgctxt "#30602" +msgid "Password" +msgstr "Hasło" + +msgctxt "#30605" +msgid "Sign in" +msgstr "Zaloguj" + +msgctxt "#30606" +msgid "Cancel" +msgstr "Anuluj" + +msgctxt "#30607" +msgid "Select main server" +msgstr "Wybierz serwer podstawowy" + +msgctxt "#30608" +msgid "Username or password cannot be empty" +msgstr "Nazwa użytkownika i hasło nie mogą być puste" + +msgctxt "#30609" +msgid "Unable to connect to the selected server" +msgstr "Nie udało się połączyć z wybranym serwerem" + +msgctxt "#30610" +msgid "Connect to" +msgstr "Podłącz do" + +msgctxt "#30611" +msgid "Manually add server" +msgstr "Ręcznie dodaj serwer" + +msgctxt "#30612" +msgid "Please sign in" +msgstr "Proszę się zalogować" + +msgctxt "#30613" +msgid "Change Emby Connect user" +msgstr "Zmień użytkownika Emby Connect" + +msgctxt "#30614" +msgid "Connect to server" +msgstr "Połącz z serwerem" + +msgctxt "#30615" +msgid "Host" +msgstr "Nazwa hosta" + +msgctxt "#30616" +msgid "Connect" +msgstr "Połącz" + +msgctxt "#30617" +msgid "Server or port cannot be empty" +msgstr "Nazwa hosta i port nie mogą być puste" + +msgctxt "#30618" +msgid "Change Emby Connect user" +msgstr "Zmień użytkownika Emby Connect" + +msgctxt "#33000" +msgid "Welcome" +msgstr "Witaj" + 
+msgctxt "#33006" +msgid "Server is restarting" +msgstr "Serwer jest uruchamiany ponownie" + +msgctxt "#33009" +msgid "Invalid username or password" +msgstr "Błędna nazwa użytkownika lub hasło" + +msgctxt "#33013" +msgid "Choose the audio stream" +msgstr "Wybierz ścieżkę dźwiękową" + +msgctxt "#33014" +msgid "Choose the subtitles stream" +msgstr "Wybierz napisy" + +msgctxt "#33015" +msgid "Delete file from Emby?" +msgstr "Usunąć plik z Emby?" + +msgctxt "#33016" +msgid "Play trailers?" +msgstr "Odtwarzać zwiastuny?" + +msgctxt "#33018" +msgid "Gathering boxsets" +msgstr "Pobieranie kolekcji" + +msgctxt "#33021" +msgid "Gathering:" +msgstr "Pobieranie:" + +msgctxt "#33022" +msgid "" +"Detected the database needs to be recreated for this version of Emby for " +"Kodi. Proceed?" +msgstr "" +"Wykryto konieczność rekonstrukcji bazy dla tej wersji Emby dla Kodi. " +"Kontynuować?" + +msgctxt "#33023" +msgid "Emby for Kodi will not work correctly until the database is reset." +msgstr "" +"Emby dla Kodi nie będzie działało poprawnie dopóki baza nie zostanie " +"zresetowana." + +msgctxt "#33025" +msgid "Completed in:" +msgstr "Zakończono w:" + +msgctxt "#33033" +msgid "A new device Id has been generated. Kodi will now restart." +msgstr "" +"Wygenerowano nowy identyfikator urządzenia. Kodi zostanie uruchomione " +"ponownie." + +msgctxt "#33035" +msgid "" +"Caution! If you choose Native mode, certain Emby features will be missing, " +"such as: Emby cinema mode, direct stream/transcode options and parental " +"access schedule." +msgstr "" +"Uwaga! Jeśli wybierzesz tryb Natywny, niektóre funkcje Emby będą " +"niedostępne, np.: tryb kinowy, opcje strumieniowania bezpośredniego i " +"transkodowania, harmonogram ograniczeń wiekowych." + +msgctxt "#33036" +msgid "Add-on (default)" +msgstr "Dodatek (domyślnie)" + +msgctxt "#33037" +msgid "Native (direct paths)" +msgstr "Natywnie (bezpośredni dostęp do plików)" + +msgctxt "#33039" +msgid "Enable music library?" +msgstr "Włączyć bibliotekę muzyki?" + +msgctxt "#33047" +msgid "Kodi can't locate file:" +msgstr "Kodi nie odnalazł pliku:" + +msgctxt "#33048" +msgid "" +"You may need to verify your network credentials in the add-on settings or " +"use the Emby path substitution to format your path correctly (Emby dashboard" +" > library). Stop syncing?" +msgstr "" +"Być może należy sprawdzić dane logowania w sieci w ustawieniach dodatku lub " +"format podstawienia ścieżki w Emby (Kokpit Emby > Biblioteki). Zatrzymać " +"synchronizację?" + +msgctxt "#33049" +msgid "New" +msgstr "Nowe" + +msgctxt "#33054" +msgid "Add user to session" +msgstr "Dodaj użytkownika do sesji" + +msgctxt "#33058" +msgid "Perform local database reset" +msgstr "Zresetuj lokalną bazę danych" + +msgctxt "#33060" +msgid "Sync theme media" +msgstr "Synchronizuj media przewodnie" + +msgctxt "#33061" +msgid "Add/Remove user from the session" +msgstr "Dodaj/usuń użytkownika z sesji" + +msgctxt "#33062" +msgid "Add user" +msgstr "Dodaj użytkownika" + +msgctxt "#33063" +msgid "Remove user" +msgstr "Usuń użytkownika" + +msgctxt "#33064" +msgid "Remove user from the session" +msgstr "Usuń użytkownika z sesji" + +msgctxt "#33074" +msgid "Are you sure you want to reset your local Kodi database?" +msgstr "Na pewno chcesz zresetować lokalną bazę Kodi?" + +msgctxt "#33086" +msgid "Remove all cached artwork?" +msgstr "Usunąć pobrane grafiki?" + +msgctxt "#33087" +msgid "Reset all Emby add-on settings?" +msgstr "Zresetować wszystkie ustawienia dodatku Emby?" 
+ +msgctxt "#33088" +msgid "" +"Database reset has completed, Kodi will now restart to apply the changes." +msgstr "" +"Zresetowano bazę danych, Kodi zostanie teraz zrestartowane, by zatwierdzić " +"zmiany." + +msgctxt "#33089" +msgid "Enter folder name for backup" +msgstr "Podaj nazwę folderu kopii zapasowej" + +msgctxt "#33090" +msgid "Replace existing backup?" +msgstr "Nadpisać istniejącą kopię zapasową?" + +msgctxt "#33091" +msgid "Created backup at:" +msgstr "Zapisano kopię zapasową w:" + +msgctxt "#33092" +msgid "Create a backup" +msgstr "Utwórz kopię zapasową" + +msgctxt "#33093" +msgid "Backup folder" +msgstr "Folder kopii zapasowej" + +msgctxt "#33097" +msgid "" +"Important, cleanonupdate was removed in your advanced settings to prevent " +"conflict with Emby for Kodi. Kodi will restart now." +msgstr "" +"Ważne: \"cleanonupdate\" zostało usunięte z ustawień zaawansowanych aby " +"zapobiec konfliktowi z Emby dla Kodi. Kodi zostanie uruchomione ponownie." + +msgctxt "#33098" +msgid "Refresh boxsets" +msgstr "Odśwież kolekcje" + +msgctxt "#33099" +msgid "" +"Install the server plugin Kodi companion to automatically apply emby library" +" updates at startup. This setting can be found in the add-on settings > sync" +" options > Enable Kodi Companion." +msgstr "" +"Zainstaluj wtyczkę serwera Emby \"Kodi Companion\" by automatycznie pobierać" +" aktualizacje bazy danych. Właściwa opcja znajduje się w ustawieniach " +"dodatku > Synchronizacja > Włącz Kodi Companion." + +msgctxt "#33100" +msgid "Would you like to sync empty shows?" +msgstr "Synchronizować puste seriale?" + +msgctxt "#33101" +msgid "" +"Since you are using native playback mode with music enabled, do you want to " +"import music rating from files?" +msgstr "" +"Ponieważ używasz natywnego trybu odtwarzania muzyki, czy importować ocenę " +"muzyki z plików?" + +msgctxt "#33102" +msgid "Resume the previous sync?" +msgstr "Wznowić poprzednią synchronizację?" + +msgctxt "#33103" +msgid "" +"Enable the webserver service in the Kodi settings to allow artwork caching." +msgstr "" +"Włącz usługę serwera w ustawieniach Kodi by pozwolić na pobieranie grafik." + +msgctxt "#33104" +msgid "Find more info in the github wiki/Create-and-restore-from-backup." +msgstr "Dowiedz się więcej na github: wiki/Create-and-restore-from-backup" + +msgctxt "#33105" +msgid "Enable the context menu" +msgstr "Włącz menu podręczne" + +msgctxt "#33106" +msgid "Enable the option to transcode" +msgstr "Włącz opcję transkodowania" + +msgctxt "#33107" +msgid "" +"Users added to the session (no space between users). (eg username,username2)" +msgstr "" +"Użytkownicy dodawani do sesji (bez spacji). (np. użytkownik,użytkownik2)" + +msgctxt "#33108" +msgid "Notifications are delayed during video playback (except live tv)." +msgstr "" +"Powiadomienia zostaną wyświetlone po zakończeniu odtwarzania (z wyjątkiem " +"telewizji na żywo)." + +msgctxt "#33109" +msgid "Plugin" +msgstr "Dodatek" + +msgctxt "#33110" +msgid "Restart Kodi to take effect." +msgstr "Zrestartuj Kodi by zobaczyć zmianę." + +msgctxt "#33111" +msgid "Reset the local database to apply the playback mode change." +msgstr "Zresetuj lokalną bazę danych by zmienić tryb odtwarzania." 
+ +msgctxt "#33112" +msgid "Applies to Native and Add-on playback mode" +msgstr "Dotyczy obu trybów odtwarzania" + +msgctxt "#33113" +msgid "Applies to Add-on playback mode only" +msgstr "Dotyczy tylko trybu odtwarzania z dodatkiem" + +msgctxt "#33114" +msgid "Enable external subtitles" +msgstr "Włącz napisy zewnętrzne" + +msgctxt "#33115" +msgid "Adjust for remote connection" +msgstr "Dostosuj dla połączenia zdalnego" + +msgctxt "#33116" +msgid "Compress artwork (reduces quality)" +msgstr "Kompresuj grafiki (zmniejsza jakość)" + +msgctxt "#33117" +msgid "" +"Enable artwork caching? If not, Kodi will still cache your artwork at a " +"slower pace." +msgstr "" +"Włączyć pobieranie grafiki? W innym przypadku Kodi będzie również je " +"pobierać, tylko wolniej." + +msgctxt "#33118" +msgid "" +"You've change the playback mode. Kodi needs to be reset to apply the change," +" would you like to do this now?" +msgstr "" +"Zmieniono tryb odtwarzania. Baza Kodi musi zostać zresetowana by to " +"zadziałało. Zrobić to teraz?" + +msgctxt "#33119" +msgid "" +"Something went wrong during the sync. You'll be able to restore progress " +"when restarting Kodi. If the problem persists, please report on the Emby for" +" Kodi forums, with your Kodi log." +msgstr "" +"Coś poszło nie tak podczas synchronizacji. Po uruchomieniu Kodi ponownie " +"będzie możliwe jej wznowienie. Jeśli problem się powtórzy, proszę zgłoś go " +"na forum Emby for Kodi, dołączając dziennik zdarzeń Kodi." + +msgctxt "#33120" +msgid "Select the libraries to add" +msgstr "Wybierz biblioteki do dodania" + +msgctxt "#33121" +msgid "All" +msgstr "Wszystkie" + +msgctxt "#33122" +msgid "Restart Kodi to resume where you left off." +msgstr "Zrestartuj Kodi by wznowić synchronizację." + +msgctxt "#33123" +msgid "Sync library to Kodi" +msgstr "Synchronizuj biblioteki z Kodi" + +msgctxt "#33124" +msgid "Include people (slow)" +msgstr "Pobieraj twórców (wolne)" + +msgctxt "#33125" +msgid "" +"Choose the Emby views to sync to Kodi. You can optionally sync libraries at " +"a later time." +msgstr "" +"Wybierz biblioteki Emby do synchronizacji z Kodi. Możesz to także zrobić " +"później." + +msgctxt "#33126" +msgid "Sync later" +msgstr "Później" + +msgctxt "#33127" +msgid "Proceed" +msgstr "Dalej" + +msgctxt "#33128" +msgid "" +"Failed to retrieve latest content updates. No content updates will be " +"applied until Kodi is restarted. If this issue persists, please report on " +"the Emby for Kodi forums, with your Kodi log." +msgstr "" +"Nie udało się pobrać ostatnich aktualizacji. Nie zostaną one zapisane dopóki" +" Kodi nie zostanie zrestartowane. Jeśli problem się powtórzy, proszę zgłoś " +"go na forum Emby for Kodi, dołączając dziennik zdarzeń Kodi." + +msgctxt "#33129" +msgid "You can sync libraries by launching the Emby add-on > Add libraries." +msgstr "" +"Możesz zsynchronizować biblioteki wybierając dodatek Emby > Dodaj biblioteki" + +msgctxt "#33130" +msgid "Select the source" +msgstr "Wybierz źródło" + +msgctxt "#33131" +msgid "Refreshing boxsets" +msgstr "Odświeżanie kolekcji" + +msgctxt "#33132" +msgid "Repair library" +msgstr "Napraw bibliotekę" + +msgctxt "#33133" +msgid "Remove library from Kodi" +msgstr "Usuń bibliotekę z Kodi" + +msgctxt "#33134" +msgid "Add server" +msgstr "Dodaj serwer" + +msgctxt "#33135" +msgid "Kodi will now restart to apply a small patch for your Kodi version." +msgstr "Kodi zostanie zrestartowane by dodać poprawkę do twojej wersji." 
+ +msgctxt "#33136" +msgid "Update library" +msgstr "Zaktualizuj bibliotekę" + +msgctxt "#33137" +msgid "Enable Kodi companion" +msgstr "Włącz Kodi Companion" + +msgctxt "#33138" +msgid "" +"You can update your library manually rather than rely on the server plugin " +"Kodi companion. Launch the add-on and update libraries (or per library). To " +"remove content, you'll need to repair the library." +msgstr "" +"Możesz ręcznie aktualizować bibliotekę zamiast polegać na wtyczce serwera " +"Kodi Companion. Wybierz \"Zaktualizuj biblioteki\" w dodatku Emby. Aby " +"usunąć zawartość, należy naprawić biblioteki." + +msgctxt "#33139" +msgid "Update libraries" +msgstr "Zaktualizuj biblioteki" + +msgctxt "#33140" +msgid "Repair libraries" +msgstr "Napraw biblioteki" + +msgctxt "#33141" +msgid "Remove server" +msgstr "Usuń serwer" + +msgctxt "#33142" +msgid "Something went wrong. Try again later." +msgstr "Coś poszło nie tak. Spróbuj ponownie później." + +msgctxt "#33143" +msgid "Enable the option to delete" +msgstr "Włącz opcję usuwania" + +msgctxt "#33144" +msgid "Removing library" +msgstr "Usuwanie biblioteki" + +msgctxt "#33145" +msgid "" +"Please make sure your Samba (smb) share of your Emby server is accessible to" +" your Kodi installation and that you have path substitution configured on " +"your server. Otherwise, Kodi may fail to locate your files." +msgstr "" +"Upewnij się, że udział Samba (smb) twojego serwera Emby jest dostępny dla " +"tej instalacji Kodi i udostępniony folder sieciowy jest skonfigurowany na " +"serwerze. W przeciwnym razie Kodi może nie odnaleźć twoich plików." + +msgctxt "#33146" +msgid "Unable to connect to Emby." +msgstr "Błąd połączenia z Emby" + +msgctxt "#33147" +msgid "Your access to Emby is restricted." +msgstr "Dostęp do Emby jest ograniczony." + +msgctxt "#33148" +msgid "Your access to this server is restricted." +msgstr "Dostęp do tego serwera jest ograniczony." + +msgctxt "#33149" +msgid "Unable to connect to this server." +msgstr "Błąd połączenia z tym serwerem." + +msgctxt "#33150" +msgid "Update server information" +msgstr "Aktualizuj informacje o serwerze" + +msgctxt "#33151" +msgid "" +"Reconnect to the same server that was previously loaded. If you want to use " +"a different server, reset your local database, including your user " +"information." +msgstr "" +"Połącz się z poprzednim serwerem. Jeśli chcesz zmienić serwer, zresetuj " +"lokalną bazę danych wraz z informacjami o użytkownikach." + +msgctxt "#33152" +msgid "Unable to locate TV Tunes in Kodi." +msgstr "Nie znaleziono TV Tunes w Kodi." + +msgctxt "#33153" +msgid "Your Emby theme media has been synced to Kodi" +msgstr "Media przewodnie zostały zsynchronizowane z Kodi" + +msgctxt "#33154" +msgid "Add libraries" +msgstr "Dodaj biblioteki" + +msgctxt "#33155" +msgid "" +"The currently applied patch for Emby for Kodi is corrupted! Please post to " +"the Emby for Kodi forums if this issue persists. This will need to be fixed " +"as soon as possible." +msgstr "" +"Zainstalowana poprawka Emby dla Kodi jest uszkodzona! Proszę zgłoś ten błąd " +"na forum Emby for Kodi jeśli problem się powtórzy. Konieczna jest szybka " +"aktualizacja." + +msgctxt "#33156" +msgid "A patch has been applied!" +msgstr "Zainstalowano poprawkę!" 
+ +msgctxt "#33157" +msgid "Audio only" +msgstr "Tylko dźwięk" + +msgctxt "#33158" +msgid "Subtitles only" +msgstr "Tylko napisy" + +msgctxt "#33159" +msgid "Enable audio/subtitles selection" +msgstr "Włącz wybór ścieżki dźwiękowej/napisów" + +msgctxt "#33160" +msgid "To avoid errors, please update Emby for Kodi to version: " +msgstr "Aby uniknąć błędów, należy zaktualizować Emby dla Kodi do wersji:" + +msgctxt "#33161" +msgid "Check for updates" +msgstr "Sprawdź aktualizacje" + +msgctxt "#33162" +msgid "Reset the music library?" +msgstr "Zresetować bibliotekę muzyki?" + +msgctxt "#33163" +msgid "Support this project" +msgstr "Wesprzyj projekt" + +msgctxt "#33164" +msgid "Mask sensitive information in log (does not apply to kodi logging)" +msgstr "" +"Ukryj informacje prywatne w dzienniku zdarzeń (nie dotyczy dziennika Kodi)" + +msgctxt "#33165" +msgid "Failed to create backup" +msgstr "Błąd tworzenia kopii zapasowej" + +msgctxt "#33166" +msgid "(dynamic)" +msgstr "(dynamiczny)" + +msgctxt "#33167" +msgid "Recently added" +msgstr "Niedawno dodane" + +msgctxt "#33168" +msgid "Favourites" +msgstr "Ulubione" + +msgctxt "#33169" +msgid "In Progress" +msgstr "W trakcie oglądania" + +msgctxt "#33170" +msgid "Unwatched" +msgstr "Nieobejrzane" + +msgctxt "#33171" +msgid "By first letter" +msgstr "Według pierwszej litery" + +msgctxt "#33172" +msgid "" +"You have {number} updates pending. This may take a little while before " +"seeing new content. It might be faster to update your libraries via " +"launching the Emby add-on > update libraries. Proceed anyway?" +msgstr "" +"Do pobrania jest {number} aktualizacji. To może trochę potrwać. Szybszą " +"opcją może być pełna aktualizacja w dodatku Emby > Aktualizuj biblioteki. " +"Kontynuować mimo to?" + +msgctxt "#33173" +msgid "Forget about the previous sync? This is not recommended." +msgstr "Porzucić poprzednią synchronizację? Nie jest to wskazane." + +msgctxt "#33174" +msgid "Paging - download threads (default: 3)" +msgstr "Paginacja - wątki pobierania (domyślnie: 3)" + +msgctxt "#33175" +msgid "" +"Paging tip: Each download thread requests your max items value from Emby at " +"the same time." +msgstr "" +"Paginacja: każdy z wątków pobiera maksymalną ilość pozycji w tym samym " +"czasie." + +msgctxt "#33176" +msgid "Update or repair your libraries to apply the changes below." +msgstr "Aktualizuj lub napraw biblioteki aby zatwierdzić poniższe zmiany." + +msgctxt "#33177" +msgid "Display the progress bar if update count greater than" +msgstr "Wyświetl pasek postępu przy aktualizacji większej niż" + +msgctxt "#33178" +msgid "Processing updates" +msgstr "Przetwarzanie aktualizacji" + +msgctxt "#33179" +msgid "Force transcode" +msgstr "Wymuś transkodowanie" + +msgctxt "#33180" +msgid "Restart Emby for Kodi" +msgstr "Zrestartuj Emby dla Kodi" + +msgctxt "#33181" +msgid "Restarting to apply the patch" +msgstr "Restartowanie by zainstalować poprawkę" + +msgctxt "#33182" +msgid "Play with cinema mode" +msgstr "Odtwarzaj w trybie kinowym" + +msgctxt "#33183" +msgid "Enable the option to play with cinema mode" +msgstr "Włącz tę opcję by odtwarzać w trybie kinowym" + +msgctxt "#33184" +msgid "Remove libraries" +msgstr "Usuń biblioteki" + +msgctxt "#33185" +msgid "Enable sync during playback (may cause some lag)" +msgstr "" +"Włącz synchronizację podczas odtwarzania (może spowodować spowolnienia)" + +msgctxt "#33186" +msgid "" +"The Kodi companion speeds up the start up sync. Other syncs are triggered by" +" server events." 
+msgstr "" +"Wtyczka Kodi Companion przyspiesza synchronizację na starcie. Zdarzenia na " +"serwerze także wywołują synchronizację." + +msgctxt "#33187" +msgid "Sync Rotten Tomatoes ratings" +msgstr "Synchronizuj ocenę z Rotten Tomatoes" + +msgctxt "#33188" +msgid "Would you like to sync Rotten Tomatoes ratings?" +msgstr "Czy synchronizować ocenę z Rotten Tomatoes?" + +msgctxt "#33191" +msgid "Restart Emby for Kodi to apply this change?" +msgstr "Zrestartować Emby dla Kodi by zaaplikować tę zmianę?" + +msgctxt "#33192" +msgid "Restart Emby for Kodi" +msgstr "Zrestartuj Emby dla Kodi" + +msgctxt "#33193" +msgid "Restarting..." +msgstr "Restartowanie..." + +msgctxt "#33194" +msgid "Manage libraries" +msgstr "Zarządzaj bibliotekami" + +msgctxt "#33195" +msgid "Enable Emby for Kodi" +msgstr "" + +msgctxt "#33196" +msgid "Advanced options" +msgstr "" diff --git a/resources/lib/database/__init__.py b/resources/lib/database/__init__.py index 8d7e623a..6ed45e6a 100644 --- a/resources/lib/database/__init__.py +++ b/resources/lib/database/__init__.py @@ -31,6 +31,8 @@ class Database(object): db.conn.commit() ''' timeout = 120 + discovered = False + discovered_file = None def __init__(self, file=None, commit_close=True): @@ -42,14 +44,14 @@ class Database(object): def __enter__(self): ''' Open the connection and return the Database class. - This is to allow for both the cursor and conn to be accessible. - at any time. + This is to allow for the cursor, conn and others to be accessible. ''' - self.conn = sqlite3.connect(self._sql(self.db_file), timeout=self.timeout) + self.path = self._sql(self.db_file) + self.conn = sqlite3.connect(self.path, timeout=self.timeout) self.cursor = self.conn.cursor() if self.db_file in ('video', 'music', 'texture', 'emby'): - self.conn.execute("PRAGMA journal_mode=WAL") + self.conn.execute("PRAGMA journal_mode=WAL") # to avoid writing conflict with kodi LOG.debug("--->[ database: %s ] %s", self.db_file, id(self.conn)) @@ -61,11 +63,107 @@ class Database(object): return self + def _get_database(self, path, silent=False): + + path = xbmc.translatePath(path).decode('utf-8') + + if not silent: + + if not xbmcvfs.exists(path): + raise Exception("Database: %s missing" % path) + + conn = sqlite3.connect(path) + cursor = conn.cursor() + cursor.execute("SELECT name FROM sqlite_master WHERE type='table';") + tables = cursor.fetchall() + conn.close() + + if not len(tables): + raise Exception("Database: %s malformed?" % path) + + return path + + def _discover_database(self, database): + + ''' Use UpdateLibrary(video) to update the date modified + on the database file used by Kodi. 
+ ''' + if database == 'video': + + xbmc.executebuiltin('UpdateLibrary(video)') + xbmc.sleep(200) + + databases = xbmc.translatePath("special://database/").decode('utf-8') + types = { + 'video': "MyVideos", + 'music': "MyMusic", + 'texture': "Textures" + } + database = types[database] + dirs, files = xbmcvfs.listdir(databases) + modified = {'file': None, 'time': 0} + + for file in reversed(files): + + if (file.startswith(database) and not file.endswith('-wal') and + not file.endswith('-shm') and not file.endswith('db-journal')): + + st = xbmcvfs.Stat(databases + file.decode('utf-8')) + modified_int = st.st_mtime() + LOG.debug("Database detected: %s time: %s", file.decode('utf-8'), modified_int) + + if modified_int > modified['time']: + + modified['time'] = modified_int + modified['file'] = file.decode('utf-8') + + LOG.info("Discovered database: %s", modified) + self.discovered_file = modified['file'] + + return xbmc.translatePath("special://database/%s" % modified['file']).decode('utf-8') + def _sql(self, file): + ''' Get the database path based on the file objects/obj_map.json + Compatible check, in the event multiple db version are supported with the same Kodi version. + Discover by file as a last resort. + ''' databases = obj.Objects().objects - return xbmc.translatePath(databases[file]).decode('utf-8') if file in databases else file + if file not in ('video', 'music', 'texture') or databases.get('database_set%s' % file): + return self._get_database(databases[file], True) + + discovered = self._discover_database(file) if not databases.get('database_set%s' % file) else None + + try: + loaded = self._get_database(databases[file]) if file in databases else file + except Exception as error: + + for i in range(1, 10): + alt_file = "%s-%s" % (file, i) + + try: + loaded = self._get_database(databases[alt_file]) + + break + except KeyError: # No other db options + loaded = None + + break + except Exception: + pass + + if discovered and discovered != loaded: + + databases[file] = discovered + self.discovered = True + else: + databases[file] = loaded + + databases['database_set%s' % file] = True + LOG.info("Database locked in: %s", databases[file]) + + return databases[file] def __exit__(self, exc_type, exc_val, exc_tb): @@ -152,7 +250,7 @@ def reset(): xbmcvfs.delete(os.path.join(addon_data, "sync.json")) settings('enableMusic.bool', False) - settings('MinimumSetup.bool', False) + settings('MinimumSetup', "") settings('MusicRescan.bool', False) settings('SyncInstallRunDone.bool', False) dialog("ok", heading="{emby}", line1=_(33088)) diff --git a/resources/lib/downloader.py b/resources/lib/downloader.py index 749412c4..43c43d1c 100644 --- a/resources/lib/downloader.py +++ b/resources/lib/downloader.py @@ -13,7 +13,7 @@ import xbmc import xbmcvfs import xbmcaddon -from libraries import requests +import requests from helper.utils import should_stop, delete_folder from helper import settings, stop, event, window, kodi_version, unzip, create_id from emby import Emby @@ -210,6 +210,15 @@ def get_albums_by_artist(artist_id, basic=False): for items in get_items(None, "MusicAlbum", basic, params): yield items +def get_songs_by_artist(artist_id, basic=False): + + params = { + 'SortBy': "DateCreated", + 'ArtistIds': artist_id + } + for items in get_items(None, "Audio", basic, params): + yield items + @stop() def _get_items(query, server_id=None): @@ -363,22 +372,26 @@ def get_objects(src, filename): LOG.warn("Something went wrong applying this patch %s previously.", filename) restart = False - if not 
xbmcvfs.exists(path): + if not xbmcvfs.exists(path) or filename.startswith('DEV'): delete_folder(CACHE) LOG.info("From %s to %s", src, path.decode('utf-8')) try: - response = requests.get(src, stream=True, verify=False) + response = requests.get(src, stream=True, verify=True) response.raise_for_status() + except requests.exceptions.SSLError as error: + + LOG.error(error) + response = requests.get(src, stream=True, verify=False) except Exception as error: raise - else: - dl = xbmcvfs.File(path, 'w') - dl.write(response.content) - dl.close() - del response - settings('appliedPatch', filename) + dl = xbmcvfs.File(path, 'w') + dl.write(response.content) + dl.close() + del response + + settings('appliedPatch', filename) unzip(path, temp, "objects") diff --git a/resources/lib/emby/__init__.py b/resources/lib/emby/__init__.py index fc478bd2..cc9f4775 100644 --- a/resources/lib/emby/__init__.py +++ b/resources/lib/emby/__init__.py @@ -40,11 +40,16 @@ def ensure_client(): class Emby(object): - ''' This is your Embyclient, you can create more than one. The server_id is only a temporary thing. + ''' This is your Embyclient, you can create more than one. The server_id is only a temporary thing + to communicate with the EmbyClient(). + from emby import Emby - default_client = Emby()['config/app'] - another_client = Emby('123456')['config/app'] + Emby('123456')['config/app'] + + # Permanent client reference + client = Emby('123456').get_client() + client['config/app'] ''' # Borg - multiple instances, shared state @@ -57,6 +62,9 @@ class Emby(object): self.__dict__ = self._shared_state self.server_id = server_id or "default" + def get_client(self): + return self.client[self.server_id] + @classmethod def set_loghandler(cls, func=loghandler, level=logging.INFO): diff --git a/resources/lib/emby/client.py b/resources/lib/emby/client.py index f5a2847c..d78fc07d 100644 --- a/resources/lib/emby/client.py +++ b/resources/lib/emby/client.py @@ -17,6 +17,7 @@ LOG = logging.getLogger('Emby.'+__name__) ################################################################################################# def callback(message, data): + ''' Callback function should received message, data message: string data: json dictionary @@ -35,8 +36,7 @@ class EmbyClient(object): self.http = HTTP(self) self.wsc = WSClient(self) self.auth = ConnectionManager(self) - self.emby = api - self.emby.client = self.http + self.emby = api.API(self.http) self.callback_ws = callback self.callback = callback @@ -100,8 +100,6 @@ class EmbyClient(object): return self.auth.__shortcuts__(key.replace('auth/', "", 1)) elif key.startswith('api'): - self.emby.client = self.http # Since api is not a class, re-assign global var to correct http adapter - return self.emby elif key == 'connected': diff --git a/resources/lib/emby/core/api.py b/resources/lib/emby/core/api.py index 8a537953..7dbdc19a 100644 --- a/resources/lib/emby/core/api.py +++ b/resources/lib/emby/core/api.py @@ -2,25 +2,7 @@ ################################################################################################# -client = None - -################################################################################################# - -def _http(action, url, request={}): - request.update({'type': action, 'handler': url}) - - return client.request(request) - -def _get(handler, params=None): - return _http("GET", handler, {'params': params}) - -def _post(handler, json=None, params=None): - return _http("POST", handler, {'params': params, 'json': json}) - -def _delete(handler, params=None): - 
return _http("DELETE", handler, {'params': params}) - -def emby_url(handler): +def emby_url(client, handler): return "%s/emby/%s" % (client.config['auth.server'], handler) def basic_info(): @@ -45,295 +27,318 @@ def music_info(): ################################################################################################# -# Bigger section of the Emby api +class API(object): -################################################################################################# + ''' All the api calls to the server. + ''' + def __init__(self, client, *args, **kwargs): + self.client = client -def try_server(): - return _get("System/Info/Public") + def _http(self, action, url, request={}): + request.update({'type': action, 'handler': url}) -def sessions(handler="", action="GET", params=None, json=None): + return self.client.request(request) - if action == "POST": - return _post("Sessions%s" % handler, json, params) - elif action == "DELETE": - return _delete("Sessions%s" % handler, params) - else: - return _get("Sessions%s" % handler, params) + def _get(self, handler, params=None): + return self._http("GET", handler, {'params': params}) -def users(handler="", action="GET", params=None, json=None): + def _post(self, handler, json=None, params=None): + return self._http("POST", handler, {'params': params, 'json': json}) - if action == "POST": - return _post("Users/{UserId}%s" % handler, json, params) - elif action == "DELETE": - return _delete("Users/{UserId}%s" % handler, params) - else: - return _get("Users/{UserId}%s" % handler, params) + def _delete(self, handler, params=None): + return self._http("DELETE", handler, {'params': params}) -def items(handler="", action="GET", params=None, json=None): - - if action == "POST": - return _post("Items%s" % handler, json, params) - elif action == "DELETE": - return _delete("Items%s" % handler, params) - else: - return _get("Items%s" % handler, params) + ################################################################################################# -def user_items(handler="", params=None): - return users("/Items%s" % handler, params=params) + # Bigger section of the Emby api -def shows(handler, params): - return _get("Shows%s" % handler, params) + ################################################################################################# -def videos(handler): - return _get("Videos%s" % handler) + def try_server(self): + return self._get("System/Info/Public") -def artwork(item_id, art, max_width, ext="jpg", index=None): + def sessions(self, handler="", action="GET", params=None, json=None): - if index is None: - return emby_url("Items/%s/Images/%s?MaxWidth=%s&format=%s" % (item_id, art, max_width, ext)) + if action == "POST": + return self._post("Sessions%s" % handler, json, params) + elif action == "DELETE": + return self._delete("Sessions%s" % handler, params) + else: + return self._get("Sessions%s" % handler, params) - return emby_url("Items/%s/Images/%s/%s?MaxWidth=%s&format=%s" % (item_id, art, index, max_width, ext)) + def users(self, handler="", action="GET", params=None, json=None): -################################################################################################# + if action == "POST": + return self._post("Users/{UserId}%s" % handler, json, params) + elif action == "DELETE": + return self._delete("Users/{UserId}%s" % handler, params) + else: + return self._get("Users/{UserId}%s" % handler, params) -# More granular api + def items(self, handler="", action="GET", params=None, json=None): + + if action == "POST": + return 
self._post("Items%s" % handler, json, params) + elif action == "DELETE": + return self._delete("Items%s" % handler, params) + else: + return self._get("Items%s" % handler, params) -################################################################################################# + def user_items(self, handler="", params=None): + return self.users("/Items%s" % handler, params=params) -def get_users(): - return _get("Users") + def shows(self, handler, params): + return self._get("Shows%s" % handler, params) -def get_public_users(): - return _get("Users/Public") + def videos(self, handler): + return self._get("Videos%s" % handler) -def get_user(user_id=None): - return users() if user_id is None else _get("Users/%s" % user_id) + def artwork(self, item_id, art, max_width, ext="jpg", index=None): -def get_views(): - return users("/Views") + if index is None: + return emby_url(self.client, "Items/%s/Images/%s?MaxWidth=%s&format=%s" % (item_id, art, max_width, ext)) -def get_media_folders(): - return users("/Items") + return emby_url(self.client, "Items/%s/Images/%s/%s?MaxWidth=%s&format=%s" % (item_id, art, index, max_width, ext)) -def get_item(item_id): - return users("/Items/%s" % item_id) + ################################################################################################# -def get_items(item_ids): - return users("/Items", params={ - 'Ids': ','.join(str(x) for x in item_ids), - 'Fields': info() - }) + # More granular api -def get_sessions(): - return sessions(params={'ControllableByUserId': "{UserId}"}) + ################################################################################################# -def get_device(device_id): - return sessions(params={'DeviceId': device_id}) + def get_users(self): + return self._get("Users") -def post_session(session_id, url, params=None, data=None): - return sessions("/%s/%s" % (session_id, url), "POST", params, data) + def get_public_users(self): + return self._get("Users/Public") -def get_images(item_id): - return items("/%s/Images" % item_id) + def get_user(self, user_id=None): + return self.users() if user_id is None else self._get("Users/%s" % user_id) -def get_suggestion(media="Movie,Episode", limit=1): - return users("/Suggestions", { - 'Type': media, - 'Limit': limit - }) + def get_views(self): + return self.users("/Views") -def get_recently_added(media=None, parent_id=None, limit=20): - return user_items("/Latest", { - 'Limit': limit, - 'UserId': "{UserId}", - 'IncludeItemTypes': media, - 'ParentId': parent_id, - 'Fields': info() - }) + def get_media_folders(self): + return self.users("/Items") -def get_next(index=None, limit=1): - return shows("/NextUp", { - 'Limit': limit, - 'UserId': "{UserId}", - 'StartIndex': None if index is None else int(index) - }) + def get_item(self, item_id): + return self.users("/Items/%s" % item_id) -def get_adjacent_episodes(show_id, item_id): - return shows("/%s/Episodes" % show_id, { - 'UserId': "{UserId}", - 'AdjacentTo': item_id, - 'Fields': "Overview" - }) + def get_items(self, item_ids): + return self.users("/Items", params={ + 'Ids': ','.join(str(x) for x in item_ids), + 'Fields': info() + }) -def get_genres(parent_id=None): - return _get("Genres", { - 'ParentId': parent_id, - 'UserId': "{UserId}", - 'Fields': info() - }) + def get_sessions(self): + return self.sessions(params={'ControllableByUserId': "{UserId}"}) -def get_recommendation(parent_id=None, limit=20): - return _get("Movies/Recommendations", { - 'ParentId': parent_id, - 'UserId': "{UserId}", - 'Fields': info(), - 'Limit': limit - }) + 
def get_device(self, device_id): + return self.sessions(params={'DeviceId': device_id}) -def get_items_by_letter(parent_id=None, media=None, letter=None): - return user_items(params={ - 'ParentId': parent_id, - 'NameStartsWith': letter, - 'Fields': info(), - 'Recursive': True, - 'IncludeItemTypes': media - }) + def post_session(self, session_id, url, params=None, data=None): + return self.sessions("/%s/%s" % (session_id, url), "POST", params, data) -def get_channels(): - return _get("LiveTv/Channels", { - 'UserId': "{UserId}", - 'EnableImages': True, - 'EnableUserData': True - }) + def get_images(self, item_id): + return self.items("/%s/Images" % item_id) -def get_intros(item_id): - return user_items("/%s/Intros" % item_id) + def get_suggestion(self, media="Movie,Episode", limit=1): + return self.users("/Suggestions", { + 'Type': media, + 'Limit': limit + }) -def get_additional_parts(item_id): - return videos("/%s/AdditionalParts" % item_id) + def get_recently_added(self, media=None, parent_id=None, limit=20): + return self.user_items("/Latest", { + 'Limit': limit, + 'UserId': "{UserId}", + 'IncludeItemTypes': media, + 'ParentId': parent_id, + 'Fields': info() + }) -def delete_item(item_id): - return items("/%s" % item_id, "DELETE") + def get_next(self, index=None, limit=1): + return self.shows("/NextUp", { + 'Limit': limit, + 'UserId': "{UserId}", + 'StartIndex': None if index is None else int(index) + }) -def get_local_trailers(item_id): - return user_items("/%s/LocalTrailers" % item_id) + def get_adjacent_episodes(self, show_id, item_id): + return self.shows("/%s/Episodes" % show_id, { + 'UserId': "{UserId}", + 'AdjacentTo': item_id, + 'Fields': "Overview" + }) -def get_transcode_settings(): - return _get('System/Configuration/encoding') + def get_genres(self, parent_id=None): + return self._get("Genres", { + 'ParentId': parent_id, + 'UserId': "{UserId}", + 'Fields': info() + }) -def get_ancestors(item_id): - return items("/%s/Ancestors" % item_id, params={ - 'UserId': "{UserId}" - }) + def get_recommendation(self, parent_id=None, limit=20): + return self._get("Movies/Recommendations", { + 'ParentId': parent_id, + 'UserId': "{UserId}", + 'Fields': info(), + 'Limit': limit + }) -def get_items_theme_video(parent_id): - return users("/Items", params={ - 'HasThemeVideo': True, - 'ParentId': parent_id - }) + def get_items_by_letter(self, parent_id=None, media=None, letter=None): + return self.user_items(params={ + 'ParentId': parent_id, + 'NameStartsWith': letter, + 'Fields': info(), + 'Recursive': True, + 'IncludeItemTypes': media + }) -def get_themes(item_id): - return items("/%s/ThemeMedia" % item_id, params={ - 'UserId': "{UserId}", - 'InheritFromParent': True - }) + def get_channels(self): + return self._get("LiveTv/Channels", { + 'UserId': "{UserId}", + 'EnableImages': True, + 'EnableUserData': True + }) -def get_items_theme_song(parent_id): - return users("/Items", params={ - 'HasThemeSong': True, - 'ParentId': parent_id - }) + def get_intros(self, item_id): + return self.user_items("/%s/Intros" % item_id) -def get_plugins(): - return _get("Plugins") + def get_additional_parts(self, item_id): + return self.videos("/%s/AdditionalParts" % item_id) -def get_seasons(show_id): - return shows("/%s/Seasons" % show_id, params={ - 'UserId': "{UserId}", - 'EnableImages': True, - 'Fields': info() - }) + def delete_item(self, item_id): + return self.items("/%s" % item_id, "DELETE") -def get_date_modified(date, parent_id, media=None): - return users("/Items", params={ - 'ParentId': parent_id, - 
'Recursive': False, - 'IsMissing': False, - 'IsVirtualUnaired': False, - 'IncludeItemTypes': media or None, - 'MinDateLastSaved': date, - 'Fields': info() - }) + def get_local_trailers(self, item_id): + return self.user_items("/%s/LocalTrailers" % item_id) -def get_userdata_date_modified(date, parent_id, media=None): - return users("/Items", params={ - 'ParentId': parent_id, - 'Recursive': True, - 'IsMissing': False, - 'IsVirtualUnaired': False, - 'IncludeItemTypes': media or None, - 'MinDateLastSavedForUser': date, - 'Fields': info() - }) + def get_transcode_settings(self): + return self._get('System/Configuration/encoding') -def refresh_item(item_id): - return items("/%s/Refresh" % item_id, "POST", json={ - 'Recursive': True, - 'ImageRefreshMode': "FullRefresh", - 'MetadataRefreshMode': "FullRefresh", - 'ReplaceAllImages': False, - 'ReplaceAllMetadata': True - }) + def get_ancestors(self, item_id): + return self.items("/%s/Ancestors" % item_id, params={ + 'UserId': "{UserId}" + }) -def favorite(item_id, option=True): - return users("/FavoriteItems/%s" % item_id, "POST" if option else "DELETE") + def get_items_theme_video(self, parent_id): + return self.users("/Items", params={ + 'HasThemeVideo': True, + 'ParentId': parent_id + }) -def get_system_info(): - return _get("System/Configuration") + def get_themes(self, item_id): + return self.items("/%s/ThemeMedia" % item_id, params={ + 'UserId': "{UserId}", + 'InheritFromParent': True + }) -def post_capabilities(data): - return sessions("/Capabilities/Full", "POST", json=data) + def get_items_theme_song(self, parent_id): + return self.users("/Items", params={ + 'HasThemeSong': True, + 'ParentId': parent_id + }) -def session_add_user(session_id, user_id, option=True): - return sessions("/%s/Users/%s" % (session_id, user_id), "POST" if option else "DELETE") + def get_plugins(self): + return self._get("Plugins") -def session_playing(data): - return sessions("/Playing", "POST", json=data) + def get_seasons(self, show_id): + return self.shows("/%s/Seasons" % show_id, params={ + 'UserId': "{UserId}", + 'EnableImages': True, + 'Fields': info() + }) -def session_progress(data): - return sessions("/Playing/Progress", "POST", json=data) + def get_date_modified(self, date, parent_id, media=None): + return self.users("/Items", params={ + 'ParentId': parent_id, + 'Recursive': False, + 'IsMissing': False, + 'IsVirtualUnaired': False, + 'IncludeItemTypes': media or None, + 'MinDateLastSaved': date, + 'Fields': info() + }) -def session_stop(data): - return sessions("/Playing/Stopped", "POST", json=data) + def get_userdata_date_modified(self, date, parent_id, media=None): + return self.users("/Items", params={ + 'ParentId': parent_id, + 'Recursive': True, + 'IsMissing': False, + 'IsVirtualUnaired': False, + 'IncludeItemTypes': media or None, + 'MinDateLastSavedForUser': date, + 'Fields': info() + }) -def item_played(item_id, watched): - return users("/PlayedItems/%s" % item_id, "POST" if watched else "DELETE") + def refresh_item(self, item_id): + return self.items("/%s/Refresh" % item_id, "POST", json={ + 'Recursive': True, + 'ImageRefreshMode': "FullRefresh", + 'MetadataRefreshMode': "FullRefresh", + 'ReplaceAllImages': False, + 'ReplaceAllMetadata': True + }) -def get_sync_queue(date, filters=None): - return _get("Emby.Kodi.SyncQueue/{UserId}/GetItems", params={ - 'LastUpdateDT': date, - 'filter': filters or None - }) + def favorite(self, item_id, option=True): + return self.users("/FavoriteItems/%s" % item_id, "POST" if option else "DELETE") -def 
get_server_time(): - return _get("Emby.Kodi.SyncQueue/GetServerDateTime") + def get_system_info(self): + return self._get("System/Configuration") -def get_play_info(item_id, profile): - return items("/%s/PlaybackInfo" % item_id, "POST", json={ - 'UserId': "{UserId}", - 'DeviceProfile': profile, - 'AutoOpenLiveStream': True - }) + def post_capabilities(self, data): + return self.sessions("/Capabilities/Full", "POST", json=data) -def get_live_stream(item_id, play_id, token, profile): - return _post("LiveStreams/Open", json={ - 'UserId': "{UserId}", - 'DeviceProfile': profile, - 'OpenToken': token, - 'PlaySessionId': play_id, - 'ItemId': item_id - }) + def session_add_user(self, session_id, user_id, option=True): + return self.sessions("/%s/Users/%s" % (session_id, user_id), "POST" if option else "DELETE") -def close_live_stream(live_id): - return _post("LiveStreams/Close", json={ - 'LiveStreamId': live_id - }) + def session_playing(self, data): + return self.sessions("/Playing", "POST", json=data) -def close_transcode(device_id): - return _delete("Videos/ActiveEncodings", params={ - 'DeviceId': device_id - }) + def session_progress(self, data): + return self.sessions("/Playing/Progress", "POST", json=data) -def delete_item(item_id): - return items("/%s" % item_id, "DELETE") + def session_stop(self, data): + return self.sessions("/Playing/Stopped", "POST", json=data) + + def item_played(self, item_id, watched): + return self.users("/PlayedItems/%s" % item_id, "POST" if watched else "DELETE") + + def get_sync_queue(self, date, filters=None): + return self._get("Emby.Kodi.SyncQueue/{UserId}/GetItems", params={ + 'LastUpdateDT': date, + 'filter': filters or None + }) + + def get_server_time(self): + return self._get("Emby.Kodi.SyncQueue/GetServerDateTime") + + def get_play_info(self, item_id, profile): + return self.items("/%s/PlaybackInfo" % item_id, "POST", json={ + 'UserId': "{UserId}", + 'DeviceProfile': profile, + 'AutoOpenLiveStream': True + }) + + def get_live_stream(self, item_id, play_id, token, profile): + return self._post("LiveStreams/Open", json={ + 'UserId': "{UserId}", + 'DeviceProfile': profile, + 'OpenToken': token, + 'PlaySessionId': play_id, + 'ItemId': item_id + }) + + def close_live_stream(self, live_id): + return self._post("LiveStreams/Close", json={ + 'LiveStreamId': live_id + }) + + def close_transcode(self, device_id): + return self._delete("Videos/ActiveEncodings", params={ + 'DeviceId': device_id + }) + + def delete_item(self, item_id): + return self.items("/%s" % item_id, "DELETE") diff --git a/resources/lib/emby/core/connection_manager.py b/resources/lib/emby/core/connection_manager.py index 96e3207f..62b3dce3 100644 --- a/resources/lib/emby/core/connection_manager.py +++ b/resources/lib/emby/core/connection_manager.py @@ -255,8 +255,8 @@ class ConnectionManager(object): tests = [] if server.get('LastConnectionMode') is not None: - #tests.append(server['LastConnectionMode']) - pass + tests.append(server['LastConnectionMode']) + if CONNECTION_MODE['Manual'] not in tests: tests.append(CONNECTION_MODE['Manual']) if CONNECTION_MODE['Local'] not in tests: @@ -359,7 +359,7 @@ class ConnectionManager(object): if first_server is not None and first_server['DateLastAccessed'] != "2001-01-01T00:00:00Z": result = self.connect_to_server(first_server, options) - if result['State'] == CONNECTION_STATE['SignedIn']: + if result['State'] in (CONNECTION_STATE['SignedIn'], CONNECTION_STATE['Unavailable']): return result # Return loaded credentials if exists diff --git 
a/resources/lib/emby/core/http.py b/resources/lib/emby/core/http.py index 904aefe5..8b8a6f83 100644 --- a/resources/lib/emby/core/http.py +++ b/resources/lib/emby/core/http.py @@ -6,7 +6,7 @@ import json import logging import time -from libraries import requests +import requests from exceptions import HTTPException ################################################################################################# @@ -215,17 +215,17 @@ class HTTP(object): def _authorization(self, data): auth = "MediaBrowser " - auth += "Client=%s, " % self.config['app.name'] - auth += "Device=%s, " % self.config['app.device_name'] - auth += "DeviceId=%s, " % self.config['app.device_id'] - auth += "Version=%s" % self.config['app.version'] + auth += "Client=%s, " % self.config['app.name'].encode('utf-8') + auth += "Device=%s, " % self.config['app.device_name'].encode('utf-8') + auth += "DeviceId=%s, " % self.config['app.device_id'].encode('utf-8') + auth += "Version=%s" % self.config['app.version'].encode('utf-8') data['headers'].update({'Authorization': auth}) - if self.config['auth.token']: + if self.config['auth.token'] and self.config['auth.user_id']: - auth += ', UserId=%s' % self.config['auth.user_id'] - data['headers'].update({'Authorization': auth, 'X-MediaBrowser-Token': self.config['auth.token']}) + auth += ', UserId=%s' % self.config['auth.user_id'].encode('utf-8') + data['headers'].update({'Authorization': auth, 'X-MediaBrowser-Token': self.config['auth.token'].encode('utf-8')}) return data diff --git a/resources/lib/entrypoint/__init__.py b/resources/lib/entrypoint/__init__.py index 7bac52d8..3778b389 100644 --- a/resources/lib/entrypoint/__init__.py +++ b/resources/lib/entrypoint/__init__.py @@ -13,6 +13,7 @@ from emby import Emby ################################################################################################# Emby.set_loghandler(loghandler.LogHandler, logging.DEBUG) +loghandler.reset() loghandler.config() LOG = logging.getLogger('EMBY.entrypoint') diff --git a/resources/lib/entrypoint/context.py b/resources/lib/entrypoint/context.py index 8dc7a3e9..d5078eed 100644 --- a/resources/lib/entrypoint/context.py +++ b/resources/lib/entrypoint/context.py @@ -36,7 +36,7 @@ class Context(object): _selected_option = None def __init__(self, transcode=False, delete=False): - + try: self.kodi_id = sys.listitem.getVideoInfoTag().getDbId() or None self.media = self.get_media_type() @@ -51,7 +51,7 @@ class Context(object): self.kodi_id = xbmc.getInfoLabel('ListItem.DBID') self.media = xbmc.getInfoLabel('ListItem.DBTYPE') item_id = None - + if self.server or item_id: self.item = TheVoid('GetItem', {'ServerId': self.server, 'Id': item_id}).get() else: @@ -171,6 +171,6 @@ class Context(object): TheVoid('DeleteItem', {'ServerId': self.server, 'Id': self.item['Id']}) def transcode(self): - - item = TheVoid('GetItem', {'Id': self.item['Id'], 'ServerId': self.server}).get() - Actions(self.server).play(item, self.kodi_id, True) + filename = xbmc.getInfoLabel("ListItem.Filenameandpath") + filename += "&transcode=true" + xbmc.executebuiltin("PlayMedia(%s)" % filename) diff --git a/resources/lib/entrypoint/default.py b/resources/lib/entrypoint/default.py index b0b5c356..3c46a911 100644 --- a/resources/lib/entrypoint/default.py +++ b/resources/lib/entrypoint/default.py @@ -21,7 +21,6 @@ from database import reset, get_sync, Database, emby_db, get_credentials from objects import Objects, Actions from downloader import TheVoid from helper import _, event, settings, window, dialog, api, JSONRPC -from emby 
import Emby ################################################################################################# @@ -69,7 +68,7 @@ class Events(object): elif mode =='play': item = TheVoid('GetItem', {'Id': params['id'], 'ServerId': server}).get() - Actions(server).play(item, params.get('dbid'), playlist=params.get('playlist') == 'true') + Actions(server).play(item, params.get('dbid'), params.get('transcode') == 'true', playlist=params.get('playlist') == 'true') elif mode == 'playlist': event('PlayPlaylist', {'Id': params['id'], 'ServerId': server}) @@ -119,6 +118,8 @@ class Events(object): event('UpdateServer') elif mode == 'thememedia': get_themes() + elif mode == 'managelibs': + manage_libraries() elif mode == 'backup': backup() elif mode == 'restartservice': @@ -161,11 +162,11 @@ def listing(): context.append((_(33133), "RunPlugin(plugin://plugin.video.emby/?mode=removelib&id=%s)" % view_id)) LOG.debug("--[ listing/%s/%s ] %s", node, label, path) - + if path: if xbmc.getCondVisibility('Window.IsActive(Pictures)') and node in ('photos', 'homevideos'): directory(label, path, artwork=artwork) - elif xbmc.getCondVisibility('Window.IsActive(Videos)') and node not in ('photos', 'homevideos', 'music'): + elif xbmc.getCondVisibility('Window.IsActive(Videos)') and node not in ('photos', 'music', 'audiobooks'): directory(label, path, artwork=artwork, context=context) elif xbmc.getCondVisibility('Window.IsActive(Music)') and node in ('music'): directory(label, path, artwork=artwork, context=context) @@ -184,16 +185,12 @@ def listing(): directory(server['Name'], "plugin://plugin.video.emby/?mode=browse&server=%s" % server['Id'], context=context) + directory(_(33194), "plugin://plugin.video.emby/?mode=managelibs", True) directory(_(33134), "plugin://plugin.video.emby/?mode=addserver", False) - directory(_(5), "plugin://plugin.video.emby/?mode=settings", False) directory(_(33054), "plugin://plugin.video.emby/?mode=adduser", False) - directory(_(33098), "plugin://plugin.video.emby/?mode=refreshboxsets", False) - directory(_(33154), "plugin://plugin.video.emby/?mode=addlibs", False) - directory(_(33139), "plugin://plugin.video.emby/?mode=updatelibs", False) - directory(_(33140), "plugin://plugin.video.emby/?mode=repairlibs", False) - directory(_(33184), "plugin://plugin.video.emby/?mode=removelibs", False) - directory(_(33060), "plugin://plugin.video.emby/?mode=thememedia", False) + directory(_(5), "plugin://plugin.video.emby/?mode=settings", False) directory(_(33058), "plugin://plugin.video.emby/?mode=reset", False) + directory(_(33192), "plugin://plugin.video.emby/?mode=restartservice", False) if settings('backupPath'): directory(_(33092), "plugin://plugin.video.emby/?mode=backup", False) @@ -225,6 +222,18 @@ def dir_listitem(label, path, artwork=None, fanart=None): return li +def manage_libraries(): + + directory(_(33098), "plugin://plugin.video.emby/?mode=refreshboxsets", False) + directory(_(33154), "plugin://plugin.video.emby/?mode=addlibs", False) + directory(_(33139), "plugin://plugin.video.emby/?mode=updatelibs", False) + directory(_(33140), "plugin://plugin.video.emby/?mode=repairlibs", False) + directory(_(33184), "plugin://plugin.video.emby/?mode=removelibs", False) + directory(_(33060), "plugin://plugin.video.emby/?mode=thememedia", False) + + xbmcplugin.setContent(int(sys.argv[1]), 'files') + xbmcplugin.endOfDirectory(int(sys.argv[1])) + def browse(media, view_id=None, folder=None, server_id=None): ''' Browse content dynamically. 
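The manage_libraries() helper added above is ordinary Kodi plugin routing: every menu entry is a plugin:// URL that re-invokes the add-on with a mode parameter, and the listing is finalized exactly once with endOfDirectory(). A minimal, self-contained sketch of that pattern follows; the labels and URLs are illustrative, not the add-on's exact strings:

    import sys

    import xbmcgui
    import xbmcplugin

    HANDLE = int(sys.argv[1])  # plugin invocation handle passed in by Kodi

    def add_entry(label, url, is_folder=False):

        # each entry routes back into the add-on via its mode parameter
        li = xbmcgui.ListItem(label)
        xbmcplugin.addDirectoryItem(HANDLE, url, li, is_folder)

    add_entry("Refresh boxsets", "plugin://plugin.video.emby/?mode=refreshboxsets")
    add_entry("Update libraries", "plugin://plugin.video.emby/?mode=updatelibs")

    # close the directory exactly once, after all entries are added
    xbmcplugin.setContent(HANDLE, 'files')
    xbmcplugin.endOfDirectory(HANDLE)

Splitting the maintenance actions into this kind of sub-menu keeps the main listing short while every entry still resolves through the same mode dispatcher.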
@@ -249,7 +258,7 @@ def browse(media, view_id=None, folder=None, server_id=None): if folder is None and media in ('homevideos', 'movies', 'books', 'audiobooks'): return browse_subfolders(media, view_id, server_id) - + if folder and folder == 'firstletter': return browse_letters(media, view_id, server_id) @@ -476,7 +485,7 @@ def get_media_type(media): return "MusicArtist,MusicAlbum,Audio" def get_fanart(item_id, path, server_id=None): - + ''' Get extra fanart for listitems. This is called by skinhelper. Images are stored locally, due to the Kodi caching system. ''' @@ -561,7 +570,7 @@ def get_video_extras(item_id, path, server_id=None): """ def get_next_episodes(item_id, limit): - + ''' Only for synced content. ''' with Database('emby') as embydb: @@ -648,7 +657,7 @@ def create_listitem(item): label2 = "" li = xbmcgui.ListItem(title) li.setProperty('IsPlayable', "true") - + metadata = { 'Title': title, 'duration': str(item['runtime']/60), @@ -702,7 +711,7 @@ def create_listitem(item): metadata['CastAndRole'] = castandrole li.setLabel2(label2) - li.setInfo(type="Video", infoLabels=metadata) + li.setInfo(type="Video", infoLabels=metadata) li.setProperty('resumetime', str(item['resume']['position'])) li.setProperty('totaltime', str(item['resume']['total'])) li.setArt(item['art']) @@ -784,7 +793,7 @@ def get_themes(): items = {} server = TheVoid('GetServerAddress', {'ServerId': None}).get() - token = TheVoid('GetToken', {'ServerId': None}).get() + token = TheVoid('GetToken', {'ServerId': None}).get() for view in views: result = TheVoid('GetThemes', {'Type': "Video", 'Id': view}).get() diff --git a/resources/lib/entrypoint/service.py b/resources/lib/entrypoint/service.py index 214053f2..2fe12f32 100644 --- a/resources/lib/entrypoint/service.py +++ b/resources/lib/entrypoint/service.py @@ -17,7 +17,7 @@ import client import library import setup import monitor -from libraries import requests +import requests from views import Views, verify_kodi_defaults from helper import _, window, settings, event, dialog, find, compare_version from downloader import get_objects @@ -119,7 +119,9 @@ class Service(xbmc.Monitor): self.settings['last_progress_report'] = datetime.today() if window('emby.restart.bool'): + window('emby.restart', clear=True) + dialog("notification", heading="{emby}", message=_(33193), icon="{emby}", time=1000, sound=False) raise Exception('RestartService') @@ -128,6 +130,8 @@ class Service(xbmc.Monitor): self.shutdown() + raise Exception("ExitService") + def start_default(self): try: @@ -169,13 +173,13 @@ class Service(xbmc.Monitor): raise Exception("Completed database reset") - def check_update(self): + def check_update(self, forced=False): ''' Check for objects build version and compare. This pulls a dict that contains all the information for the build needed. 
''' LOG.info("--[ check updates/%s ]", objects.version) - kodi = xbmc.getInfoLabel('System.BuildVersion') + kodi = "DEV" if settings('devMode.bool') else xbmc.getInfoLabel('System.BuildVersion') try: versions = requests.get('http://kodi.emby.media/Public%20testing/Dependencies/databases.json').json() @@ -186,13 +190,16 @@ label, zipfile = build.split('-', 1) - if label == objects.version: + if label == 'DEV' and forced: + LOG.info("--[ force/objects/%s ]", label) + + elif label == objects.version: LOG.info("--[ objects/%s ]", objects.version) return False get_objects(zipfile, label + '.zip') - reload(objects) # to apply latest changes + self.reload_objects() dialog("notification", heading="{emby}", message=_(33156), icon="{emby}") LOG.info("--[ new objects/%s ]", objects.version) @@ -264,7 +271,7 @@ if data.get('ServerId') is None: self.stop_default() - if self.waitForAbort(20): + if self.waitForAbort(120): return self.start_default() @@ -354,7 +361,9 @@ libraries = data['Id'].split(',') for lib in libraries: - self.library_thread.remove_library(lib) + + if not self.library_thread.remove_library(lib): + return self.library_thread.add_library(data['Id']) xbmc.executebuiltin("Container.Refresh") @@ -363,7 +372,9 @@ libraries = data['Id'].split(',') for lib in libraries: - self.library_thread.remove_library(lib) + + if not self.library_thread.remove_library(lib): + return xbmc.executebuiltin("Container.Refresh") @@ -413,7 +424,7 @@ elif method == 'CheckUpdate': - if not self.check_update(): + if not self.check_update(True): dialog("notification", heading="{emby}", message=_(21341), icon="{emby}", sound=False) else: dialog("notification", heading="{emby}", message=_(33181), icon="{emby}", sound=False) @@ -461,17 +472,38 @@ if not self.settings['kodi_companion']: dialog("ok", heading="{emby}", line1=_(33138)) + def reload_objects(self): + + ''' Reload objects which depend on the patch module. + This allows seeing changes in the code without restarting the Python interpreter.
+ ''' + reload_modules = ['objects.movies', 'objects.musicvideos', 'objects.tvshows', + 'objects.music', 'objects.obj', 'objects.actions', 'objects.kodi.kodi', + 'objects.kodi.movies', 'objects.kodi.musicvideos', 'objects.kodi.tvshows', + 'objects.kodi.music', 'objects.kodi.artwork', 'objects.kodi.queries', + 'objects.kodi.queries_music', 'objects.kodi.queries_texture'] + + for mod in reload_modules: + del sys.modules[mod] + + reload(objects.kodi) + reload(objects) + reload(library) + reload(monitor) + + LOG.warn("---[ objects reloaded ]") + def shutdown(self): LOG.warn("---<[ EXITING ]") window('emby_should_stop.bool', True) properties = [ # TODO: review - "emby_state", "emby_serverStatus", - "emby_syncRunning", "emby_currUser", + "emby_state", "emby_serverStatus", "emby_currUser", "emby_play", "emby_online", "emby.connected", "emby.resume", "emby_startup", - "emby.external", "emby.external_check", "emby_deviceId", "emby_db_check", "emby_pathverified" + "emby.external", "emby.external_check", "emby_deviceId", "emby_db_check", "emby_pathverified", + "emby_sync" ] for prop in properties: window(prop, clear=True) @@ -482,6 +514,8 @@ self.library_thread.stop_client() if self.monitor is not None: + self.monitor.listener.stop() + self.monitor.webservice.stop() LOG.warn("---<<<[ %s ]", client.get_addon_name()) diff --git a/resources/lib/full_sync.py b/resources/lib/full_sync.py index 4813cb4c..0381ac9b 100644 --- a/resources/lib/full_sync.py +++ b/resources/lib/full_sync.py @@ -13,10 +13,9 @@ import xbmcvfs import downloader as server import helper.xmls as xmls from database import Database, get_sync, save_sync, emby_db -from objects import Movies, TVShows, MusicVideos, Music -from helper import _, settings, progress, dialog, LibraryException +from helper import _, settings, window, progress, dialog, LibraryException from helper.utils import get_screensaver, set_screensaver -from emby import Emby +from views import Views ################################################################################################## @@ -27,14 +26,56 @@ LOG = logging.getLogger("EMBY."+__name__) class FullSync(object): + ''' This should be called as a context manager, + e.g. with FullSync('emby') as sync: + sync.libraries() + ''' + # Borg - multiple instances, shared state + _shared_state = {} sync = None + running = False + screensaver = None - def __init__(self, library, library_id=None, update=False): + + def __init__(self, library, server): + + ''' All the big syncing methods are called from here: + initial, update, repair, remove. + ''' + self.__dict__ = self._shared_state + + if self.running: + dialog("ok", heading="{emby}", line1=_(33197)) + + raise Exception("Sync is already running.") self.library = library + self.server = server + + def __enter__(self): + + ''' Do everything needed before the sync starts. + ''' + LOG.info("-->[ fullsync ]") + + if not settings('dbSyncScreensaver.bool'): + + xbmc.executebuiltin('InhibitIdleShutdown(true)') + self.screensaver = get_screensaver() + set_screensaver(value="") + + self.running = True + window('emby_sync.bool', True) + + return self + + + def libraries(self, library_id=None, update=False): + + ''' Map the syncing process and start the sync. Ensure only one sync is running. + ''' self.direct_path = settings('useDirectPaths') == "1" self.update_library = update - self.server = Emby() self.sync = get_sync() if library_id: @@ -138,6 +179,7 @@ return [libraries[x - 1] for x in selection] + def start(self): ''' Main sync process.
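The FullSync rewrite above combines two techniques: Borg-style shared state (self.__dict__ = self._shared_state), so every instance sees the same attributes, and the context-manager protocol, so screensaver and window-property cleanup runs even when the sync raises. A minimal sketch of the combination, with illustrative names rather than the add-on's code:

    class Sync(object):

        _shared_state = {}   # one dict shared by every instance (Borg pattern)
        running = False

        def __init__(self):
            # every instance reads and writes the same attribute dict
            self.__dict__ = self._shared_state

        def __enter__(self):

            if self.running:
                raise RuntimeError("Sync is already running.")

            self.running = True      # visible through every other Sync()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            self.running = False     # reset even if the sync raised

    with Sync() as sync:
        pass  # the actual library work goes here

Because only the state dict is shared, not the instance itself, any part of the process can construct a throwaway Sync() just to check whether one is already running.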
@@ -146,30 +188,15 @@ class FullSync(object): save_sync(self.sync) start_time = datetime.datetime.now() - if not settings('dbSyncScreensaver.bool'): + for library in list(self.sync['Libraries']): - xbmc.executebuiltin('InhibitIdleShutdown(true)') - screensaver = get_screensaver() - set_screensaver(value="") + self.process_library(library) - try: - for library in list(self.sync['Libraries']): + if not library.startswith('Boxsets:') and library not in self.sync['Whitelist']: + self.sync['Whitelist'].append(library) - self.process_library(library) - - if not library.startswith('Boxsets:') and library not in self.sync['Whitelist']: - self.sync['Whitelist'].append(library) - - self.sync['Libraries'].pop(self.sync['Libraries'].index(library)) - self.sync['RestorePoint'] = {} - except Exception as error: - - if not settings('dbSyncScreensaver.bool'): - - xbmc.executebuiltin('InhibitIdleShutdown(false)') - set_screensaver(value=screensaver) - - raise + self.sync['Libraries'].pop(self.sync['Libraries'].index(library)) + self.sync['RestorePoint'] = {} elapsed = datetime.datetime.now() - start_time settings('SyncInstallRunDone.bool', True) @@ -235,6 +262,8 @@ class FullSync(object): ''' Process movies from a single library. ''' + Movies = self.library.media['Movies'] + with self.library.database_lock: with Database() as videodb: with Database('emby') as embydb: @@ -266,7 +295,7 @@ class FullSync(object): current = obj.item_ids for x in items: - if x[0] not in current: + if x[0] not in current and x[1] == 'Movie': obj.remove(x[0]) @progress() @@ -274,6 +303,8 @@ class FullSync(object): ''' Process tvshows and episodes from a single library. ''' + TVShows = self.library.media['TVShows'] + with self.library.database_lock: with Database() as videodb: with Database('emby') as embydb: @@ -314,7 +345,7 @@ class FullSync(object): current = obj.item_ids for x in items: - if x[0] not in current: + if x[0] not in current and x[1] == 'Series': obj.remove(x[0]) @progress() @@ -322,6 +353,8 @@ class FullSync(object): ''' Process musicvideos from a single library. ''' + MusicVideos = self.library.media['MusicVideos'] + with self.library.database_lock: with Database() as videodb: with Database('emby') as embydb: @@ -352,7 +385,7 @@ class FullSync(object): current = obj.item_ids for x in items: - if x[0] not in current: + if x[0] not in current and x[1] == 'MusicVideo': obj.remove(x[0]) @progress() @@ -360,6 +393,8 @@ class FullSync(object): ''' Process artists, album, songs from a single library. ''' + Music = self.library.media['Music'] + with self.library.music_database_lock: with Database('music') as musicdb: with Database('emby') as embydb: @@ -389,6 +424,13 @@ class FullSync(object): message="%s/%s/%s" % (message, album['Name'][:7], song['Name'][:7])) obj.song(song) + for songs in server.get_songs_by_artist(artist['Id']): + for song in songs['Items']: + + dialog.update(percent, message="%s/%s" % (message, song['Name'])) + obj.song(song) + + if self.update_library: self.music_compare(library, obj, embydb) @@ -405,7 +447,7 @@ class FullSync(object): current = obj.item_ids for x in items: - if x[0] not in current: + if x[0] not in current and x[1] == 'MusicArtist': obj.remove(x[0]) @progress(_(33018)) @@ -413,6 +455,8 @@ class FullSync(object): ''' Process all boxsets. ''' + Movies = self.library.media['Movies'] + with self.library.database_lock: with Database() as videodb: with Database('emby') as embydb: @@ -434,6 +478,8 @@ class FullSync(object): ''' Delete all exisitng boxsets and re-add. 
''' + Movies = self.library.media['Movies'] + with self.library.database_lock: with Database() as videodb: with Database('emby') as embydb: @@ -442,3 +488,81 @@ obj.boxsets_reset() self.boxsets(None) + + @progress(_(33144)) + def remove_library(self, library_id, dialog): + + ''' Remove a library by its id from the Kodi database. + ''' + MEDIA = self.library.MEDIA + direct_path = self.library.direct_path + + with Database('emby') as embydb: + + db = emby_db.EmbyDatabase(embydb.cursor) + library = db.get_view(library_id.replace('Mixed:', "")) + items = db.get_item_by_media_folder(library_id.replace('Mixed:', "")) + media = 'music' if library[1] == 'music' else 'video' + + if media == 'music': + settings('MusicRescan.bool', False) + + if items: + count = 0 + + with self.library.music_database_lock if media == 'music' else self.library.database_lock: + with Database(media) as kodidb: + + if library[1] == 'mixed': + + movies = [x for x in items if x[1] == 'Movie'] + tvshows = [x for x in items if x[1] == 'Series'] + + obj = MEDIA['Movie'](self.server, embydb, kodidb, direct_path)['Remove'] + + for item in movies: + + obj(item[0]) + dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) + count += 1 + + obj = MEDIA['Series'](self.server, embydb, kodidb, direct_path)['Remove'] + + for item in tvshows: + + obj(item[0]) + dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) + count += 1 + else: + obj = MEDIA[items[0][1]](self.server, embydb, kodidb, direct_path)['Remove'] + + for item in items: + + obj(item[0]) + dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) + count += 1 + + self.sync = get_sync() + + if library_id in self.sync['Whitelist']: + self.sync['Whitelist'].remove(library_id) + + elif 'Mixed:%s' % library_id in self.sync['Whitelist']: + self.sync['Whitelist'].remove('Mixed:%s' % library_id) + + save_sync(self.sync) + + + def __exit__(self, exc_type, exc_val, exc_tb): + + ''' Exiting sync + ''' + self.running = False + window('emby_sync', clear=True) + + if not settings('dbSyncScreensaver.bool') and self.screensaver is not None: + + xbmc.executebuiltin('InhibitIdleShutdown(false)') + set_screensaver(value=self.screensaver) + + LOG.info("--<[ fullsync ]") diff --git a/resources/lib/helper/__init__.py b/resources/lib/helper/__init__.py index beb06035..5575306f 100644 --- a/resources/lib/helper/__init__.py +++ b/resources/lib/helper/__init__.py @@ -16,6 +16,7 @@ from utils import write_xml from utils import compare_version from utils import unzip from utils import create_id +from utils import convert_to_local as Local from wrapper import progress from wrapper import catch diff --git a/resources/lib/helper/loghandler.py b/resources/lib/helper/loghandler.py index 4eb824d5..8fb2698f 100644 --- a/resources/lib/helper/loghandler.py +++ b/resources/lib/helper/loghandler.py @@ -18,6 +18,11 @@ def config(): logger.addHandler(LogHandler()) logger.setLevel(logging.DEBUG) +def reset(): + + for handler in logging.getLogger('EMBY').handlers: + logging.getLogger('EMBY').removeHandler(handler) + class LogHandler(logging.StreamHandler): diff --git a/resources/lib/helper/playutils.py b/resources/lib/helper/playutils.py index 30859a5e..f3146cdc 100644 --- a/resources/lib/helper/playutils.py +++ b/resources/lib/helper/playutils.py @@ -14,8 +14,8 @@ import api import database import client import collections +import
requests from . import _, settings, window, dialog -from libraries import requests from downloader import TheVoid from emby import Emby @@ -38,6 +38,7 @@ def set_properties(item, method, server_id=None): 'Id': item['Id'], 'Path': info['Path'], 'PlayMethod': method, + 'PlayOption': 'Addon' if info.get('PlaySessionId') else 'Native', 'MediaSourceId': info.get('MediaSourceId', item['Id']), 'Runtime': item.get('RunTimeTicks'), 'PlaySessionId': info.get('PlaySessionId', str(uuid4()).replace("-", "")), @@ -66,7 +67,7 @@ class PlayUtils(object): 'ServerId': server_id, 'ServerAddress': server, 'ForceTranscode': force_transcode, - 'Token': token or TheVoid('GetToken', {'ServerId': server_id}).get() + 'Token': token or TheVoid('GetToken', {'ServerId': server_id}).get() } def get_sources(self, source_id=None): @@ -117,7 +118,7 @@ class PlayUtils(object): if resp > -1: source = sources[resp] else: - log.info("No media source selected.") + LOG.info("No media source selected.") return False else: source = sources[0] @@ -167,7 +168,7 @@ class PlayUtils(object): def is_strm(self, source): - if source['Container'] == 'strm' or self.item['Path'].endswith('.strm'): + if source.get('Container') == 'strm' or self.item['Path'].endswith('.strm'): LOG.info("strm detected") return True @@ -189,6 +190,11 @@ class PlayUtils(object): source['SupportsDirectPlay'] = False source['Protocol'] = "LiveTV" + if self.info['ForceTranscode']: + + source['SupportsDirectPlay'] = False + source['SupportsDirectStream'] = False + if source.get('Protocol') == 'Http' or source['SupportsDirectPlay'] and (self.is_strm(source) or not settings('playFromStream.bool') and self.is_file_exists(source)): LOG.info("--[ direct play ]") @@ -230,7 +236,7 @@ class PlayUtils(object): return info['MediaSource'] def transcode(self, source, audio=None, subtitle=None): - + if not 'TranscodingUrl' in source: raise Exception("use get_sources to get transcoding url") @@ -258,7 +264,7 @@ class PlayUtils(object): return self.info['Path'] def direct_play(self, source): - + API = api.API(self.item, self.info['ServerAddress']) self.info['Method'] = "DirectPlay" self.info['Path'] = API.get_file_path(source.get('Path')) @@ -266,13 +272,13 @@ class PlayUtils(object): return self.info['Path'] def direct_url(self, source): - + self.info['Method'] = "DirectStream" if self.item['Type'] == "Audio": self.info['Path'] = ("%s/emby/Audio/%s/stream.%s?static=true&api_key=%s" % (self.info['ServerAddress'], self.item['Id'], - source['Container'].split(',')[0], + source.get('Container', "mp4").split(',')[0], self.info['Token'])) else: self.info['Path'] = ("%s/emby/Videos/%s/stream?static=true&MediaSourceId=%s&api_key=%s" % @@ -294,7 +300,7 @@ class PlayUtils(object): def get_resolution(self): return int(xbmc.getInfoLabel('System.ScreenWidth')), int(xbmc.getInfoLabel('System.ScreenHeight')) - + def get_device_profile(self): ''' Get device profile based on the add-on settings. 
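get_device_profile() builds the DeviceProfile that get_play_info() posts with PlaybackInfo requests; the server compares a media source against the profile's DirectPlayProfiles and CodecProfiles to decide between direct play, direct stream and transcoding. The hunk below tags a condition block with 'codec': 'h264' so its limits stop applying to every video codec. As a rough sketch of the profile's shape, the field names here follow Emby's DeviceProfile schema, but the values are examples rather than the add-on's settings-driven output:

    # Illustrative shape only; not the add-on's actual profile.
    profile = {
        'MaxStreamingBitrate': 120000000,
        'DirectPlayProfiles': [{'Type': 'Video'}, {'Type': 'Audio'}],
        'TranscodingProfiles': [{
            'Type': 'Video', 'Container': 'm3u8', 'Protocol': 'hls',
            'VideoCodec': 'h264', 'AudioCodec': 'aac'
        }],
        'CodecProfiles': [{
            # scoping the condition to h264, as the hunk below does
            'Type': 'Video',
            'codec': 'h264',
            'Conditions': [{
                'Condition': 'LessThanEqual',
                'Property': 'VideoLevel',
                'Value': '41'
            }]
        }]
    }

A source that fails any applicable condition falls back from direct play to direct stream or transcoding, which is why scoping conditions to a single codec matters.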
@@ -416,6 +422,7 @@ class PlayUtils(object): profile['CodecProfiles'].append( { 'Type': 'Video', + 'codec': 'h264', 'Conditions': [ { 'Condition': "LessThanEqual", @@ -459,10 +466,10 @@ for stream in source['MediaStreams']: - if stream['Type'] == 'Subtitle' and stream['IsExternal'] and stream['IsTextSubtitleStream']: + if stream['Type'] == 'Subtitle' and stream['IsExternal']: index = stream['Index'] - if 'DeliveryUrl' in stream and stream['DeliveryUrl'].lower().startswith('http'): + if 'DeliveryUrl' in stream and stream['DeliveryUrl'].lower().startswith('/videos'): url = "%s/emby%s" % (self.info['ServerAddress'], stream['DeliveryUrl']) else: url = self.get_subtitles(source, stream, index) @@ -618,7 +625,7 @@ def get_subtitles(self, source, stream, index): - if 'DeliveryUrl' in stream and stream['DeliveryUrl'].lower().startswith('http'): + if stream['IsTextSubtitleStream'] and 'DeliveryUrl' in stream and stream['DeliveryUrl'].lower().startswith('/videos'): url = "%s/emby%s" % (self.info['ServerAddress'], stream['DeliveryUrl']) else: url = ("%s/emby/Videos/%s/%s/Subtitles/%s/Stream.%s?api_key=%s" % diff --git a/resources/lib/helper/utils.py b/resources/lib/helper/utils.py index b20bdd5f..71d5bbbb 100644 --- a/resources/lib/helper/utils.py +++ b/resources/lib/helper/utils.py @@ -17,6 +17,7 @@ import xbmcgui import xbmcvfs from . import _ +from dateutil import tz, parser ################################################################################################# @@ -51,7 +52,7 @@ def window(key, value=None, clear=False, window_id=10000): key = key.replace('.bool', "") value = "true" if value else "false" - window.setProperty(key, value) + window.setProperty(key, value) else: result = window.getProperty(key.replace('.json', "").replace('.bool', "")) @@ -444,3 +445,18 @@ def split_list(itemlist, size): ''' Split up list in pieces of size. Will generate a list of lists ''' return [itemlist[i:i+size] for i in range(0, len(itemlist), size)] + +def convert_to_local(date): + + ''' Convert a UTC datetime to local time.
+ ''' + try: + date = parser.parse(date) if type(date) in (unicode, str) else date + date = date.replace(tzinfo=tz.tzutc()) + date = date.astimezone(tz.tzlocal()) + + return date.strftime('%Y-%m-%dT%H:%M:%S') + except Exception as error: + LOG.error(error) + + return str(date) diff --git a/resources/lib/helper/xmls.py b/resources/lib/helper/xmls.py index a1e98a7b..5ca11099 100644 --- a/resources/lib/helper/xmls.py +++ b/resources/lib/helper/xmls.py @@ -36,20 +36,25 @@ def sources(): etree.SubElement(files, 'default', attrib={'pathversion': "1"}) video = xml.find('video') - count = 2 + count_http = 1 + count_smb = 1 for source in xml.findall('.//path'): if source.text == 'smb://': - count -= 1 + count_smb -= 1 + elif source.text == 'http://': + count_http -= 1 - if count == 0: + if not count_http and not count_smb: break else: - for i in range(0, count): - source = etree.SubElement(video, 'source') - etree.SubElement(source, 'name').text = "Emby" - etree.SubElement(source, 'path', attrib={'pathversion': "1"}).text = "smb://" - etree.SubElement(source, 'allowsharing').text = "true" + for protocol in ('smb://', 'http://'): + if (protocol == 'smb://' and count_smb > 0) or (protocol == 'http://' and count_http > 0): + + source = etree.SubElement(video, 'source') + etree.SubElement(source, 'name').text = "Emby" + etree.SubElement(source, 'path', attrib={'pathversion': "1"}).text = protocol + etree.SubElement(source, 'allowsharing').text = "true" try: files = xml.find('files') diff --git a/resources/lib/libraries/__init__.py b/resources/lib/libraries/__init__.py deleted file mode 100644 index 20b15530..00000000 --- a/resources/lib/libraries/__init__.py +++ /dev/null @@ -1 +0,0 @@ -import requests diff --git a/resources/lib/libraries/mutagen/__init__.py b/resources/lib/libraries/mutagen/__init__.py deleted file mode 100644 index 03ad7aee..00000000 --- a/resources/lib/libraries/mutagen/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - - -"""Mutagen aims to be an all purpose multimedia tagging library. - -:: - - import mutagen.[format] - metadata = mutagen.[format].Open(filename) - -`metadata` acts like a dictionary of tags in the file. Tags are generally a -list of string-like values, but may have additional methods available -depending on tag or format. They may also be entirely different objects -for certain keys, again depending on format. 
-""" - -from mutagen._util import MutagenError -from mutagen._file import FileType, StreamInfo, File -from mutagen._tags import Metadata, PaddingInfo - -version = (1, 31) -"""Version tuple.""" - -version_string = ".".join(map(str, version)) -"""Version string.""" - -MutagenError - -FileType - -StreamInfo - -File - -Metadata - -PaddingInfo diff --git a/resources/lib/libraries/mutagen/__pycache__/__init__.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index 0d767fdc..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_compat.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_compat.cpython-35.pyc deleted file mode 100644 index 93f423d5..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_compat.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_constants.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_constants.cpython-35.pyc deleted file mode 100644 index 368544dc..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_constants.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_file.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_file.cpython-35.pyc deleted file mode 100644 index 2e8bbe11..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_file.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_mp3util.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_mp3util.cpython-35.pyc deleted file mode 100644 index e242cafa..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_mp3util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_tags.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_tags.cpython-35.pyc deleted file mode 100644 index b49b1dfc..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_tags.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_toolsutil.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_toolsutil.cpython-35.pyc deleted file mode 100644 index cabceb1e..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_toolsutil.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_util.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index 199c081b..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/_vorbis.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/_vorbis.cpython-35.pyc deleted file mode 100644 index 5a667067..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/_vorbis.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/aac.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/aac.cpython-35.pyc deleted file mode 100644 index c976f6f4..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/aac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/aiff.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/aiff.cpython-35.pyc deleted file mode 100644 index f9a42314..00000000 Binary 
files a/resources/lib/libraries/mutagen/__pycache__/aiff.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/apev2.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/apev2.cpython-35.pyc deleted file mode 100644 index 943ab32c..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/apev2.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/easyid3.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/easyid3.cpython-35.pyc deleted file mode 100644 index a1031ba9..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/easyid3.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/easymp4.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/easymp4.cpython-35.pyc deleted file mode 100644 index f970678f..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/easymp4.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/flac.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/flac.cpython-35.pyc deleted file mode 100644 index 703d5882..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/flac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/m4a.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/m4a.cpython-35.pyc deleted file mode 100644 index 43ede4e2..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/m4a.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/monkeysaudio.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/monkeysaudio.cpython-35.pyc deleted file mode 100644 index 8c719f0e..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/monkeysaudio.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/mp3.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/mp3.cpython-35.pyc deleted file mode 100644 index 8a0be643..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/mp3.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/musepack.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/musepack.cpython-35.pyc deleted file mode 100644 index cf4ca9e5..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/musepack.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/ogg.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/ogg.cpython-35.pyc deleted file mode 100644 index 0eb57b0c..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/ogg.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/oggflac.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/oggflac.cpython-35.pyc deleted file mode 100644 index ab7dadf0..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/oggflac.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/oggopus.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/oggopus.cpython-35.pyc deleted file mode 100644 index fa6e46fa..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/oggopus.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/oggspeex.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/oggspeex.cpython-35.pyc deleted file mode 
100644 index 9cb15665..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/oggspeex.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/oggtheora.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/oggtheora.cpython-35.pyc deleted file mode 100644 index a7a4e557..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/oggtheora.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/oggvorbis.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/oggvorbis.cpython-35.pyc deleted file mode 100644 index 9acf6dc5..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/oggvorbis.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/optimfrog.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/optimfrog.cpython-35.pyc deleted file mode 100644 index 0bcbbb85..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/optimfrog.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/trueaudio.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/trueaudio.cpython-35.pyc deleted file mode 100644 index 629b654b..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/trueaudio.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/__pycache__/wavpack.cpython-35.pyc b/resources/lib/libraries/mutagen/__pycache__/wavpack.cpython-35.pyc deleted file mode 100644 index 2b9be214..00000000 Binary files a/resources/lib/libraries/mutagen/__pycache__/wavpack.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/_compat.py b/resources/lib/libraries/mutagen/_compat.py deleted file mode 100644 index 77c465f1..00000000 --- a/resources/lib/libraries/mutagen/_compat.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. 
- -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = not PY2 - -if PY2: - from StringIO import StringIO - BytesIO = StringIO - from cStringIO import StringIO as cBytesIO - from itertools import izip - - long_ = long - integer_types = (int, long) - string_types = (str, unicode) - text_type = unicode - - xrange = xrange - cmp = cmp - chr_ = chr - - def endswith(text, end): - return text.endswith(end) - - iteritems = lambda d: d.iteritems() - itervalues = lambda d: d.itervalues() - iterkeys = lambda d: d.iterkeys() - - iterbytes = lambda b: iter(b) - - exec("def reraise(tp, value, tb):\n raise tp, value, tb") - - def swap_to_string(cls): - if "__str__" in cls.__dict__: - cls.__unicode__ = cls.__str__ - - if "__bytes__" in cls.__dict__: - cls.__str__ = cls.__bytes__ - - return cls - -elif PY3: - from io import StringIO - StringIO = StringIO - from io import BytesIO - cBytesIO = BytesIO - - long_ = int - integer_types = (int,) - string_types = (str,) - text_type = str - - izip = zip - xrange = range - cmp = lambda a, b: (a > b) - (a < b) - chr_ = lambda x: bytes([x]) - - def endswith(text, end): - # usefull for paths which can be both, str and bytes - if isinstance(text, str): - if not isinstance(end, str): - end = end.decode("ascii") - else: - if not isinstance(end, bytes): - end = end.encode("ascii") - return text.endswith(end) - - iteritems = lambda d: iter(d.items()) - itervalues = lambda d: iter(d.values()) - iterkeys = lambda d: iter(d.keys()) - - iterbytes = lambda b: (bytes([v]) for v in b) - - def reraise(tp, value, tb): - raise tp(value).with_traceback(tb) - - def swap_to_string(cls): - return cls diff --git a/resources/lib/libraries/mutagen/_constants.py b/resources/lib/libraries/mutagen/_constants.py deleted file mode 100644 index 62c1ce02..00000000 --- a/resources/lib/libraries/mutagen/_constants.py +++ /dev/null @@ -1,199 +0,0 @@ -# -*- coding: utf-8 -*- - -"""Constants used by Mutagen.""" - -GENRES = [ - u"Blues", - u"Classic Rock", - u"Country", - u"Dance", - u"Disco", - u"Funk", - u"Grunge", - u"Hip-Hop", - u"Jazz", - u"Metal", - u"New Age", - u"Oldies", - u"Other", - u"Pop", - u"R&B", - u"Rap", - u"Reggae", - u"Rock", - u"Techno", - u"Industrial", - u"Alternative", - u"Ska", - u"Death Metal", - u"Pranks", - u"Soundtrack", - u"Euro-Techno", - u"Ambient", - u"Trip-Hop", - u"Vocal", - u"Jazz+Funk", - u"Fusion", - u"Trance", - u"Classical", - u"Instrumental", - u"Acid", - u"House", - u"Game", - u"Sound Clip", - u"Gospel", - u"Noise", - u"Alt. 
Rock", - u"Bass", - u"Soul", - u"Punk", - u"Space", - u"Meditative", - u"Instrumental Pop", - u"Instrumental Rock", - u"Ethnic", - u"Gothic", - u"Darkwave", - u"Techno-Industrial", - u"Electronic", - u"Pop-Folk", - u"Eurodance", - u"Dream", - u"Southern Rock", - u"Comedy", - u"Cult", - u"Gangsta Rap", - u"Top 40", - u"Christian Rap", - u"Pop/Funk", - u"Jungle", - u"Native American", - u"Cabaret", - u"New Wave", - u"Psychedelic", - u"Rave", - u"Showtunes", - u"Trailer", - u"Lo-Fi", - u"Tribal", - u"Acid Punk", - u"Acid Jazz", - u"Polka", - u"Retro", - u"Musical", - u"Rock & Roll", - u"Hard Rock", - u"Folk", - u"Folk-Rock", - u"National Folk", - u"Swing", - u"Fast-Fusion", - u"Bebop", - u"Latin", - u"Revival", - u"Celtic", - u"Bluegrass", - u"Avantgarde", - u"Gothic Rock", - u"Progressive Rock", - u"Psychedelic Rock", - u"Symphonic Rock", - u"Slow Rock", - u"Big Band", - u"Chorus", - u"Easy Listening", - u"Acoustic", - u"Humour", - u"Speech", - u"Chanson", - u"Opera", - u"Chamber Music", - u"Sonata", - u"Symphony", - u"Booty Bass", - u"Primus", - u"Porn Groove", - u"Satire", - u"Slow Jam", - u"Club", - u"Tango", - u"Samba", - u"Folklore", - u"Ballad", - u"Power Ballad", - u"Rhythmic Soul", - u"Freestyle", - u"Duet", - u"Punk Rock", - u"Drum Solo", - u"A Cappella", - u"Euro-House", - u"Dance Hall", - u"Goa", - u"Drum & Bass", - u"Club-House", - u"Hardcore", - u"Terror", - u"Indie", - u"BritPop", - u"Afro-Punk", - u"Polsk Punk", - u"Beat", - u"Christian Gangsta Rap", - u"Heavy Metal", - u"Black Metal", - u"Crossover", - u"Contemporary Christian", - u"Christian Rock", - u"Merengue", - u"Salsa", - u"Thrash Metal", - u"Anime", - u"JPop", - u"Synthpop", - u"Abstract", - u"Art Rock", - u"Baroque", - u"Bhangra", - u"Big Beat", - u"Breakbeat", - u"Chillout", - u"Downtempo", - u"Dub", - u"EBM", - u"Eclectic", - u"Electro", - u"Electroclash", - u"Emo", - u"Experimental", - u"Garage", - u"Global", - u"IDM", - u"Illbient", - u"Industro-Goth", - u"Jam Band", - u"Krautrock", - u"Leftfield", - u"Lounge", - u"Math Rock", - u"New Romantic", - u"Nu-Breakz", - u"Post-Punk", - u"Post-Rock", - u"Psytrance", - u"Shoegaze", - u"Space Rock", - u"Trop Rock", - u"World Music", - u"Neoclassical", - u"Audiobook", - u"Audio Theatre", - u"Neue Deutsche Welle", - u"Podcast", - u"Indie Rock", - u"G-Funk", - u"Dubstep", - u"Garage Rock", - u"Psybient", -] -"""The ID3v1 genre list.""" diff --git a/resources/lib/libraries/mutagen/_file.py b/resources/lib/libraries/mutagen/_file.py deleted file mode 100644 index 5daa2521..00000000 --- a/resources/lib/libraries/mutagen/_file.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (C) 2005 Michael Urman -# -*- coding: utf-8 -*- -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import warnings - -from mutagen._util import DictMixin -from mutagen._compat import izip - - -class FileType(DictMixin): - """An abstract object wrapping tags and audio stream information. - - Attributes: - - * info -- stream information (length, bitrate, sample rate) - * tags -- metadata tags, if any - - Each file format has different potential tags and stream - information. - - FileTypes implement an interface very similar to Metadata; the - dict interface, save, load, and delete calls on a FileType call - the appropriate methods on its tag data. 
- """ - - __module__ = "mutagen" - - info = None - tags = None - filename = None - _mimes = ["application/octet-stream"] - - def __init__(self, filename=None, *args, **kwargs): - if filename is None: - warnings.warn("FileType constructor requires a filename", - DeprecationWarning) - else: - self.load(filename, *args, **kwargs) - - def load(self, filename, *args, **kwargs): - raise NotImplementedError - - def __getitem__(self, key): - """Look up a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - return self.tags[key] - - def __setitem__(self, key, value): - """Set a metadata tag. - - If the file has no tags, an appropriate format is added (but - not written until save is called). - """ - - if self.tags is None: - self.add_tags() - self.tags[key] = value - - def __delitem__(self, key): - """Delete a metadata tag key. - - If the file has no tags at all, a KeyError is raised. - """ - - if self.tags is None: - raise KeyError(key) - else: - del(self.tags[key]) - - def keys(self): - """Return a list of keys in the metadata tag. - - If the file has no tags at all, an empty list is returned. - """ - - if self.tags is None: - return [] - else: - return self.tags.keys() - - def delete(self, filename=None): - """Remove tags from a file. - - In cases where the tagging format is independent of the file type - (for example `mutagen.ID3`) all traces of the tagging format will - be removed. - In cases where the tag is part of the file type, all tags and - padding will be removed. - - The tags attribute will be cleared as well if there is one. - - Does nothing if the file has no tags. - - :raises mutagen.MutagenError: if deleting wasn't possible - """ - - if self.tags is not None: - if filename is None: - filename = self.filename - else: - warnings.warn( - "delete(filename=...) is deprecated, reload the file", - DeprecationWarning) - return self.tags.delete(filename) - - def save(self, filename=None, **kwargs): - """Save metadata tags. - - :raises mutagen.MutagenError: if saving wasn't possible - """ - - if filename is None: - filename = self.filename - else: - warnings.warn( - "save(filename=...) is deprecated, reload the file", - DeprecationWarning) - - if self.tags is not None: - return self.tags.save(filename, **kwargs) - - def pprint(self): - """Print stream information and comment key=value pairs.""" - - stream = "%s (%s)" % (self.info.pprint(), self.mime[0]) - try: - tags = self.tags.pprint() - except AttributeError: - return stream - else: - return stream + ((tags and "\n" + tags) or "") - - def add_tags(self): - """Adds new tags to the file. - - :raises mutagen.MutagenError: if tags already exist or adding is not - possible. - """ - - raise NotImplementedError - - @property - def mime(self): - """A list of mime types""" - - mimes = [] - for Kind in type(self).__mro__: - for mime in getattr(Kind, '_mimes', []): - if mime not in mimes: - mimes.append(mime) - return mimes - - @staticmethod - def score(filename, fileobj, header): - raise NotImplementedError - - -class StreamInfo(object): - """Abstract stream information object. - - Provides attributes for length, bitrate, sample rate etc. - - See the implementations for details. - """ - - __module__ = "mutagen" - - def pprint(self): - """Print stream information""" - - raise NotImplementedError - - -def File(filename, options=None, easy=False): - """Guess the type of the file and try to open it. 
- - The file type is decided by several things, such as the first 128 - bytes (which usually contains a file type identifier), the - filename extension, and the presence of existing tags. - - If no appropriate type could be found, None is returned. - - :param options: Sequence of :class:`FileType` implementations, defaults to - all included ones. - - :param easy: If the easy wrappers should be returnd if available. - For example :class:`EasyMP3 <mp3.EasyMP3>` instead - of :class:`MP3 <mp3.MP3>`. - """ - - if options is None: - from mutagen.asf import ASF - from mutagen.apev2 import APEv2File - from mutagen.flac import FLAC - if easy: - from mutagen.easyid3 import EasyID3FileType as ID3FileType - else: - from mutagen.id3 import ID3FileType - if easy: - from mutagen.mp3 import EasyMP3 as MP3 - else: - from mutagen.mp3 import MP3 - from mutagen.oggflac import OggFLAC - from mutagen.oggspeex import OggSpeex - from mutagen.oggtheora import OggTheora - from mutagen.oggvorbis import OggVorbis - from mutagen.oggopus import OggOpus - if easy: - from mutagen.trueaudio import EasyTrueAudio as TrueAudio - else: - from mutagen.trueaudio import TrueAudio - from mutagen.wavpack import WavPack - if easy: - from mutagen.easymp4 import EasyMP4 as MP4 - else: - from mutagen.mp4 import MP4 - from mutagen.musepack import Musepack - from mutagen.monkeysaudio import MonkeysAudio - from mutagen.optimfrog import OptimFROG - from mutagen.aiff import AIFF - from mutagen.aac import AAC - options = [MP3, TrueAudio, OggTheora, OggSpeex, OggVorbis, OggFLAC, - FLAC, AIFF, APEv2File, MP4, ID3FileType, WavPack, - Musepack, MonkeysAudio, OptimFROG, ASF, OggOpus, AAC] - - if not options: - return None - - with open(filename, "rb") as fileobj: - header = fileobj.read(128) - # Sort by name after score. Otherwise import order affects - # Kind sort order, which affects treatment of things with - # equals scores. - results = [(Kind.score(filename, fileobj, header), Kind.__name__) - for Kind in options] - - results = list(izip(results, options)) - results.sort() - (score, name), Kind = results[-1] - if score > 0: - return Kind(filename) - else: - return None diff --git a/resources/lib/libraries/mutagen/_mp3util.py b/resources/lib/libraries/mutagen/_mp3util.py deleted file mode 100644 index 409cadcb..00000000 --- a/resources/lib/libraries/mutagen/_mp3util.py +++ /dev/null @@ -1,420 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -""" -http://www.codeproject.com/Articles/8295/MPEG-Audio-Frame-Header -http://wiki.hydrogenaud.io/index.php?title=MP3 -""" - -from functools import partial - -from ._util import cdata, BitReader -from ._compat import xrange, iterbytes, cBytesIO - - -class LAMEError(Exception): - pass - - -class LAMEHeader(object): - """http://gabriel.mp3-tech.org/mp3infotag.html""" - - vbr_method = 0 - """0: unknown, 1: CBR, 2: ABR, 3/4/5: VBR, others: see the docs""" - - lowpass_filter = 0 - """lowpass filter value in Hz. 0 means unknown""" - - quality = -1 - """Encoding quality: 0..9""" - - vbr_quality = -1 - """VBR quality: 0..9""" - - track_peak = None - """Peak signal amplitude as float. 
None if unknown.""" - - track_gain_origin = 0 - """see the docs""" - - track_gain_adjustment = None - """Track gain adjustment as float (for 89db replay gain) or None""" - - album_gain_origin = 0 - """see the docs""" - - album_gain_adjustment = None - """Album gain adjustment as float (for 89db replay gain) or None""" - - encoding_flags = 0 - """see docs""" - - ath_type = -1 - """see docs""" - - bitrate = -1 - """Bitrate in kbps. For VBR the minimum bitrate, for anything else - (CBR, ABR, ..) the target bitrate. - """ - - encoder_delay_start = 0 - """Encoder delay in samples""" - - encoder_padding_end = 0 - """Padding in samples added at the end""" - - source_sample_frequency_enum = -1 - """see docs""" - - unwise_setting_used = False - """see docs""" - - stereo_mode = 0 - """see docs""" - - noise_shaping = 0 - """see docs""" - - mp3_gain = 0 - """Applied MP3 gain -127..127. Factor is 2 ** (mp3_gain / 4)""" - - surround_info = 0 - """see docs""" - - preset_used = 0 - """lame preset""" - - music_length = 0 - """Length in bytes excluding any ID3 tags""" - - music_crc = -1 - """CRC16 of the data specified by music_length""" - - header_crc = -1 - """CRC16 of this header and everything before (not checked)""" - - def __init__(self, xing, fileobj): - """Raises LAMEError if parsing fails""" - - payload = fileobj.read(27) - if len(payload) != 27: - raise LAMEError("Not enough data") - - # extended lame header - r = BitReader(cBytesIO(payload)) - revision = r.bits(4) - if revision != 0: - raise LAMEError("unsupported header revision %d" % revision) - - self.vbr_method = r.bits(4) - self.lowpass_filter = r.bits(8) * 100 - - # these have a different meaning for lame; expose them again here - self.quality = (100 - xing.vbr_scale) % 10 - self.vbr_quality = (100 - xing.vbr_scale) // 10 - - track_peak_data = r.bytes(4) - if track_peak_data == b"\x00\x00\x00\x00": - self.track_peak = None - else: - # see PutLameVBR() in LAME's VbrTag.c - self.track_peak = ( - cdata.uint32_be(track_peak_data) - 0.5) / 2 ** 23 - track_gain_type = r.bits(3) - self.track_gain_origin = r.bits(3) - sign = r.bits(1) - gain_adj = r.bits(9) / 10.0 - if sign: - gain_adj *= -1 - if track_gain_type == 1: - self.track_gain_adjustment = gain_adj - else: - self.track_gain_adjustment = None - assert r.is_aligned() - - album_gain_type = r.bits(3) - self.album_gain_origin = r.bits(3) - sign = r.bits(1) - album_gain_adj = r.bits(9) / 10.0 - if album_gain_type == 2: - self.album_gain_adjustment = album_gain_adj - else: - self.album_gain_adjustment = None - - self.encoding_flags = r.bits(4) - self.ath_type = r.bits(4) - - self.bitrate = r.bits(8) - - self.encoder_delay_start = r.bits(12) - self.encoder_padding_end = r.bits(12) - - self.source_sample_frequency_enum = r.bits(2) - self.unwise_setting_used = r.bits(1) - self.stereo_mode = r.bits(3) - self.noise_shaping = r.bits(2) - - sign = r.bits(1) - mp3_gain = r.bits(7) - if sign: - mp3_gain *= -1 - self.mp3_gain = mp3_gain - - r.skip(2) - self.surround_info = r.bits(3) - self.preset_used = r.bits(11) - self.music_length = r.bits(32) - self.music_crc = r.bits(16) - - self.header_crc = r.bits(16) - assert r.is_aligned() - - @classmethod - def parse_version(cls, fileobj): - """Returns a version string and True if a LAMEHeader follows. - The passed file object will be positioned right before the - lame header if True. - - Raises LAMEError if there is no lame version info. 
- """ - - # http://wiki.hydrogenaud.io/index.php?title=LAME_version_string - - data = fileobj.read(20) - if len(data) != 20: - raise LAMEError("Not a lame header") - if not data.startswith((b"LAME", b"L3.99")): - raise LAMEError("Not a lame header") - - data = data.lstrip(b"EMAL") - major, data = data[0:1], data[1:].lstrip(b".") - minor = b"" - for c in iterbytes(data): - if not c.isdigit(): - break - minor += c - data = data[len(minor):] - - try: - major = int(major.decode("ascii")) - minor = int(minor.decode("ascii")) - except ValueError: - raise LAMEError - - # the extended header was added sometimes in the 3.90 cycle - # e.g. "LAME3.90 (alpha)" should still stop here. - # (I have seen such a file) - if (major, minor) < (3, 90) or ( - (major, minor) == (3, 90) and data[-11:-10] == b"("): - flag = data.strip(b"\x00").rstrip().decode("ascii") - return u"%d.%d%s" % (major, minor, flag), False - - if len(data) <= 11: - raise LAMEError("Invalid version: too long") - - flag = data[:-11].rstrip(b"\x00") - - flag_string = u"" - patch = u"" - if flag == b"a": - flag_string = u" (alpha)" - elif flag == b"b": - flag_string = u" (beta)" - elif flag == b"r": - patch = u".1+" - elif flag == b" ": - if (major, minor) > (3, 96): - patch = u".0" - else: - patch = u".0+" - elif flag == b"" or flag == b".": - patch = u".0+" - else: - flag_string = u" (?)" - - # extended header, seek back to 9 bytes for the caller - fileobj.seek(-11, 1) - - return u"%d.%d%s%s" % (major, minor, patch, flag_string), True - - -class XingHeaderError(Exception): - pass - - -class XingHeaderFlags(object): - FRAMES = 0x1 - BYTES = 0x2 - TOC = 0x4 - VBR_SCALE = 0x8 - - -class XingHeader(object): - - frames = -1 - """Number of frames, -1 if unknown""" - - bytes = -1 - """Number of bytes, -1 if unknown""" - - toc = [] - """List of 100 file offsets in percent encoded as 0-255. E.g. entry - 50 contains the file offset in percent at 50% play time. - Empty if unknown. - """ - - vbr_scale = -1 - """VBR quality indicator 0-100. -1 if unknown""" - - lame_header = None - """A LAMEHeader instance or None""" - - lame_version = u"" - """The version of the LAME encoder e.g. '3.99.0'. Empty if unknown""" - - is_info = False - """If the header started with 'Info' and not 'Xing'""" - - def __init__(self, fileobj): - """Parses the Xing header or raises XingHeaderError. - - The file position after this returns is undefined. 
- """ - - data = fileobj.read(8) - if len(data) != 8 or data[:4] not in (b"Xing", b"Info"): - raise XingHeaderError("Not a Xing header") - - self.is_info = (data[:4] == b"Info") - - flags = cdata.uint32_be_from(data, 4)[0] - - if flags & XingHeaderFlags.FRAMES: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.frames = cdata.uint32_be(data) - - if flags & XingHeaderFlags.BYTES: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.bytes = cdata.uint32_be(data) - - if flags & XingHeaderFlags.TOC: - data = fileobj.read(100) - if len(data) != 100: - raise XingHeaderError("Xing header truncated") - self.toc = list(bytearray(data)) - - if flags & XingHeaderFlags.VBR_SCALE: - data = fileobj.read(4) - if len(data) != 4: - raise XingHeaderError("Xing header truncated") - self.vbr_scale = cdata.uint32_be(data) - - try: - self.lame_version, has_header = LAMEHeader.parse_version(fileobj) - if has_header: - self.lame_header = LAMEHeader(self, fileobj) - except LAMEError: - pass - - @classmethod - def get_offset(cls, info): - """Calculate the offset to the Xing header from the start of the - MPEG header including sync based on the MPEG header's content. - """ - - assert info.layer == 3 - - if info.version == 1: - if info.mode != 3: - return 36 - else: - return 21 - else: - if info.mode != 3: - return 21 - else: - return 13 - - -class VBRIHeaderError(Exception): - pass - - -class VBRIHeader(object): - - version = 0 - """VBRI header version""" - - quality = 0 - """Quality indicator""" - - bytes = 0 - """Number of bytes""" - - frames = 0 - """Number of frames""" - - toc_scale_factor = 0 - """Scale factor of TOC entries""" - - toc_frames = 0 - """Number of frames per table entry""" - - toc = [] - """TOC""" - - def __init__(self, fileobj): - """Reads the VBRI header or raises VBRIHeaderError. - - The file position is undefined after this returns - """ - - data = fileobj.read(26) - if len(data) != 26 or not data.startswith(b"VBRI"): - raise VBRIHeaderError("Not a VBRI header") - - offset = 4 - self.version, offset = cdata.uint16_be_from(data, offset) - if self.version != 1: - raise VBRIHeaderError( - "Unsupported header version: %r" % self.version) - - offset += 2 # float16.. 
no unpacker for it, so skip
- self.quality, offset = cdata.uint16_be_from(data, offset)
- self.bytes, offset = cdata.uint32_be_from(data, offset)
- self.frames, offset = cdata.uint32_be_from(data, offset)
-
- toc_num_entries, offset = cdata.uint16_be_from(data, offset)
- self.toc_scale_factor, offset = cdata.uint16_be_from(data, offset)
- toc_entry_size, offset = cdata.uint16_be_from(data, offset)
- self.toc_frames, offset = cdata.uint16_be_from(data, offset)
- toc_size = toc_entry_size * toc_num_entries
- toc_data = fileobj.read(toc_size)
- if len(toc_data) != toc_size:
- raise VBRIHeaderError("VBRI header truncated")
-
- self.toc = []
- if toc_entry_size == 2:
- unpack = partial(cdata.uint16_be_from, toc_data)
- elif toc_entry_size == 4:
- unpack = partial(cdata.uint32_be_from, toc_data)
- else:
- raise VBRIHeaderError("Invalid TOC entry size")
-
- self.toc = [unpack(i)[0] for i in xrange(0, toc_size, toc_entry_size)]
-
- @classmethod
- def get_offset(cls, info):
- """Offset in bytes from the start of the MPEG header including sync"""
-
- assert info.layer == 3
-
- return 36
diff --git a/resources/lib/libraries/mutagen/_tags.py b/resources/lib/libraries/mutagen/_tags.py
deleted file mode 100644
index ce250adf..00000000
--- a/resources/lib/libraries/mutagen/_tags.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2005 Michael Urman
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of version 2 of the GNU General Public License as
-# published by the Free Software Foundation.
-
-
-class PaddingInfo(object):
- """Abstract padding information object.
-
- This will be passed to the callback function that can be used
- for saving tags.
-
- ::
-
- def my_callback(info: PaddingInfo):
- return info.get_default_padding()
-
- The callback should return the amount of padding to use (>= 0) based on
- the content size and the padding of the file after saving. The actual
- amount of padding used might vary depending on the file format (due to
- alignment etc.)
-
- The default implementation can be accessed using the
- :meth:`get_default_padding` method in the callback.
- """
-
- padding = 0
- """The amount of padding left after saving in bytes (can be negative if
- more data needs to be added than padding is available)
- """
-
- size = 0
- """The amount of data following the padding"""
-
- def __init__(self, padding, size):
- self.padding = padding
- self.size = size
-
- def get_default_padding(self):
- """The default implementation which tries to select a reasonable
- amount of padding and which might change in future versions.
-
- :return: Amount of padding after saving
- :rtype: int
- """
-
- high = 1024 * 10 + self.size // 100 # 10 KiB + 1% of trailing data
- low = 1024 + self.size // 1000 # 1 KiB + 0.1% of trailing data
-
- if self.padding >= 0:
- # enough padding left
- if self.padding > high:
- # padding too large, reduce
- return low
- # just use existing padding as is
- return self.padding
- else:
- # not enough padding, add some
- return low
-
- def _get_padding(self, user_func):
- if user_func is None:
- return self.get_default_padding()
- else:
- return user_func(self)
-
- def __repr__(self):
- return "<%s size=%d padding=%d>" % (
- type(self).__name__, self.size, self.padding)
-
-
-class Metadata(object):
- """An abstract dict-like object.
-
- Metadata is the base class for many of the tag objects in Mutagen.
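# [Editor's aside -- illustrative sketch, not part of the original diff.]
# With PaddingInfo.get_default_padding() above, low = 1 KiB + size // 1000
# and high = 10 KiB + size // 100; the numbers below are arbitrary:
#
#     >>> PaddingInfo(-23, 1000000).get_default_padding()    # grow to low
#     2024
#     >>> PaddingInfo(5000, 1000000).get_default_padding()   # keep as is
#     5000
#     >>> PaddingInfo(50000, 1000000).get_default_padding()  # shrink to low
#     2024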
- """ - - __module__ = "mutagen" - - def __init__(self, *args, **kwargs): - if args or kwargs: - self.load(*args, **kwargs) - - def load(self, *args, **kwargs): - raise NotImplementedError - - def save(self, filename=None): - """Save changes to a file.""" - - raise NotImplementedError - - def delete(self, filename=None): - """Remove tags from a file. - - In most cases this means any traces of the tag will be removed - from the file. - """ - - raise NotImplementedError diff --git a/resources/lib/libraries/mutagen/_toolsutil.py b/resources/lib/libraries/mutagen/_toolsutil.py deleted file mode 100644 index e9074b71..00000000 --- a/resources/lib/libraries/mutagen/_toolsutil.py +++ /dev/null @@ -1,231 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2015 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -import os -import sys -import signal -import locale -import contextlib -import optparse -import ctypes - -from ._compat import text_type, PY2, PY3, iterbytes - - -def split_escape(string, sep, maxsplit=None, escape_char="\\"): - """Like unicode/str/bytes.split but allows for the separator to be escaped - - If passed unicode/str/bytes will only return list of unicode/str/bytes. - """ - - assert len(sep) == 1 - assert len(escape_char) == 1 - - if isinstance(string, bytes): - if isinstance(escape_char, text_type): - escape_char = escape_char.encode("ascii") - iter_ = iterbytes - else: - iter_ = iter - - if maxsplit is None: - maxsplit = len(string) - - empty = string[:0] - result = [] - current = empty - escaped = False - for char in iter_(string): - if escaped: - if char != escape_char and char != sep: - current += escape_char - current += char - escaped = False - else: - if char == escape_char: - escaped = True - elif char == sep and len(result) < maxsplit: - result.append(current) - current = empty - else: - current += char - result.append(current) - return result - - -class SignalHandler(object): - - def __init__(self): - self._interrupted = False - self._nosig = False - self._init = False - - def init(self): - signal.signal(signal.SIGINT, self._handler) - signal.signal(signal.SIGTERM, self._handler) - if os.name != "nt": - signal.signal(signal.SIGHUP, self._handler) - - def _handler(self, signum, frame): - self._interrupted = True - if not self._nosig: - raise SystemExit("Aborted...") - - @contextlib.contextmanager - def block(self): - """While this context manager is active any signals for aborting - the process will be queued and exit the program once the context - is left. 
- """ - - self._nosig = True - yield - self._nosig = False - if self._interrupted: - raise SystemExit("Aborted...") - - -def get_win32_unicode_argv(): - """Returns a unicode argv under Windows and standard sys.argv otherwise""" - - if os.name != "nt" or not PY2: - return sys.argv - - import ctypes - from ctypes import cdll, windll, wintypes - - GetCommandLineW = cdll.kernel32.GetCommandLineW - GetCommandLineW.argtypes = [] - GetCommandLineW.restype = wintypes.LPCWSTR - - CommandLineToArgvW = windll.shell32.CommandLineToArgvW - CommandLineToArgvW.argtypes = [ - wintypes.LPCWSTR, ctypes.POINTER(ctypes.c_int)] - CommandLineToArgvW.restype = ctypes.POINTER(wintypes.LPWSTR) - - LocalFree = windll.kernel32.LocalFree - LocalFree.argtypes = [wintypes.HLOCAL] - LocalFree.restype = wintypes.HLOCAL - - argc = ctypes.c_int() - argv = CommandLineToArgvW(GetCommandLineW(), ctypes.byref(argc)) - if not argv: - return - - res = argv[max(0, argc.value - len(sys.argv)):argc.value] - - LocalFree(argv) - - return res - - -def fsencoding(): - """The encoding used for paths, argv, environ, stdout and stdin""" - - if os.name == "nt": - return "" - - return locale.getpreferredencoding() or "utf-8" - - -def fsnative(text=u""): - """Returns the passed text converted to the preferred path type - for each platform. - """ - - assert isinstance(text, text_type) - - if os.name == "nt" or PY3: - return text - else: - return text.encode(fsencoding(), "replace") - return text - - -def is_fsnative(arg): - """If the passed value is of the preferred path type for each platform. - Note that on Python3+linux, paths can be bytes or str but this returns - False for bytes there. - """ - - if PY3 or os.name == "nt": - return isinstance(arg, text_type) - else: - return isinstance(arg, bytes) - - -def print_(*objects, **kwargs): - """A print which supports bytes and str+surrogates under python3. - - Needed so we can print anything passed to us through argv and environ. - Under Windows only text_type is allowed. - - Arguments: - objects: one or more bytes/text - linesep (bool): whether a line separator should be appended - sep (bool): whether objects should be printed separated by spaces - """ - - linesep = kwargs.pop("linesep", True) - sep = kwargs.pop("sep", True) - file_ = kwargs.pop("file", None) - if file_ is None: - file_ = sys.stdout - - old_cp = None - if os.name == "nt": - # Try to force the output to cp65001 aka utf-8. 
- # If that fails use the current one (most likely cp850, so - # most of unicode will be replaced with '?') - encoding = "utf-8" - old_cp = ctypes.windll.kernel32.GetConsoleOutputCP() - if ctypes.windll.kernel32.SetConsoleOutputCP(65001) == 0: - encoding = getattr(sys.stdout, "encoding", None) or "utf-8" - old_cp = None - else: - encoding = fsencoding() - - try: - if linesep: - objects = list(objects) + [os.linesep] - - parts = [] - for text in objects: - if isinstance(text, text_type): - if PY3: - try: - text = text.encode(encoding, 'surrogateescape') - except UnicodeEncodeError: - text = text.encode(encoding, 'replace') - else: - text = text.encode(encoding, 'replace') - parts.append(text) - - data = (b" " if sep else b"").join(parts) - try: - fileno = file_.fileno() - except (AttributeError, OSError, ValueError): - # for tests when stdout is replaced - try: - file_.write(data) - except TypeError: - file_.write(data.decode(encoding, "replace")) - else: - file_.flush() - os.write(fileno, data) - finally: - # reset the code page to what we had before - if old_cp is not None: - ctypes.windll.kernel32.SetConsoleOutputCP(old_cp) - - -class OptionParser(optparse.OptionParser): - """OptionParser subclass which supports printing Unicode under Windows""" - - def print_help(self, file=None): - print_(self.format_help(), file=file) diff --git a/resources/lib/libraries/mutagen/_util.py b/resources/lib/libraries/mutagen/_util.py deleted file mode 100644 index f05ff454..00000000 --- a/resources/lib/libraries/mutagen/_util.py +++ /dev/null @@ -1,550 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Utility classes for Mutagen. - -You should not rely on the interfaces here being stable. They are -intended for internal use in Mutagen only. -""" - -import struct -import codecs - -from fnmatch import fnmatchcase - -from ._compat import chr_, PY2, iteritems, iterbytes, integer_types, xrange, \ - izip - - -class MutagenError(Exception): - """Base class for all custom exceptions in mutagen - - .. versionadded:: 1.25 - """ - - __module__ = "mutagen" - - -def total_ordering(cls): - assert "__eq__" in cls.__dict__ - assert "__lt__" in cls.__dict__ - - cls.__le__ = lambda self, other: self == other or self < other - cls.__gt__ = lambda self, other: not (self == other or self < other) - cls.__ge__ = lambda self, other: not self < other - cls.__ne__ = lambda self, other: not self.__eq__(other) - - return cls - - -def hashable(cls): - """Makes sure the class is hashable. - - Needs a working __eq__ and __hash__ and will add a __ne__. 
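# [Editor's aside -- illustrative sketch, not part of the original diff;
# the Version class is hypothetical.]
# total_ordering() above fills in __le__, __gt__, __ge__ and __ne__ from a
# class's own __eq__ and __lt__:
#
#     >>> @total_ordering
#     ... class Version(object):
#     ...     def __init__(self, n):
#     ...         self.n = n
#     ...     def __eq__(self, other):
#     ...         return self.n == other.n
#     ...     def __lt__(self, other):
#     ...         return self.n < other.n
#     >>> Version(1) <= Version(2)
#     True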
- """ - - # py2 - assert "__hash__" in cls.__dict__ - # py3 - assert cls.__dict__["__hash__"] is not None - assert "__eq__" in cls.__dict__ - - cls.__ne__ = lambda self, other: not self.__eq__(other) - - return cls - - -def enum(cls): - assert cls.__bases__ == (object,) - - d = dict(cls.__dict__) - new_type = type(cls.__name__, (int,), d) - new_type.__module__ = cls.__module__ - - map_ = {} - for key, value in iteritems(d): - if key.upper() == key and isinstance(value, integer_types): - value_instance = new_type(value) - setattr(new_type, key, value_instance) - map_[value] = key - - def str_(self): - if self in map_: - return "%s.%s" % (type(self).__name__, map_[self]) - return "%d" % int(self) - - def repr_(self): - if self in map_: - return "<%s.%s: %d>" % (type(self).__name__, map_[self], int(self)) - return "%d" % int(self) - - setattr(new_type, "__repr__", repr_) - setattr(new_type, "__str__", str_) - - return new_type - - -@total_ordering -class DictMixin(object): - """Implement the dict API using keys() and __*item__ methods. - - Similar to UserDict.DictMixin, this takes a class that defines - __getitem__, __setitem__, __delitem__, and keys(), and turns it - into a full dict-like object. - - UserDict.DictMixin is not suitable for this purpose because it's - an old-style class. - - This class is not optimized for very large dictionaries; many - functions have linear memory requirements. I recommend you - override some of these functions if speed is required. - """ - - def __iter__(self): - return iter(self.keys()) - - def __has_key(self, key): - try: - self[key] - except KeyError: - return False - else: - return True - - if PY2: - has_key = __has_key - - __contains__ = __has_key - - if PY2: - iterkeys = lambda self: iter(self.keys()) - - def values(self): - return [self[k] for k in self.keys()] - - if PY2: - itervalues = lambda self: iter(self.values()) - - def items(self): - return list(izip(self.keys(), self.values())) - - if PY2: - iteritems = lambda s: iter(s.items()) - - def clear(self): - for key in list(self.keys()): - self.__delitem__(key) - - def pop(self, key, *args): - if len(args) > 1: - raise TypeError("pop takes at most two arguments") - try: - value = self[key] - except KeyError: - if args: - return args[0] - else: - raise - del(self[key]) - return value - - def popitem(self): - for key in self.keys(): - break - else: - raise KeyError("dictionary is empty") - return key, self.pop(key) - - def update(self, other=None, **kwargs): - if other is None: - self.update(kwargs) - other = {} - - try: - for key, value in other.items(): - self.__setitem__(key, value) - except AttributeError: - for key, value in other: - self[key] = value - - def setdefault(self, key, default=None): - try: - return self[key] - except KeyError: - self[key] = default - return default - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def __repr__(self): - return repr(dict(self.items())) - - def __eq__(self, other): - return dict(self.items()) == other - - def __lt__(self, other): - return dict(self.items()) < other - - __hash__ = object.__hash__ - - def __len__(self): - return len(self.keys()) - - -class DictProxy(DictMixin): - def __init__(self, *args, **kwargs): - self.__dict = {} - super(DictProxy, self).__init__(*args, **kwargs) - - def __getitem__(self, key): - return self.__dict[key] - - def __setitem__(self, key, value): - self.__dict[key] = value - - def __delitem__(self, key): - del(self.__dict[key]) - - def keys(self): - return 
self.__dict.keys() - - -def _fill_cdata(cls): - """Add struct pack/unpack functions""" - - funcs = {} - for key, name in [("b", "char"), ("h", "short"), - ("i", "int"), ("q", "longlong")]: - for echar, esuffix in [("<", "le"), (">", "be")]: - esuffix = "_" + esuffix - for unsigned in [True, False]: - s = struct.Struct(echar + (key.upper() if unsigned else key)) - get_wrapper = lambda f: lambda *a, **k: f(*a, **k)[0] - unpack = get_wrapper(s.unpack) - unpack_from = get_wrapper(s.unpack_from) - - def get_unpack_from(s): - def unpack_from(data, offset=0): - return s.unpack_from(data, offset)[0], offset + s.size - return unpack_from - - unpack_from = get_unpack_from(s) - pack = s.pack - - prefix = "u" if unsigned else "" - if s.size == 1: - esuffix = "" - bits = str(s.size * 8) - funcs["%s%s%s" % (prefix, name, esuffix)] = unpack - funcs["%sint%s%s" % (prefix, bits, esuffix)] = unpack - funcs["%s%s%s_from" % (prefix, name, esuffix)] = unpack_from - funcs["%sint%s%s_from" % (prefix, bits, esuffix)] = unpack_from - funcs["to_%s%s%s" % (prefix, name, esuffix)] = pack - funcs["to_%sint%s%s" % (prefix, bits, esuffix)] = pack - - for key, func in iteritems(funcs): - setattr(cls, key, staticmethod(func)) - - -class cdata(object): - """C character buffer to Python numeric type conversions. - - For each size/sign/endianness: - uint32_le(data)/to_uint32_le(num)/uint32_le_from(data, offset=0) - """ - - from struct import error - error = error - - bitswap = b''.join( - chr_(sum(((val >> i) & 1) << (7 - i) for i in xrange(8))) - for val in xrange(256)) - - test_bit = staticmethod(lambda value, n: bool((value >> n) & 1)) - - -_fill_cdata(cdata) - - -def get_size(fileobj): - """Returns the size of the file object. The position when passed in will - be preserved if no error occurs. - - In case of an error raises IOError. - """ - - old_pos = fileobj.tell() - try: - fileobj.seek(0, 2) - return fileobj.tell() - finally: - fileobj.seek(old_pos, 0) - - -def insert_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): - """Insert size bytes of empty space starting at offset. - - fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. - """ - - assert 0 < size - assert 0 <= offset - - fobj.seek(0, 2) - filesize = fobj.tell() - movesize = filesize - offset - fobj.write(b'\x00' * size) - fobj.flush() - - try: - import mmap - file_map = mmap.mmap(fobj.fileno(), filesize + size) - try: - file_map.move(offset + size, offset, movesize) - finally: - file_map.close() - except (ValueError, EnvironmentError, ImportError, AttributeError): - # handle broken mmap scenarios, BytesIO() - fobj.truncate(filesize) - - fobj.seek(0, 2) - padsize = size - # Don't generate an enormous string if we need to pad - # the file out several megs. - while padsize: - addsize = min(BUFFER_SIZE, padsize) - fobj.write(b"\x00" * addsize) - padsize -= addsize - - fobj.seek(filesize, 0) - while movesize: - # At the start of this loop, fobj is pointing at the end - # of the data we need to move, which is of movesize length. - thismove = min(BUFFER_SIZE, movesize) - # Seek back however much we're going to read this frame. - fobj.seek(-thismove, 1) - nextpos = fobj.tell() - # Read it, so we're back at the end. - data = fobj.read(thismove) - # Seek back to where we need to write it. - fobj.seek(-thismove + size, 1) - # Write it. - fobj.write(data) - # And seek back to the end of the unmoved data. 
- fobj.seek(nextpos) - movesize -= thismove - - fobj.flush() - - -def delete_bytes(fobj, size, offset, BUFFER_SIZE=2 ** 16): - """Delete size bytes of empty space starting at offset. - - fobj must be an open file object, open rb+ or - equivalent. Mutagen tries to use mmap to resize the file, but - falls back to a significantly slower method if mmap fails. - """ - - assert 0 < size - assert 0 <= offset - - fobj.seek(0, 2) - filesize = fobj.tell() - movesize = filesize - offset - size - assert 0 <= movesize - - if movesize > 0: - fobj.flush() - try: - import mmap - file_map = mmap.mmap(fobj.fileno(), filesize) - try: - file_map.move(offset, offset + size, movesize) - finally: - file_map.close() - except (ValueError, EnvironmentError, ImportError, AttributeError): - # handle broken mmap scenarios, BytesIO() - fobj.seek(offset + size) - buf = fobj.read(BUFFER_SIZE) - while buf: - fobj.seek(offset) - fobj.write(buf) - offset += len(buf) - fobj.seek(offset + size) - buf = fobj.read(BUFFER_SIZE) - fobj.truncate(filesize - size) - fobj.flush() - - -def resize_bytes(fobj, old_size, new_size, offset): - """Resize an area in a file adding and deleting at the end of it. - Does nothing if no resizing is needed. - """ - - if new_size < old_size: - delete_size = old_size - new_size - delete_at = offset + new_size - delete_bytes(fobj, delete_size, delete_at) - elif new_size > old_size: - insert_size = new_size - old_size - insert_at = offset + old_size - insert_bytes(fobj, insert_size, insert_at) - - -def dict_match(d, key, default=None): - """Like __getitem__ but works as if the keys() are all filename patterns. - Returns the value of any dict key that matches the passed key. - """ - - if key in d and "[" not in key: - return d[key] - else: - for pattern, value in iteritems(d): - if fnmatchcase(key, pattern): - return value - return default - - -def decode_terminated(data, encoding, strict=True): - """Returns the decoded data until the first NULL terminator - and all data after it. - - In case the data can't be decoded raises UnicodeError. - In case the encoding is not found raises LookupError. - In case the data isn't null terminated (even if it is encoded correctly) - raises ValueError except if strict is False, then the decoded string - will be returned anyway. - """ - - codec_info = codecs.lookup(encoding) - - # normalize encoding name so we can compare by name - encoding = codec_info.name - - # fast path - if encoding in ("utf-8", "iso8859-1"): - index = data.find(b"\x00") - if index == -1: - # make sure we raise UnicodeError first, like in the slow path - res = data.decode(encoding), b"" - if strict: - raise ValueError("not null terminated") - else: - return res - return data[:index].decode(encoding), data[index + 1:] - - # slow path - decoder = codec_info.incrementaldecoder() - r = [] - for i, b in enumerate(iterbytes(data)): - c = decoder.decode(b) - if c == u"\x00": - return u"".join(r), data[i + 1:] - r.append(c) - else: - # make sure the decoder is finished - r.append(decoder.decode(b"", True)) - if strict: - raise ValueError("not null terminated") - return u"".join(r), b"" - - -class BitReaderError(Exception): - pass - - -class BitReader(object): - - def __init__(self, fileobj): - self._fileobj = fileobj - self._buffer = 0 - self._bits = 0 - self._pos = fileobj.tell() - - def bits(self, count): - """Reads `count` bits and returns an uint, MSB read first. - - May raise BitReaderError if not enough data could be read or - IOError by the underlying file object. 
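# [Editor's aside -- illustrative sketch, not part of the original diff.]
# decode_terminated() above stops at the first NUL and hands back the
# remainder untouched; without a terminator, strict=False still decodes:
#
#     >>> decode_terminated(b"abc\x00rest", "utf-8") == (u"abc", b"rest")
#     True
#     >>> decode_terminated(b"abc", "utf-8", strict=False) == (u"abc", b"")
#     True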
- """ - - if count < 0: - raise ValueError - - if count > self._bits: - n_bytes = (count - self._bits + 7) // 8 - data = self._fileobj.read(n_bytes) - if len(data) != n_bytes: - raise BitReaderError("not enough data") - for b in bytearray(data): - self._buffer = (self._buffer << 8) | b - self._bits += n_bytes * 8 - - self._bits -= count - value = self._buffer >> self._bits - self._buffer &= (1 << self._bits) - 1 - assert self._bits < 8 - return value - - def bytes(self, count): - """Returns a bytearray of length `count`. Works unaligned.""" - - if count < 0: - raise ValueError - - # fast path - if self._bits == 0: - data = self._fileobj.read(count) - if len(data) != count: - raise BitReaderError("not enough data") - return data - - return bytes(bytearray(self.bits(8) for _ in xrange(count))) - - def skip(self, count): - """Skip `count` bits. - - Might raise BitReaderError if there wasn't enough data to skip, - but might also fail on the next bits() instead. - """ - - if count < 0: - raise ValueError - - if count <= self._bits: - self.bits(count) - else: - count -= self.align() - n_bytes = count // 8 - self._fileobj.seek(n_bytes, 1) - count -= n_bytes * 8 - self.bits(count) - - def get_position(self): - """Returns the amount of bits read or skipped so far""" - - return (self._fileobj.tell() - self._pos) * 8 - self._bits - - def align(self): - """Align to the next byte, returns the amount of bits skipped""" - - bits = self._bits - self._buffer = 0 - self._bits = 0 - return bits - - def is_aligned(self): - """If we are currently aligned to bytes and nothing is buffered""" - - return self._bits == 0 diff --git a/resources/lib/libraries/mutagen/_vorbis.py b/resources/lib/libraries/mutagen/_vorbis.py deleted file mode 100644 index da202400..00000000 --- a/resources/lib/libraries/mutagen/_vorbis.py +++ /dev/null @@ -1,330 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005-2006 Joe Wreschnig -# 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Read and write Vorbis comment data. - -Vorbis comments are freeform key/value pairs; keys are -case-insensitive ASCII and values are Unicode strings. A key may have -multiple values. - -The specification is at http://www.xiph.org/vorbis/doc/v-comment.html. -""" - -import sys - -import mutagen -from ._compat import reraise, BytesIO, text_type, xrange, PY3, PY2 -from mutagen._util import DictMixin, cdata - - -def is_valid_key(key): - """Return true if a string is a valid Vorbis comment key. - - Valid Vorbis comment keys are printable ASCII between 0x20 (space) - and 0x7D ('}'), excluding '='. - - Takes str/unicode in Python 2, unicode in Python 3 - """ - - if PY3 and isinstance(key, bytes): - raise TypeError("needs to be str not bytes") - - for c in key: - if c < " " or c > "}" or c == "=": - return False - else: - return bool(key) - - -istag = is_valid_key - - -class error(IOError): - pass - - -class VorbisUnsetFrameError(error): - pass - - -class VorbisEncodingError(error): - pass - - -class VComment(mutagen.Metadata, list): - """A Vorbis comment parser, accessor, and renderer. - - All comment ordering is preserved. A VComment is a list of - key/value pairs, and so any Python list method can be used on it. - - Vorbis comments are always wrapped in something like an Ogg Vorbis - bitstream or a FLAC metadata block, so this loads string data or a - file-like object, not a filename. 
- - Attributes: - - * vendor -- the stream 'vendor' (i.e. writer); default 'Mutagen' - """ - - vendor = u"Mutagen " + mutagen.version_string - - def __init__(self, data=None, *args, **kwargs): - self._size = 0 - # Collect the args to pass to load, this lets child classes - # override just load and get equivalent magic for the - # constructor. - if data is not None: - if isinstance(data, bytes): - data = BytesIO(data) - elif not hasattr(data, 'read'): - raise TypeError("VComment requires bytes or a file-like") - start = data.tell() - self.load(data, *args, **kwargs) - self._size = data.tell() - start - - def load(self, fileobj, errors='replace', framing=True): - """Parse a Vorbis comment from a file-like object. - - Keyword arguments: - - * errors: - 'strict', 'replace', or 'ignore'. This affects Unicode decoding - and how other malformed content is interpreted. - * framing -- if true, fail if a framing bit is not present - - Framing bits are required by the Vorbis comment specification, - but are not used in FLAC Vorbis comment blocks. - """ - - try: - vendor_length = cdata.uint_le(fileobj.read(4)) - self.vendor = fileobj.read(vendor_length).decode('utf-8', errors) - count = cdata.uint_le(fileobj.read(4)) - for i in xrange(count): - length = cdata.uint_le(fileobj.read(4)) - try: - string = fileobj.read(length).decode('utf-8', errors) - except (OverflowError, MemoryError): - raise error("cannot read %d bytes, too large" % length) - try: - tag, value = string.split('=', 1) - except ValueError as err: - if errors == "ignore": - continue - elif errors == "replace": - tag, value = u"unknown%d" % i, string - else: - reraise(VorbisEncodingError, err, sys.exc_info()[2]) - try: - tag = tag.encode('ascii', errors) - except UnicodeEncodeError: - raise VorbisEncodingError("invalid tag name %r" % tag) - else: - # string keys in py3k - if PY3: - tag = tag.decode("ascii") - if is_valid_key(tag): - self.append((tag, value)) - - if framing and not bytearray(fileobj.read(1))[0] & 0x01: - raise VorbisUnsetFrameError("framing bit was unset") - except (cdata.error, TypeError): - raise error("file is not a valid Vorbis comment") - - def validate(self): - """Validate keys and values. - - Check to make sure every key used is a valid Vorbis key, and - that every value used is a valid Unicode or UTF-8 string. If - any invalid keys or values are found, a ValueError is raised. - - In Python 3 all keys and values have to be a string. - """ - - if not isinstance(self.vendor, text_type): - if PY3: - raise ValueError("vendor needs to be str") - - try: - self.vendor.decode('utf-8') - except UnicodeDecodeError: - raise ValueError - - for key, value in self: - try: - if not is_valid_key(key): - raise ValueError - except TypeError: - raise ValueError("%r is not a valid key" % key) - - if not isinstance(value, text_type): - if PY3: - raise ValueError("%r needs to be str" % key) - - try: - value.decode("utf-8") - except: - raise ValueError("%r is not a valid value" % value) - - return True - - def clear(self): - """Clear all keys from the comment.""" - - for i in list(self): - self.remove(i) - - def write(self, framing=True): - """Return a string representation of the data. - - Validation is always performed, so calling this function on - invalid data may raise a ValueError. 
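# [Editor's aside -- illustrative sketch, not part of the original diff.]
# The parser above and write() below are symmetric, so a comment survives
# a roundtrip through its serialized form (framing bit included):
#
#     >>> vc = VComment()
#     >>> vc.append((u"TITLE", u"A Song"))
#     >>> VComment(vc.write())[0] == (u"TITLE", u"A Song")
#     True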
- - Keyword arguments: - - * framing -- if true, append a framing bit (see load) - """ - - self.validate() - - def _encode(value): - if not isinstance(value, bytes): - return value.encode('utf-8') - return value - - f = BytesIO() - vendor = _encode(self.vendor) - f.write(cdata.to_uint_le(len(vendor))) - f.write(vendor) - f.write(cdata.to_uint_le(len(self))) - for tag, value in self: - tag = _encode(tag) - value = _encode(value) - comment = tag + b"=" + value - f.write(cdata.to_uint_le(len(comment))) - f.write(comment) - if framing: - f.write(b"\x01") - return f.getvalue() - - def pprint(self): - - def _decode(value): - if not isinstance(value, text_type): - return value.decode('utf-8', 'replace') - return value - - tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self] - return u"\n".join(tags) - - -class VCommentDict(VComment, DictMixin): - """A VComment that looks like a dictionary. - - This object differs from a dictionary in two ways. First, - len(comment) will still return the number of values, not the - number of keys. Secondly, iterating through the object will - iterate over (key, value) pairs, not keys. Since a key may have - multiple values, the same value may appear multiple times while - iterating. - - Since Vorbis comment keys are case-insensitive, all keys are - normalized to lowercase ASCII. - """ - - def __getitem__(self, key): - """A list of values for the key. - - This is a copy, so comment['title'].append('a title') will not - work. - """ - - # PY3 only - if isinstance(key, slice): - return VComment.__getitem__(self, key) - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - - values = [value for (k, value) in self if k.lower() == key] - if not values: - raise KeyError(key) - else: - return values - - def __delitem__(self, key): - """Delete all values associated with the key.""" - - # PY3 only - if isinstance(key, slice): - return VComment.__delitem__(self, key) - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - to_delete = [x for x in self if x[0].lower() == key] - if not to_delete: - raise KeyError(key) - else: - for item in to_delete: - self.remove(item) - - def __contains__(self, key): - """Return true if the key has any values.""" - - if not is_valid_key(key): - raise ValueError - - key = key.lower() - for k, value in self: - if k.lower() == key: - return True - else: - return False - - def __setitem__(self, key, values): - """Set a key's value or values. - - Setting a value overwrites all old ones. The value may be a - list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 - string. 
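# [Editor's aside -- illustrative sketch, not part of the original diff.]
# VCommentDict stores keys lowercased, so lookups and deletions are
# case-insensitive while value order is preserved:
#
#     >>> vcd = VCommentDict()
#     >>> vcd[u"genre"] = [u"Rock", u"Blues"]
#     >>> vcd[u"GENRE"] == [u"Rock", u"Blues"]
#     True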
- """ - - # PY3 only - if isinstance(key, slice): - return VComment.__setitem__(self, key, values) - - if not is_valid_key(key): - raise ValueError - - if not isinstance(values, list): - values = [values] - try: - del(self[key]) - except KeyError: - pass - - if PY2: - key = key.encode('ascii') - - for value in values: - self.append((key, value)) - - def keys(self): - """Return all keys in the comment.""" - - return list(set([k.lower() for k, v in self])) - - def as_dict(self): - """Return a copy of the comment data in a real dict.""" - - return dict([(key, self[key]) for key in self.keys()]) diff --git a/resources/lib/libraries/mutagen/aac.py b/resources/lib/libraries/mutagen/aac.py deleted file mode 100644 index 83968a05..00000000 --- a/resources/lib/libraries/mutagen/aac.py +++ /dev/null @@ -1,410 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -""" -* ADTS - Audio Data Transport Stream -* ADIF - Audio Data Interchange Format -* See ISO/IEC 13818-7 / 14496-03 -""" - -from mutagen import StreamInfo -from mutagen._file import FileType -from mutagen._util import BitReader, BitReaderError, MutagenError -from mutagen._compat import endswith, xrange - - -_FREQS = [ - 96000, 88200, 64000, 48000, - 44100, 32000, 24000, 22050, - 16000, 12000, 11025, 8000, - 7350, -] - - -class _ADTSStream(object): - """Represents a series of frames belonging to the same stream""" - - parsed_frames = 0 - """Number of successfully parsed frames""" - - offset = 0 - """offset in bytes at which the stream starts (the first sync word)""" - - @classmethod - def find_stream(cls, fileobj, max_bytes): - """Returns a possibly valid _ADTSStream or None. - - Args: - max_bytes (int): maximum bytes to read - """ - - r = BitReader(fileobj) - stream = cls(r) - if stream.sync(max_bytes): - stream.offset = (r.get_position() - 12) // 8 - return stream - - def sync(self, max_bytes): - """Find the next sync. 
- Returns True if found.""" - - # at least 2 bytes for the sync - max_bytes = max(max_bytes, 2) - - r = self._r - r.align() - while max_bytes > 0: - try: - b = r.bytes(1) - if b == b"\xff": - if r.bits(4) == 0xf: - return True - r.align() - max_bytes -= 2 - else: - max_bytes -= 1 - except BitReaderError: - return False - return False - - def __init__(self, r): - """Use _ADTSStream.find_stream to create a stream""" - - self._fixed_header_key = None - self._r = r - self.offset = -1 - self.parsed_frames = 0 - - self._samples = 0 - self._payload = 0 - self._start = r.get_position() / 8 - self._last = self._start - - @property - def bitrate(self): - """Bitrate of the raw aac blocks, excluding framing/crc""" - - assert self.parsed_frames, "no frame parsed yet" - - if self._samples == 0: - return 0 - - return (8 * self._payload * self.frequency) // self._samples - - @property - def samples(self): - """samples so far""" - - assert self.parsed_frames, "no frame parsed yet" - - return self._samples - - @property - def size(self): - """bytes read in the stream so far (including framing)""" - - assert self.parsed_frames, "no frame parsed yet" - - return self._last - self._start - - @property - def channels(self): - """0 means unknown""" - - assert self.parsed_frames, "no frame parsed yet" - - b_index = self._fixed_header_key[6] - if b_index == 7: - return 8 - elif b_index > 7: - return 0 - else: - return b_index - - @property - def frequency(self): - """0 means unknown""" - - assert self.parsed_frames, "no frame parsed yet" - - f_index = self._fixed_header_key[4] - try: - return _FREQS[f_index] - except IndexError: - return 0 - - def parse_frame(self): - """True if parsing was successful. - Fails either because the frame wasn't valid or the stream ended. - """ - - try: - return self._parse_frame() - except BitReaderError: - return False - - def _parse_frame(self): - r = self._r - # start == position of sync word - start = r.get_position() - 12 - - # adts_fixed_header - id_ = r.bits(1) - layer = r.bits(2) - protection_absent = r.bits(1) - - profile = r.bits(2) - sampling_frequency_index = r.bits(4) - private_bit = r.bits(1) - # TODO: if 0 we could parse program_config_element() - channel_configuration = r.bits(3) - original_copy = r.bits(1) - home = r.bits(1) - - # the fixed header has to be the same for every frame in the stream - fixed_header_key = ( - id_, layer, protection_absent, profile, sampling_frequency_index, - private_bit, channel_configuration, original_copy, home, - ) - - if self._fixed_header_key is None: - self._fixed_header_key = fixed_header_key - else: - if self._fixed_header_key != fixed_header_key: - return False - - # adts_variable_header - r.skip(2) # copyright_identification_bit/start - frame_length = r.bits(13) - r.skip(11) # adts_buffer_fullness - nordbif = r.bits(2) - # adts_variable_header end - - crc_overhead = 0 - if not protection_absent: - crc_overhead += (nordbif + 1) * 16 - if nordbif != 0: - crc_overhead *= 2 - - left = (frame_length * 8) - (r.get_position() - start) - if left < 0: - return False - r.skip(left) - assert r.is_aligned() - - self._payload += (left - crc_overhead) / 8 - self._samples += (nordbif + 1) * 1024 - self._last = r.get_position() / 8 - - self.parsed_frames += 1 - return True - - -class ProgramConfigElement(object): - - element_instance_tag = None - object_type = None - sampling_frequency_index = None - channels = None - - def __init__(self, r): - """Reads the program_config_element() - - Raises BitReaderError - """ - - self.element_instance_tag = 
r.bits(4) - self.object_type = r.bits(2) - self.sampling_frequency_index = r.bits(4) - num_front_channel_elements = r.bits(4) - num_side_channel_elements = r.bits(4) - num_back_channel_elements = r.bits(4) - num_lfe_channel_elements = r.bits(2) - num_assoc_data_elements = r.bits(3) - num_valid_cc_elements = r.bits(4) - - mono_mixdown_present = r.bits(1) - if mono_mixdown_present == 1: - r.skip(4) - stereo_mixdown_present = r.bits(1) - if stereo_mixdown_present == 1: - r.skip(4) - matrix_mixdown_idx_present = r.bits(1) - if matrix_mixdown_idx_present == 1: - r.skip(3) - - elms = num_front_channel_elements + num_side_channel_elements + \ - num_back_channel_elements - channels = 0 - for i in xrange(elms): - channels += 1 - element_is_cpe = r.bits(1) - if element_is_cpe: - channels += 1 - r.skip(4) - channels += num_lfe_channel_elements - self.channels = channels - - r.skip(4 * num_lfe_channel_elements) - r.skip(4 * num_assoc_data_elements) - r.skip(5 * num_valid_cc_elements) - r.align() - comment_field_bytes = r.bits(8) - r.skip(8 * comment_field_bytes) - - -class AACError(MutagenError): - pass - - -class AACInfo(StreamInfo): - """AAC stream information. - - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bitrate -- audio bitrate, in bits per second - - The length of the stream is just a guess and might not be correct. - """ - - channels = 0 - length = 0 - sample_rate = 0 - bitrate = 0 - - def __init__(self, fileobj): - # skip id3v2 header - start_offset = 0 - header = fileobj.read(10) - from mutagen.id3 import BitPaddedInt - if header.startswith(b"ID3"): - size = BitPaddedInt(header[6:]) - start_offset = size + 10 - - fileobj.seek(start_offset) - adif = fileobj.read(4) - if adif == b"ADIF": - self._parse_adif(fileobj) - self._type = "ADIF" - else: - self._parse_adts(fileobj, start_offset) - self._type = "ADTS" - - def _parse_adif(self, fileobj): - r = BitReader(fileobj) - try: - copyright_id_present = r.bits(1) - if copyright_id_present: - r.skip(72) # copyright_id - r.skip(1 + 1) # original_copy, home - bitstream_type = r.bits(1) - self.bitrate = r.bits(23) - npce = r.bits(4) - if bitstream_type == 0: - r.skip(20) # adif_buffer_fullness - - pce = ProgramConfigElement(r) - try: - self.sample_rate = _FREQS[pce.sampling_frequency_index] - except IndexError: - pass - self.channels = pce.channels - - # other pces.. - for i in xrange(npce): - ProgramConfigElement(r) - r.align() - except BitReaderError as e: - raise AACError(e) - - # use bitrate + data size to guess length - start = fileobj.tell() - fileobj.seek(0, 2) - length = fileobj.tell() - start - if self.bitrate != 0: - self.length = (8.0 * length) / self.bitrate - - def _parse_adts(self, fileobj, start_offset): - max_initial_read = 512 - max_resync_read = 10 - max_sync_tries = 10 - - frames_max = 100 - frames_needed = 3 - - # Try up to X times to find a sync word and read up to Y frames. 
- # If more than Z frames are valid we assume a valid stream - offset = start_offset - for i in xrange(max_sync_tries): - fileobj.seek(offset) - s = _ADTSStream.find_stream(fileobj, max_initial_read) - if s is None: - raise AACError("sync not found") - # start right after the last found offset - offset += s.offset + 1 - - for i in xrange(frames_max): - if not s.parse_frame(): - break - if not s.sync(max_resync_read): - break - - if s.parsed_frames >= frames_needed: - break - else: - raise AACError( - "no valid stream found (only %d frames)" % s.parsed_frames) - - self.sample_rate = s.frequency - self.channels = s.channels - self.bitrate = s.bitrate - - # size from stream start to end of file - fileobj.seek(0, 2) - stream_size = fileobj.tell() - (offset + s.offset) - # approx - self.length = float(s.samples * stream_size) / (s.size * s.frequency) - - def pprint(self): - return u"AAC (%s), %d Hz, %.2f seconds, %d channel(s), %d bps" % ( - self._type, self.sample_rate, self.length, self.channels, - self.bitrate) - - -class AAC(FileType): - """Load ADTS or ADIF streams containing AAC. - - Tagging is not supported. - Use the ID3/APEv2 classes directly instead. - """ - - _mimes = ["audio/x-aac"] - - def load(self, filename): - self.filename = filename - with open(filename, "rb") as h: - self.info = AACInfo(h) - - def add_tags(self): - raise AACError("doesn't support tags") - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - s = endswith(filename, ".aac") or endswith(filename, ".adts") or \ - endswith(filename, ".adif") - s += b"ADIF" in header - return s - - -Open = AAC -error = AACError - -__all__ = ["AAC", "Open"] diff --git a/resources/lib/libraries/mutagen/aiff.py b/resources/lib/libraries/mutagen/aiff.py deleted file mode 100644 index dc580063..00000000 --- a/resources/lib/libraries/mutagen/aiff.py +++ /dev/null @@ -1,357 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2014 Evan Purkhiser -# 2014 Ben Ockmore -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. 
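# [Editor's aside -- illustrative sketch, not part of the original diff;
# "example.aac" is a hypothetical path.]
# Typical use of the ADTS/ADIF support that ends above:
#
#     f = AAC("example.aac")
#     print(f.info.pprint())  # e.g. u'AAC (ADTS), 44100 Hz, ...'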
- -"""AIFF audio stream information and tags.""" - -import sys -import struct -from struct import pack - -from ._compat import endswith, text_type, reraise -from mutagen import StreamInfo, FileType - -from mutagen.id3 import ID3 -from mutagen.id3._util import ID3NoHeaderError, error as ID3Error -from mutagen._util import resize_bytes, delete_bytes, MutagenError - -__all__ = ["AIFF", "Open", "delete"] - - -class error(MutagenError, RuntimeError): - pass - - -class InvalidChunk(error, IOError): - pass - - -# based on stdlib's aifc -_HUGE_VAL = 1.79769313486231e+308 - - -def is_valid_chunk_id(id): - assert isinstance(id, text_type) - - return ((len(id) <= 4) and (min(id) >= u' ') and - (max(id) <= u'~')) - - -def read_float(data): # 10 bytes - expon, himant, lomant = struct.unpack('>hLL', data) - sign = 1 - if expon < 0: - sign = -1 - expon = expon + 0x8000 - if expon == himant == lomant == 0: - f = 0.0 - elif expon == 0x7FFF: - f = _HUGE_VAL - else: - expon = expon - 16383 - f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) - return sign * f - - -class IFFChunk(object): - """Representation of a single IFF chunk""" - - # Chunk headers are 8 bytes long (4 for ID and 4 for the size) - HEADER_SIZE = 8 - - def __init__(self, fileobj, parent_chunk=None): - self.__fileobj = fileobj - self.parent_chunk = parent_chunk - self.offset = fileobj.tell() - - header = fileobj.read(self.HEADER_SIZE) - if len(header) < self.HEADER_SIZE: - raise InvalidChunk() - - self.id, self.data_size = struct.unpack('>4si', header) - - try: - self.id = self.id.decode('ascii') - except UnicodeDecodeError: - raise InvalidChunk() - - if not is_valid_chunk_id(self.id): - raise InvalidChunk() - - self.size = self.HEADER_SIZE + self.data_size - self.data_offset = fileobj.tell() - - def read(self): - """Read the chunks data""" - - self.__fileobj.seek(self.data_offset) - return self.__fileobj.read(self.data_size) - - def write(self, data): - """Write the chunk data""" - - if len(data) > self.data_size: - raise ValueError - - self.__fileobj.seek(self.data_offset) - self.__fileobj.write(data) - - def delete(self): - """Removes the chunk from the file""" - - delete_bytes(self.__fileobj, self.size, self.offset) - if self.parent_chunk is not None: - self.parent_chunk._update_size( - self.parent_chunk.data_size - self.size) - - def _update_size(self, data_size): - """Update the size of the chunk""" - - self.__fileobj.seek(self.offset + 4) - self.__fileobj.write(pack('>I', data_size)) - if self.parent_chunk is not None: - size_diff = self.data_size - data_size - self.parent_chunk._update_size( - self.parent_chunk.data_size - size_diff) - self.data_size = data_size - self.size = data_size + self.HEADER_SIZE - - def resize(self, new_data_size): - """Resize the file and update the chunk sizes""" - - resize_bytes( - self.__fileobj, self.data_size, new_data_size, self.data_offset) - self._update_size(new_data_size) - - -class IFFFile(object): - """Representation of a IFF file""" - - def __init__(self, fileobj): - self.__fileobj = fileobj - self.__chunks = {} - - # AIFF Files always start with the FORM chunk which contains a 4 byte - # ID before the start of other chunks - fileobj.seek(0) - self.__chunks[u'FORM'] = IFFChunk(fileobj) - - # Skip past the 4 byte FORM id - fileobj.seek(IFFChunk.HEADER_SIZE + 4) - - # Where the next chunk can be located. 
We need to keep track of this - # since the size indicated in the FORM header may not match up with the - # offset determined from the size of the last chunk in the file - self.__next_offset = fileobj.tell() - - # Load all of the chunks - while True: - try: - chunk = IFFChunk(fileobj, self[u'FORM']) - except InvalidChunk: - break - self.__chunks[chunk.id.strip()] = chunk - - # Calculate the location of the next chunk, - # considering the pad byte - self.__next_offset = chunk.offset + chunk.size - self.__next_offset += self.__next_offset % 2 - fileobj.seek(self.__next_offset) - - def __contains__(self, id_): - """Check if the IFF file contains a specific chunk""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - return id_ in self.__chunks - - def __getitem__(self, id_): - """Get a chunk from the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - try: - return self.__chunks[id_] - except KeyError: - raise KeyError( - "%r has no %r chunk" % (self.__fileobj.name, id_)) - - def __delitem__(self, id_): - """Remove a chunk from the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - self.__chunks.pop(id_).delete() - - def insert_chunk(self, id_): - """Insert a new chunk at the end of the IFF file""" - - assert isinstance(id_, text_type) - - if not is_valid_chunk_id(id_): - raise KeyError("AIFF key must be four ASCII characters.") - - self.__fileobj.seek(self.__next_offset) - self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0)) - self.__fileobj.seek(self.__next_offset) - chunk = IFFChunk(self.__fileobj, self[u'FORM']) - self[u'FORM']._update_size(self[u'FORM'].data_size + chunk.size) - - self.__chunks[id_] = chunk - self.__next_offset = chunk.offset + chunk.size - - -class AIFFInfo(StreamInfo): - """AIFF audio stream information. 
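# [Editor's aside -- illustrative sketch, not part of the original diff.]
# The COMM sample rate parsed below is an 80-bit extended float; for
# 44100 Hz that is exponent 0x400E with high mantissa word 0xAC440000,
# which read_float() above decodes as:
#
#     >>> import struct
#     >>> read_float(struct.pack('>hLL', 0x400E, 0xAC440000, 0))
#     44100.0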
- - Information is parsed from the COMM chunk of the AIFF file - - Useful attributes: - - * length -- audio length, in seconds - * bitrate -- audio bitrate, in bits per second - * channels -- The number of audio channels - * sample_rate -- audio sample rate, in Hz - * sample_size -- The audio sample size - """ - - length = 0 - bitrate = 0 - channels = 0 - sample_rate = 0 - - def __init__(self, fileobj): - iff = IFFFile(fileobj) - try: - common_chunk = iff[u'COMM'] - except KeyError as e: - raise error(str(e)) - - data = common_chunk.read() - - info = struct.unpack('>hLh10s', data[:18]) - channels, frame_count, sample_size, sample_rate = info - - self.sample_rate = int(read_float(sample_rate)) - self.sample_size = sample_size - self.channels = channels - self.bitrate = channels * sample_size * self.sample_rate - self.length = frame_count / float(self.sample_rate) - - def pprint(self): - return u"%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % ( - self.channels, self.bitrate, self.sample_rate, self.length) - - -class _IFFID3(ID3): - """A AIFF file with ID3v2 tags""" - - def _pre_load_header(self, fileobj): - try: - fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset) - except (InvalidChunk, KeyError): - raise ID3NoHeaderError("No ID3 chunk") - - def save(self, filename=None, v2_version=4, v23_sep='/', padding=None): - """Save ID3v2 data to the AIFF file""" - - if filename is None: - filename = self.filename - - # Unlike the parent ID3.save method, we won't save to a blank file - # since we would have to construct a empty AIFF file - with open(filename, 'rb+') as fileobj: - iff_file = IFFFile(fileobj) - - if u'ID3' not in iff_file: - iff_file.insert_chunk(u'ID3') - - chunk = iff_file[u'ID3'] - - try: - data = self._prepare_data( - fileobj, chunk.data_offset, chunk.data_size, v2_version, - v23_sep, padding) - except ID3Error as e: - reraise(error, e, sys.exc_info()[2]) - - new_size = len(data) - new_size += new_size % 2 # pad byte - assert new_size % 2 == 0 - chunk.resize(new_size) - data += (new_size - len(data)) * b'\x00' - assert new_size == len(data) - chunk.write(data) - - def delete(self, filename=None): - """Completely removes the ID3 chunk from the AIFF file""" - - if filename is None: - filename = self.filename - delete(filename) - self.clear() - - -def delete(filename): - """Completely removes the ID3 chunk from the AIFF file""" - - with open(filename, "rb+") as file_: - try: - del IFFFile(file_)[u'ID3'] - except KeyError: - pass - - -class AIFF(FileType): - """An AIFF audio file. 
-
- :ivar info: :class:`AIFFInfo`
- :ivar tags: :class:`ID3`
- """
-
- _mimes = ["audio/aiff", "audio/x-aiff"]
-
- @staticmethod
- def score(filename, fileobj, header):
- filename = filename.lower()
-
- return (header.startswith(b"FORM") * 2 + endswith(filename, ".aif") +
- endswith(filename, ".aiff") + endswith(filename, ".aifc"))
-
- def add_tags(self):
- """Add an empty ID3 tag to the file."""
- if self.tags is None:
- self.tags = _IFFID3()
- else:
- raise error("an ID3 tag already exists")
-
- def load(self, filename, **kwargs):
- """Load stream and tag information from a file."""
- self.filename = filename
-
- try:
- self.tags = _IFFID3(filename, **kwargs)
- except ID3NoHeaderError:
- self.tags = None
- except ID3Error as e:
- raise error(e)
-
- with open(filename, "rb") as fileobj:
- self.info = AIFFInfo(fileobj)
-
-
-Open = AIFF
diff --git a/resources/lib/libraries/mutagen/apev2.py b/resources/lib/libraries/mutagen/apev2.py
deleted file mode 100644
index 3b79aba9..00000000
--- a/resources/lib/libraries/mutagen/apev2.py
+++ /dev/null
@@ -1,710 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2005 Joe Wreschnig
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-"""APEv2 reading and writing.
-
-The APEv2 format is most commonly used with Musepack files, but is
-also the format of choice for WavPack and other formats. Some MP3s
-also have APEv2 tags, but this can cause problems with many MP3
-decoders and taggers.
-
-APEv2 tags, like Vorbis comments, are freeform key=value pairs. APEv2
-keys can be any ASCII string with characters from 0x20 to 0x7E,
-between 2 and 255 characters long. Keys are case-sensitive, but
-readers are recommended to be case-insensitive, and it is forbidden to
-use multiple keys which differ only in case. Keys are usually stored
-title-cased (e.g. 'Artist' rather than 'artist').
-
-APEv2 values are slightly more structured than Vorbis comments; values
-are flagged as one of text, binary, or an external reference (usually
-a URI).
-
-Based on the format specification found at
-http://wiki.hydrogenaudio.org/index.php?title=APEv2_specification.
-"""
-
-__all__ = ["APEv2", "APEv2File", "Open", "delete"]
-
-import sys
-import struct
-from collections import MutableSequence
-
-from ._compat import (cBytesIO, PY3, text_type, PY2, reraise, swap_to_string,
- xrange)
-from mutagen import Metadata, FileType, StreamInfo
-from mutagen._util import (DictMixin, cdata, delete_bytes, total_ordering,
- MutagenError)
-
-
-def is_valid_apev2_key(key):
- if not isinstance(key, text_type):
- if PY3:
- raise TypeError("APEv2 key must be str")
-
- try:
- key = key.decode('ascii')
- except UnicodeDecodeError:
- return False
-
- # PY26 - Change to set literal syntax (since set is faster than list here)
- return ((2 <= len(key) <= 255) and (min(key) >= u' ') and
- (max(key) <= u'~') and
- (key not in [u"OggS", u"TAG", u"ID3", u"MP+"]))
-
-# There are three different kinds of APE tag values.
-# "0: Item contains text information coded in UTF-8
-# 1: Item contains binary information
-# 2: Item is a locator of external stored information [e.g.
URL] -# 3: reserved" -TEXT, BINARY, EXTERNAL = xrange(3) - -HAS_HEADER = 1 << 31 -HAS_NO_FOOTER = 1 << 30 -IS_HEADER = 1 << 29 - - -class error(IOError, MutagenError): - pass - - -class APENoHeaderError(error, ValueError): - pass - - -class APEUnsupportedVersionError(error, ValueError): - pass - - -class APEBadItemError(error, ValueError): - pass - - -class _APEv2Data(object): - # Store offsets of the important parts of the file. - start = header = data = footer = end = None - # Footer or header; seek here and read 32 to get version/size/items/flags - metadata = None - # Actual tag data - tag = None - - version = None - size = None - items = None - flags = 0 - - # The tag is at the start rather than the end. A tag at both - # the start and end of the file (i.e. the tag is the whole file) - # is not considered to be at the start. - is_at_start = False - - def __init__(self, fileobj): - self.__find_metadata(fileobj) - - if self.header is None: - self.metadata = self.footer - elif self.footer is None: - self.metadata = self.header - else: - self.metadata = max(self.header, self.footer) - - if self.metadata is None: - return - - self.__fill_missing(fileobj) - self.__fix_brokenness(fileobj) - if self.data is not None: - fileobj.seek(self.data) - self.tag = fileobj.read(self.size) - - def __find_metadata(self, fileobj): - # Try to find a header or footer. - - # Check for a simple footer. - try: - fileobj.seek(-32, 2) - except IOError: - fileobj.seek(0, 2) - return - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = self.metadata = fileobj.tell() - return - - # Check for an APEv2 tag followed by an ID3v1 tag at the end. - try: - fileobj.seek(-128, 2) - if fileobj.read(3) == b"TAG": - - fileobj.seek(-35, 1) # "TAG" + header length - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = fileobj.tell() - return - - # ID3v1 tag at the end, maybe preceded by Lyrics3v2. - # (http://www.id3.org/lyrics3200.html) - # (header length - "APETAGEX") - "LYRICS200" - fileobj.seek(15, 1) - if fileobj.read(9) == b'LYRICS200': - fileobj.seek(-15, 1) # "LYRICS200" + size tag - try: - offset = int(fileobj.read(6)) - except ValueError: - raise IOError - - fileobj.seek(-32 - offset - 6, 1) - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - self.footer = fileobj.tell() - return - - except IOError: - pass - - # Check for a tag at the start. - fileobj.seek(0, 0) - if fileobj.read(8) == b"APETAGEX": - self.is_at_start = True - self.header = 0 - - def __fill_missing(self, fileobj): - fileobj.seek(self.metadata + 8) - self.version = fileobj.read(4) - self.size = cdata.uint_le(fileobj.read(4)) - self.items = cdata.uint_le(fileobj.read(4)) - self.flags = cdata.uint_le(fileobj.read(4)) - - if self.header is not None: - self.data = self.header + 32 - # If we're reading the header, the size is the header - # offset + the size, which includes the footer. - self.end = self.data + self.size - fileobj.seek(self.end - 32, 0) - if fileobj.read(8) == b"APETAGEX": - self.footer = self.end - 32 - elif self.footer is not None: - self.end = self.footer + 32 - self.data = self.end - self.size - if self.flags & HAS_HEADER: - self.header = self.data - 32 - else: - self.header = self.data - else: - raise APENoHeaderError("No APE tag found") - - # exclude the footer from size - if self.footer is not None: - self.size -= 32 - - def __fix_brokenness(self, fileobj): - # Fix broken tags written with PyMusepack. 
- if self.header is not None: - start = self.header - else: - start = self.data - fileobj.seek(start) - - while start > 0: - # Clean up broken writing from pre-Mutagen PyMusepack. - # It didn't remove the first 24 bytes of header. - try: - fileobj.seek(-24, 1) - except IOError: - break - else: - if fileobj.read(8) == b"APETAGEX": - fileobj.seek(-8, 1) - start = fileobj.tell() - else: - break - self.start = start - - -class _CIDictProxy(DictMixin): - - def __init__(self, *args, **kwargs): - self.__casemap = {} - self.__dict = {} - super(_CIDictProxy, self).__init__(*args, **kwargs) - # Internally all names are stored as lowercase, but the case - # they were set with is remembered and used when saving. This - # is roughly in line with the standard, which says that keys - # are case-sensitive but two keys differing only in case are - # not allowed, and recommends case-insensitive - # implementations. - - def __getitem__(self, key): - return self.__dict[key.lower()] - - def __setitem__(self, key, value): - lower = key.lower() - self.__casemap[lower] = key - self.__dict[lower] = value - - def __delitem__(self, key): - lower = key.lower() - del(self.__casemap[lower]) - del(self.__dict[lower]) - - def keys(self): - return [self.__casemap.get(key, key) for key in self.__dict.keys()] - - -class APEv2(_CIDictProxy, Metadata): - """A file with an APEv2 tag. - - ID3v1 tags are silently ignored and overwritten. - """ - - filename = None - - def pprint(self): - """Return tag key=value pairs in a human-readable format.""" - - items = sorted(self.items()) - return u"\n".join(u"%s=%s" % (k, v.pprint()) for k, v in items) - - def load(self, filename): - """Load tags from a filename.""" - - self.filename = filename - with open(filename, "rb") as fileobj: - data = _APEv2Data(fileobj) - - if data.tag: - self.clear() - self.__parse_tag(data.tag, data.items) - else: - raise APENoHeaderError("No APE tag found") - - def __parse_tag(self, tag, count): - fileobj = cBytesIO(tag) - - for i in xrange(count): - size_data = fileobj.read(4) - # someone writes wrong item counts - if not size_data: - break - size = cdata.uint_le(size_data) - flags = cdata.uint_le(fileobj.read(4)) - - # Bits 1 and 2 bits are flags, 0-3 - # Bit 0 is read/write flag, ignored - kind = (flags & 6) >> 1 - if kind == 3: - raise APEBadItemError("value type must be 0, 1, or 2") - key = value = fileobj.read(1) - while key[-1:] != b'\x00' and value: - value = fileobj.read(1) - key += value - if key[-1:] == b"\x00": - key = key[:-1] - if PY3: - try: - key = key.decode("ascii") - except UnicodeError as err: - reraise(APEBadItemError, err, sys.exc_info()[2]) - value = fileobj.read(size) - - value = _get_value_type(kind)._new(value) - - self[key] = value - - def __getitem__(self, key): - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') - - return super(APEv2, self).__getitem__(key) - - def __delitem__(self, key): - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - if PY2: - key = key.encode('ascii') - - super(APEv2, self).__delitem__(key) - - def __setitem__(self, key, value): - """'Magic' value setter. - - This function tries to guess at what kind of value you want to - store. If you pass in a valid UTF-8 or Unicode string, it - treats it as a text value. If you pass in a list, it treats it - as a list of string/Unicode values. If you pass in a string - that is not valid UTF-8, it assumes it is a binary value. 
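# [Editor's aside -- illustrative sketch, not part of the original diff.]
# Combined with _CIDictProxy above, the guessing setter gives
# case-insensitive access to plain text values:
#
#     >>> tag = APEv2()
#     >>> tag[u"Artist"] = u"Some Artist"   # unicode -> TEXT value
#     >>> u"artist" in tag
#     True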
- - Python 3: all bytes will be assumed to be a byte value, even - if they are valid utf-8. - - If you need to force a specific type of value (e.g. binary - data that also happens to be valid UTF-8, or an external - reference), use the APEValue factory and set the value to the - result of that:: - - from mutagen.apev2 import APEValue, EXTERNAL - tag['Website'] = APEValue('http://example.org', EXTERNAL) - """ - - if not is_valid_apev2_key(key): - raise KeyError("%r is not a valid APEv2 key" % key) - - if PY2: - key = key.encode('ascii') - - if not isinstance(value, _APEValue): - # let's guess at the content if we're not already a value... - if isinstance(value, text_type): - # unicode? we've got to be text. - value = APEValue(value, TEXT) - elif isinstance(value, list): - items = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("item in list not str") - v = v.decode("utf-8") - items.append(v) - - # list? text. - value = APEValue(u"\0".join(items), TEXT) - else: - if PY3: - value = APEValue(value, BINARY) - else: - try: - value.decode("utf-8") - except UnicodeError: - # invalid UTF8 text, probably binary - value = APEValue(value, BINARY) - else: - # valid UTF8, probably text - value = APEValue(value, TEXT) - - super(APEv2, self).__setitem__(key, value) - - def save(self, filename=None): - """Save changes to a file. - - If no filename is given, the one most recently loaded is used. - - Tags are always written at the end of the file, and include - a header and a footer. - """ - - filename = filename or self.filename - try: - fileobj = open(filename, "r+b") - except IOError: - fileobj = open(filename, "w+b") - data = _APEv2Data(fileobj) - - if data.is_at_start: - delete_bytes(fileobj, data.end - data.start, data.start) - elif data.start is not None: - fileobj.seek(data.start) - # Delete an ID3v1 tag if present, too. - fileobj.truncate() - fileobj.seek(0, 2) - - tags = [] - for key, value in self.items(): - # Packed format for an item: - # 4B: Value length - # 4B: Value type - # Key name - # 1B: Null - # Key value - value_data = value._write() - if not isinstance(key, bytes): - key = key.encode("utf-8") - tag_data = bytearray() - tag_data += struct.pack("<2I", len(value_data), value.kind << 1) - tag_data += key + b"\0" + value_data - tags.append(bytes(tag_data)) - - # "APE tags items should be sorted ascending by size... This is - # not a MUST, but STRONGLY recommended. Actually the items should - # be sorted by importance/byte, but this is not feasible." 
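        # For example, a TEXT item Title=u"Blue" (kind 0, so flags 0) packs
        # to struct.pack("<2I", 4, 0) + b"Title" + b"\0" + b"Blue",
        # 18 bytes in all, so short text items tend to sort first
        # (illustrative values only).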
- tags.sort(key=len) - num_tags = len(tags) - tags = b"".join(tags) - - header = bytearray(b"APETAGEX") - # version, tag size, item count, flags - header += struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER | IS_HEADER) - header += b"\0" * 8 - fileobj.write(header) - - fileobj.write(tags) - - footer = bytearray(b"APETAGEX") - footer += struct.pack("<4I", 2000, len(tags) + 32, num_tags, - HAS_HEADER) - footer += b"\0" * 8 - - fileobj.write(footer) - fileobj.close() - - def delete(self, filename=None): - """Remove tags from a file.""" - - filename = filename or self.filename - with open(filename, "r+b") as fileobj: - data = _APEv2Data(fileobj) - if data.start is not None and data.size is not None: - delete_bytes(fileobj, data.end - data.start, data.start) - - self.clear() - - -Open = APEv2 - - -def delete(filename): - """Remove tags from a file.""" - - try: - APEv2(filename).delete() - except APENoHeaderError: - pass - - -def _get_value_type(kind): - """Returns a _APEValue subclass or raises ValueError""" - - if kind == TEXT: - return APETextValue - elif kind == BINARY: - return APEBinaryValue - elif kind == EXTERNAL: - return APEExtValue - raise ValueError("unknown kind %r" % kind) - - -def APEValue(value, kind): - """APEv2 tag value factory. - - Use this if you need to specify the value's type manually. Binary - and text data are automatically detected by APEv2.__setitem__. - """ - - try: - type_ = _get_value_type(kind) - except ValueError: - raise ValueError("kind must be TEXT, BINARY, or EXTERNAL") - else: - return type_(value) - - -class _APEValue(object): - - kind = None - value = None - - def __init__(self, value, kind=None): - # kind kwarg is for backwards compat - if kind is not None and kind != self.kind: - raise ValueError - self.value = self._validate(value) - - @classmethod - def _new(cls, data): - instance = cls.__new__(cls) - instance._parse(data) - return instance - - def _parse(self, data): - """Sets value or raises APEBadItemError""" - - raise NotImplementedError - - def _write(self): - """Returns bytes""" - - raise NotImplementedError - - def _validate(self, value): - """Returns validated value or raises TypeError/ValueErrr""" - - raise NotImplementedError - - def __repr__(self): - return "%s(%r, %d)" % (type(self).__name__, self.value, self.kind) - - -@swap_to_string -@total_ordering -class _APEUtf8Value(_APEValue): - - def _parse(self, data): - try: - self.value = data.decode("utf-8") - except UnicodeDecodeError as e: - reraise(APEBadItemError, e, sys.exc_info()[2]) - - def _validate(self, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - return value - - def _write(self): - return self.value.encode("utf-8") - - def __len__(self): - return len(self.value) - - def __bytes__(self): - return self._write() - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - def __str__(self): - return self.value - - -class APETextValue(_APEUtf8Value, MutableSequence): - """An APEv2 text value. - - Text values are Unicode/UTF-8 strings. They can be accessed like - strings (with a null separating the values), or arrays of strings. 
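    For example::

        value = APETextValue(u"Rock\0Blues")
        list(value)      # [u'Rock', u'Blues']
        value[1]         # u'Blues'
        value.pprint()   # u'Rock / Blues'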
- """ - - kind = TEXT - - def __iter__(self): - """Iterate over the strings of the value (not the characters)""" - - return iter(self.value.split(u"\0")) - - def __getitem__(self, index): - return self.value.split(u"\0")[index] - - def __len__(self): - return self.value.count(u"\0") + 1 - - def __setitem__(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - - values = list(self) - values[index] = value - self.value = u"\0".join(values) - - def insert(self, index, value): - if not isinstance(value, text_type): - if PY3: - raise TypeError("value not str") - else: - value = value.decode("utf-8") - - values = list(self) - values.insert(index, value) - self.value = u"\0".join(values) - - def __delitem__(self, index): - values = list(self) - del values[index] - self.value = u"\0".join(values) - - def pprint(self): - return u" / ".join(self) - - -@swap_to_string -@total_ordering -class APEBinaryValue(_APEValue): - """An APEv2 binary value.""" - - kind = BINARY - - def _parse(self, data): - self.value = data - - def _write(self): - return self.value - - def _validate(self, value): - if not isinstance(value, bytes): - raise TypeError("value not bytes") - return bytes(value) - - def __len__(self): - return len(self.value) - - def __bytes__(self): - return self._write() - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - def pprint(self): - return u"[%d bytes]" % len(self) - - -class APEExtValue(_APEUtf8Value): - """An APEv2 external value. - - External values are usually URI or IRI strings. - """ - - kind = EXTERNAL - - def pprint(self): - return u"[External] %s" % self.value - - -class APEv2File(FileType): - class _Info(StreamInfo): - length = 0 - bitrate = 0 - - def __init__(self, fileobj): - pass - - @staticmethod - def pprint(): - return u"Unknown format with APEv2 tag." - - def load(self, filename): - self.filename = filename - self.info = self._Info(open(filename, "rb")) - try: - self.tags = APEv2(filename) - except APENoHeaderError: - self.tags = None - - def add_tags(self): - if self.tags is None: - self.tags = APEv2() - else: - raise error("%r already has tags: %r" % (self, self.tags)) - - @staticmethod - def score(filename, fileobj, header): - try: - fileobj.seek(-160, 2) - except IOError: - fileobj.seek(0) - footer = fileobj.read() - return ((b"APETAGEX" in footer) - header.startswith(b"ID3")) diff --git a/resources/lib/libraries/mutagen/asf/__init__.py b/resources/lib/libraries/mutagen/asf/__init__.py deleted file mode 100644 index e667192d..00000000 --- a/resources/lib/libraries/mutagen/asf/__init__.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. 
- -"""Read and write ASF (Window Media Audio) files.""" - -__all__ = ["ASF", "Open"] - -from mutagen import FileType, Metadata, StreamInfo -from mutagen._util import resize_bytes, DictMixin -from mutagen._compat import string_types, long_, PY3, izip - -from ._util import error, ASFError, ASFHeaderError -from ._objects import HeaderObject, MetadataLibraryObject, MetadataObject, \ - ExtendedContentDescriptionObject, HeaderExtensionObject, \ - ContentDescriptionObject -from ._attrs import ASFGUIDAttribute, ASFWordAttribute, ASFQWordAttribute, \ - ASFDWordAttribute, ASFBoolAttribute, ASFByteArrayAttribute, \ - ASFUnicodeAttribute, ASFBaseAttribute, ASFValue - - -# pyflakes -error, ASFError, ASFHeaderError, ASFValue - - -class ASFInfo(StreamInfo): - """ASF stream information.""" - - length = 0.0 - """Length in seconds (`float`)""" - - sample_rate = 0 - """Sample rate in Hz (`int`)""" - - bitrate = 0 - """Bitrate in bps (`int`)""" - - channels = 0 - """Number of channels (`int`)""" - - codec_type = u"" - """Name of the codec type of the first audio stream or - an empty string if unknown. Example: ``Windows Media Audio 9 Standard`` - (:class:`mutagen.text`) - """ - - codec_name = u"" - """Name and maybe version of the codec used. Example: - ``Windows Media Audio 9.1`` (:class:`mutagen.text`) - """ - - codec_description = u"" - """Further information on the codec used. - Example: ``64 kbps, 48 kHz, stereo 2-pass CBR`` (:class:`mutagen.text`) - """ - - def __init__(self): - self.length = 0.0 - self.sample_rate = 0 - self.bitrate = 0 - self.channels = 0 - self.codec_type = u"" - self.codec_name = u"" - self.codec_description = u"" - - def pprint(self): - """Returns a stream information text summary - - :rtype: text - """ - - s = u"ASF (%s) %d bps, %s Hz, %d channels, %.2f seconds" % ( - self.codec_type or self.codec_name or u"???", self.bitrate, - self.sample_rate, self.channels, self.length) - return s - - -class ASFTags(list, DictMixin, Metadata): - """Dictionary containing ASF attributes.""" - - def __getitem__(self, key): - """A list of values for the key. - - This is a copy, so comment['title'].append('a title') will not - work. - - """ - - # PY3 only - if isinstance(key, slice): - return list.__getitem__(self, key) - - values = [value for (k, value) in self if k == key] - if not values: - raise KeyError(key) - else: - return values - - def __delitem__(self, key): - """Delete all values associated with the key.""" - - # PY3 only - if isinstance(key, slice): - return list.__delitem__(self, key) - - to_delete = [x for x in self if x[0] == key] - if not to_delete: - raise KeyError(key) - else: - for k in to_delete: - self.remove(k) - - def __contains__(self, key): - """Return true if the key has any values.""" - for k, value in self: - if k == key: - return True - else: - return False - - def __setitem__(self, key, values): - """Set a key's value or values. - - Setting a value overwrites all old ones. The value may be a - list of Unicode or UTF-8 strings, or a single Unicode or UTF-8 - string. 
- """ - - # PY3 only - if isinstance(key, slice): - return list.__setitem__(self, key, values) - - if not isinstance(values, list): - values = [values] - - to_append = [] - for value in values: - if not isinstance(value, ASFBaseAttribute): - if isinstance(value, string_types): - value = ASFUnicodeAttribute(value) - elif PY3 and isinstance(value, bytes): - value = ASFByteArrayAttribute(value) - elif isinstance(value, bool): - value = ASFBoolAttribute(value) - elif isinstance(value, int): - value = ASFDWordAttribute(value) - elif isinstance(value, long_): - value = ASFQWordAttribute(value) - else: - raise TypeError("Invalid type %r" % type(value)) - to_append.append((key, value)) - - try: - del(self[key]) - except KeyError: - pass - - self.extend(to_append) - - def keys(self): - """Return a sequence of all keys in the comment.""" - - return self and set(next(izip(*self))) - - def as_dict(self): - """Return a copy of the comment data in a real dict.""" - - d = {} - for key, value in self: - d.setdefault(key, []).append(value) - return d - - def pprint(self): - """Returns a string containing all key, value pairs. - - :rtype: text - """ - - return "\n".join("%s=%s" % (k, v) for k, v in self) - - -UNICODE = ASFUnicodeAttribute.TYPE -"""Unicode string type""" - -BYTEARRAY = ASFByteArrayAttribute.TYPE -"""Byte array type""" - -BOOL = ASFBoolAttribute.TYPE -"""Bool type""" - -DWORD = ASFDWordAttribute.TYPE -""""DWord type (uint32)""" - -QWORD = ASFQWordAttribute.TYPE -"""QWord type (uint64)""" - -WORD = ASFWordAttribute.TYPE -"""Word type (uint16)""" - -GUID = ASFGUIDAttribute.TYPE -"""GUID type""" - - -class ASF(FileType): - """An ASF file, probably containing WMA or WMV. - - :param filename: a filename to load - :raises mutagen.asf.error: In case loading fails - """ - - _mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf", - "audio/x-wma", "video/x-wmv"] - - info = None - """A `ASFInfo` instance""" - - tags = None - """A `ASFTags` instance""" - - def load(self, filename): - self.filename = filename - self.info = ASFInfo() - self.tags = ASFTags() - - with open(filename, "rb") as fileobj: - self._tags = {} - - self._header = HeaderObject.parse_full(self, fileobj) - - for guid in [ContentDescriptionObject.GUID, - ExtendedContentDescriptionObject.GUID, MetadataObject.GUID, - MetadataLibraryObject.GUID]: - self.tags.extend(self._tags.pop(guid, [])) - - assert not self._tags - - def save(self, filename=None, padding=None): - """Save tag changes back to the loaded file. - - :param padding: A callback which returns the amount of padding to use. 
- See :class:`mutagen.PaddingInfo` - - :raises mutagen.asf.error: In case saving fails - """ - - if filename is not None and filename != self.filename: - raise ValueError("saving to another file not supported atm") - - # Move attributes to the right objects - self.to_content_description = {} - self.to_extended_content_description = {} - self.to_metadata = {} - self.to_metadata_library = [] - for name, value in self.tags: - library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID) - can_cont_desc = value.TYPE == UNICODE - - if library_only or value.language is not None: - self.to_metadata_library.append((name, value)) - elif value.stream is not None: - if name not in self.to_metadata: - self.to_metadata[name] = value - else: - self.to_metadata_library.append((name, value)) - elif name in ContentDescriptionObject.NAMES: - if name not in self.to_content_description and can_cont_desc: - self.to_content_description[name] = value - else: - self.to_metadata_library.append((name, value)) - else: - if name not in self.to_extended_content_description: - self.to_extended_content_description[name] = value - else: - self.to_metadata_library.append((name, value)) - - # Add missing objects - header = self._header - if header.get_child(ContentDescriptionObject.GUID) is None: - header.objects.append(ContentDescriptionObject()) - if header.get_child(ExtendedContentDescriptionObject.GUID) is None: - header.objects.append(ExtendedContentDescriptionObject()) - header_ext = header.get_child(HeaderExtensionObject.GUID) - if header_ext is None: - header_ext = HeaderExtensionObject() - header.objects.append(header_ext) - if header_ext.get_child(MetadataObject.GUID) is None: - header_ext.objects.append(MetadataObject()) - if header_ext.get_child(MetadataLibraryObject.GUID) is None: - header_ext.objects.append(MetadataLibraryObject()) - - # Render to file - with open(self.filename, "rb+") as fileobj: - old_size = header.parse_size(fileobj)[0] - data = header.render_full(self, fileobj, old_size, padding) - size = len(data) - resize_bytes(fileobj, old_size, size, 0) - fileobj.seek(0) - fileobj.write(data) - - def add_tags(self): - raise ASFError - - def delete(self, filename=None): - - if filename is not None and filename != self.filename: - raise ValueError("saving to another file not supported atm") - - self.tags.clear() - self.save(padding=lambda x: 0) - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(HeaderObject.GUID) * 2 - -Open = ASF diff --git a/resources/lib/libraries/mutagen/asf/__pycache__/__init__.cpython-35.pyc b/resources/lib/libraries/mutagen/asf/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index 277e2c5f..00000000 Binary files a/resources/lib/libraries/mutagen/asf/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/asf/__pycache__/_attrs.cpython-35.pyc b/resources/lib/libraries/mutagen/asf/__pycache__/_attrs.cpython-35.pyc deleted file mode 100644 index aa916edd..00000000 Binary files a/resources/lib/libraries/mutagen/asf/__pycache__/_attrs.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/asf/__pycache__/_objects.cpython-35.pyc b/resources/lib/libraries/mutagen/asf/__pycache__/_objects.cpython-35.pyc deleted file mode 100644 index cb0810d8..00000000 Binary files a/resources/lib/libraries/mutagen/asf/__pycache__/_objects.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/asf/__pycache__/_util.cpython-35.pyc 
b/resources/lib/libraries/mutagen/asf/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index 661bff50..00000000 Binary files a/resources/lib/libraries/mutagen/asf/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/asf/_attrs.py b/resources/lib/libraries/mutagen/asf/_attrs.py deleted file mode 100644 index 4621c9fa..00000000 --- a/resources/lib/libraries/mutagen/asf/_attrs.py +++ /dev/null @@ -1,438 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -import sys -import struct - -from mutagen._compat import swap_to_string, text_type, PY2, reraise -from mutagen._util import total_ordering - -from ._util import ASFError - - -class ASFBaseAttribute(object): - """Generic attribute.""" - - TYPE = None - - _TYPES = {} - - value = None - """The Python value of this attribute (type depends on the class)""" - - language = None - """Language""" - - stream = None - """Stream""" - - def __init__(self, value=None, data=None, language=None, - stream=None, **kwargs): - self.language = language - self.stream = stream - if data: - self.value = self.parse(data, **kwargs) - else: - if value is None: - # we used to support not passing any args and instead assign - # them later, keep that working.. - self.value = None - else: - self.value = self._validate(value) - - @classmethod - def _register(cls, other): - cls._TYPES[other.TYPE] = other - return other - - @classmethod - def _get_type(cls, type_): - """Raises KeyError""" - - return cls._TYPES[type_] - - def _validate(self, value): - """Raises TypeError or ValueError in case the user supplied value - isn't valid. - """ - - return value - - def data_size(self): - raise NotImplementedError - - def __repr__(self): - name = "%s(%r" % (type(self).__name__, self.value) - if self.language: - name += ", language=%d" % self.language - if self.stream: - name += ", stream=%d" % self.stream - name += ")" - return name - - def render(self, name): - name = name.encode("utf-16-le") + b"\x00\x00" - data = self._render() - return (struct.pack("<H", len(name)) + name + - struct.pack("<HH", self.TYPE, len(data)) + data) - - def render_m(self, name): - name = name.encode("utf-16-le") + b"\x00\x00" - if self.TYPE == 2: - data = self._render(dword=False) - else: - data = self._render() - return (struct.pack("<HHHHI", 0, self.stream or 0, len(name), - self.TYPE, len(data)) + name + data) - - def render_ml(self, name): - name = name.encode("utf-16-le") + b"\x00\x00" - if self.TYPE == 2: - data = self._render(dword=False) - else: - data = self._render() - - return (struct.pack("<HHHHI", self.language or 0, self.stream or 0, - len(name), self.TYPE, len(data)) + name + data) - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFUnicodeAttribute(ASFBaseAttribute): - """Unicode string attribute. 
- - :: - - ASFUnicodeAttribute(u'some text') - """ - - TYPE = 0x0000 - - def parse(self, data): - try: - return data.decode("utf-16-le").strip("\x00") - except UnicodeDecodeError as e: - reraise(ASFError, e, sys.exc_info()[2]) - - def _validate(self, value): - if not isinstance(value, text_type): - if PY2: - return value.decode("utf-8") - else: - raise TypeError("%r not str" % value) - return value - - def _render(self): - return self.value.encode("utf-16-le") + b"\x00\x00" - - def data_size(self): - return len(self._render()) - - def __bytes__(self): - return self.value.encode("utf-16-le") - - def __str__(self): - return self.value - - def __eq__(self, other): - return text_type(self) == other - - def __lt__(self, other): - return text_type(self) < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFByteArrayAttribute(ASFBaseAttribute): - """Byte array attribute. - - :: - - ASFByteArrayAttribute(b'1234') - """ - TYPE = 0x0001 - - def parse(self, data): - assert isinstance(data, bytes) - return data - - def _render(self): - assert isinstance(self.value, bytes) - return self.value - - def _validate(self, value): - if not isinstance(value, bytes): - raise TypeError("must be bytes/str: %r" % value) - return value - - def data_size(self): - return len(self.value) - - def __bytes__(self): - return self.value - - def __str__(self): - return "[binary data (%d bytes)]" % len(self.value) - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFBoolAttribute(ASFBaseAttribute): - """Bool attribute. - - :: - - ASFBoolAttribute(True) - """ - - TYPE = 0x0002 - - def parse(self, data, dword=True): - if dword: - return struct.unpack("<I", data)[0] == 1 - else: - return struct.unpack("<H", data)[0] == 1 - - def _render(self, dword=True): - if dword: - return struct.pack("<I", bool(self.value)) - else: - return struct.pack("<H", bool(self.value)) - - def _validate(self, value): - return bool(value) - - def data_size(self): - return 4 - - def __bool__(self): - return bool(self.value) - - def __bytes__(self): - return text_type(self.value).encode('utf-8') - - def __str__(self): - return text_type(self.value) - - def __eq__(self, other): - return bool(self.value) == other - - def __lt__(self, other): - return bool(self.value) < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFDWordAttribute(ASFBaseAttribute): - """DWORD attribute. - - :: - - ASFDWordAttribute(42) - """ - - TYPE = 0x0003 - - def parse(self, data): - return struct.unpack("<L", data)[0] - - def _render(self): - return struct.pack("<L", self.value) - - def _validate(self, value): - value = int(value) - if not 0 <= value <= 2 ** 32 - 1: - raise ValueError("Out of range") - return value - - def data_size(self): - return 4 - - def __int__(self): - return self.value - - def __bytes__(self): - return text_type(self.value).encode('utf-8') - - def __str__(self): - return text_type(self.value) - - def __eq__(self, other): - return int(self.value) == other - - def __lt__(self, other): - return int(self.value) < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFQWordAttribute(ASFBaseAttribute): - """QWORD attribute. 
- - :: - - ASFQWordAttribute(42) - """ - - TYPE = 0x0004 - - def parse(self, data): - return struct.unpack("<Q", data)[0] - - def _render(self): - return struct.pack("<Q", self.value) - - def _validate(self, value): - value = int(value) - if not 0 <= value <= 2 ** 64 - 1: - raise ValueError("Out of range") - return value - - def data_size(self): - return 8 - - def __int__(self): - return self.value - - def __bytes__(self): - return text_type(self.value).encode('utf-8') - - def __str__(self): - return text_type(self.value) - - def __eq__(self, other): - return int(self.value) == other - - def __lt__(self, other): - return int(self.value) < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFWordAttribute(ASFBaseAttribute): - """WORD attribute. - - :: - - ASFWordAttribute(42) - """ - - TYPE = 0x0005 - - def parse(self, data): - return struct.unpack("<H", data)[0] - - def _render(self): - return struct.pack("<H", self.value) - - def _validate(self, value): - value = int(value) - if not 0 <= value <= 2 ** 16 - 1: - raise ValueError("Out of range") - return value - - def data_size(self): - return 2 - - def __int__(self): - return self.value - - def __bytes__(self): - return text_type(self.value).encode('utf-8') - - def __str__(self): - return text_type(self.value) - - def __eq__(self, other): - return int(self.value) == other - - def __lt__(self, other): - return int(self.value) < other - - __hash__ = ASFBaseAttribute.__hash__ - - -@ASFBaseAttribute._register -@swap_to_string -@total_ordering -class ASFGUIDAttribute(ASFBaseAttribute): - """GUID attribute.""" - - TYPE = 0x0006 - - def parse(self, data): - assert isinstance(data, bytes) - return data - - def _render(self): - assert isinstance(self.value, bytes) - return self.value - - def _validate(self, value): - if not isinstance(value, bytes): - raise TypeError("must be bytes/str: %r" % value) - return value - - def data_size(self): - return len(self.value) - - def __bytes__(self): - return self.value - - def __str__(self): - return repr(self.value) - - def __eq__(self, other): - return self.value == other - - def __lt__(self, other): - return self.value < other - - __hash__ = ASFBaseAttribute.__hash__ - - -def ASFValue(value, kind, **kwargs): - """Create a tag value of a specific kind. - - :: - - ASFValue(u"My Value", UNICODE) - - :rtype: ASFBaseAttribute - :raises TypeError: in case a wrong type was passed - :raises ValueError: in case the value can't be be represented as ASFValue. - """ - - try: - attr_type = ASFBaseAttribute._get_type(kind) - except KeyError: - raise ValueError("Unknown value type %r" % kind) - else: - return attr_type(value=value, **kwargs) diff --git a/resources/lib/libraries/mutagen/asf/_objects.py b/resources/lib/libraries/mutagen/asf/_objects.py deleted file mode 100644 index ed942679..00000000 --- a/resources/lib/libraries/mutagen/asf/_objects.py +++ /dev/null @@ -1,437 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. 
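The ASFValue factory that closes _attrs.py above dispatches on the registered
TYPE codes and funnels the value through each class's _validate, so malformed
input fails at construction time. A brief sketch::

    ASFValue(42, WORD)       # -> ASFWordAttribute(42)
    ASFValue(2 ** 16, WORD)  # raises ValueError("Out of range")
    ASFValue(u"x", 0x1234)   # raises ValueError (unknown value type)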
- -import struct - -from mutagen._util import cdata, get_size -from mutagen._compat import text_type, xrange, izip -from mutagen._tags import PaddingInfo - -from ._util import guid2bytes, bytes2guid, CODECS, ASFError, ASFHeaderError -from ._attrs import ASFBaseAttribute, ASFUnicodeAttribute - - -class BaseObject(object): - """Base ASF object.""" - - GUID = None - _TYPES = {} - - def __init__(self): - self.objects = [] - self.data = b"" - - def parse(self, asf, data): - self.data = data - - def render(self, asf): - data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data - return data - - def get_child(self, guid): - for obj in self.objects: - if obj.GUID == guid: - return obj - return None - - @classmethod - def _register(cls, other): - cls._TYPES[other.GUID] = other - return other - - @classmethod - def _get_object(cls, guid): - if guid in cls._TYPES: - return cls._TYPES[guid]() - else: - return UnknownObject(guid) - - def __repr__(self): - return "<%s GUID=%s objects=%r>" % ( - type(self).__name__, bytes2guid(self.GUID), self.objects) - - def pprint(self): - l = [] - l.append("%s(%s)" % (type(self).__name__, bytes2guid(self.GUID))) - for o in self.objects: - for e in o.pprint().splitlines(): - l.append(" " + e) - return "\n".join(l) - - -class UnknownObject(BaseObject): - """Unknown ASF object.""" - - def __init__(self, guid): - super(UnknownObject, self).__init__() - assert isinstance(guid, bytes) - self.GUID = guid - - -@BaseObject._register -class HeaderObject(BaseObject): - """ASF header.""" - - GUID = guid2bytes("75B22630-668E-11CF-A6D9-00AA0062CE6C") - - @classmethod - def parse_full(cls, asf, fileobj): - """Raises ASFHeaderError""" - - header = cls() - - size, num_objects = cls.parse_size(fileobj) - for i in xrange(num_objects): - guid, size = struct.unpack("<16sQ", fileobj.read(24)) - obj = BaseObject._get_object(guid) - data = fileobj.read(size - 24) - obj.parse(asf, data) - header.objects.append(obj) - - return header - - @classmethod - def parse_size(cls, fileobj): - """Returns (size, num_objects) - - Raises ASFHeaderError - """ - - header = fileobj.read(30) - if len(header) != 30 or header[:16] != HeaderObject.GUID: - raise ASFHeaderError("Not an ASF file.") - - return struct.unpack("<QL", header[16:28]) - - def render_full(self, asf, fileobj, available, padding_func): - # Render everything except padding - num_objects = 0 - data = bytearray() - for obj in self.objects: - if obj.GUID == PaddingObject.GUID: - continue - data += obj.render(asf) - num_objects += 1 - - # calculate how much space we need at least - padding_obj = PaddingObject() - header_size = len(HeaderObject.GUID) + 14 - padding_overhead = len(padding_obj.render(asf)) - needed_size = len(data) + header_size + padding_overhead - - # ask the user for padding adjustments - file_size = get_size(fileobj) - content_size = file_size - available - assert content_size >= 0 - info = PaddingInfo(available - needed_size, content_size) - - # add padding - padding = info._get_padding(padding_func) - padding_obj.parse(asf, b"\x00" * padding) - data += padding_obj.render(asf) - num_objects += 1 - - data = (HeaderObject.GUID + - struct.pack("<QL", len(data) + 30, num_objects) + - b"\x01\x02" + data) - - return data - - def parse(self, asf, data): - raise NotImplementedError - - def render(self, asf): - raise NotImplementedError - - -@BaseObject._register -class ContentDescriptionObject(BaseObject): - """Content description.""" - - GUID = guid2bytes("75B22633-668E-11CF-A6D9-00AA0062CE6C") - - NAMES = [ - u"Title", - 
u"Author", - u"Copyright", - u"Description", - u"Rating", - ] - - def parse(self, asf, data): - super(ContentDescriptionObject, self).parse(asf, data) - lengths = struct.unpack("<HHHHH", data[:10]) - texts = [] - pos = 10 - for length in lengths: - end = pos + length - if length > 0: - texts.append(data[pos:end].decode("utf-16-le").strip(u"\x00")) - else: - texts.append(None) - pos = end - - for key, value in izip(self.NAMES, texts): - if value is not None: - value = ASFUnicodeAttribute(value=value) - asf._tags.setdefault(self.GUID, []).append((key, value)) - - def render(self, asf): - def render_text(name): - value = asf.to_content_description.get(name) - if value is not None: - return text_type(value).encode("utf-16-le") + b"\x00\x00" - else: - return b"" - - texts = [render_text(x) for x in self.NAMES] - data = struct.pack("<HHHHH", *map(len, texts)) + b"".join(texts) - return self.GUID + struct.pack("<Q", 24 + len(data)) + data - - -@BaseObject._register -class ExtendedContentDescriptionObject(BaseObject): - """Extended content description.""" - - GUID = guid2bytes("D2D0A440-E307-11D2-97F0-00A0C95EA850") - - def parse(self, asf, data): - super(ExtendedContentDescriptionObject, self).parse(asf, data) - num_attributes, = struct.unpack("<H", data[0:2]) - pos = 2 - for i in xrange(num_attributes): - name_length, = struct.unpack("<H", data[pos:pos + 2]) - pos += 2 - name = data[pos:pos + name_length] - name = name.decode("utf-16-le").strip("\x00") - pos += name_length - value_type, value_length = struct.unpack("<HH", data[pos:pos + 4]) - pos += 4 - value = data[pos:pos + value_length] - pos += value_length - attr = ASFBaseAttribute._get_type(value_type)(data=value) - asf._tags.setdefault(self.GUID, []).append((name, attr)) - - def render(self, asf): - attrs = asf.to_extended_content_description.items() - data = b"".join(attr.render(name) for (name, attr) in attrs) - data = struct.pack("<QH", 26 + len(data), len(attrs)) + data - return self.GUID + data - - -@BaseObject._register -class FilePropertiesObject(BaseObject): - """File properties.""" - - GUID = guid2bytes("8CABDCA1-A947-11CF-8EE4-00C00C205365") - - def parse(self, asf, data): - super(FilePropertiesObject, self).parse(asf, data) - length, _, preroll = struct.unpack("<QQQ", data[40:64]) - # there are files where preroll is larger than length, limit to >= 0 - asf.info.length = max((length / 10000000.0) - (preroll / 1000.0), 0.0) - - -@BaseObject._register -class StreamPropertiesObject(BaseObject): - """Stream properties.""" - - GUID = guid2bytes("B7DC0791-A9B7-11CF-8EE6-00C00C205365") - - def parse(self, asf, data): - super(StreamPropertiesObject, self).parse(asf, data) - channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66]) - asf.info.channels = channels - asf.info.sample_rate = sample_rate - asf.info.bitrate = bitrate * 8 - - -@BaseObject._register -class CodecListObject(BaseObject): - """Codec List""" - - GUID = guid2bytes("86D15240-311D-11D0-A3A4-00A0C90348F6") - - def _parse_entry(self, data, offset): - """can raise cdata.error""" - - type_, offset = cdata.uint16_le_from(data, offset) - - units, offset = cdata.uint16_le_from(data, offset) - # utf-16 code units, not characters.. 
- next_offset = offset + units * 2 - try: - name = data[offset:next_offset].decode("utf-16-le").strip("\x00") - except UnicodeDecodeError: - name = u"" - offset = next_offset - - units, offset = cdata.uint16_le_from(data, offset) - next_offset = offset + units * 2 - try: - desc = data[offset:next_offset].decode("utf-16-le").strip("\x00") - except UnicodeDecodeError: - desc = u"" - offset = next_offset - - bytes_, offset = cdata.uint16_le_from(data, offset) - next_offset = offset + bytes_ - codec = u"" - if bytes_ == 2: - codec_id = cdata.uint16_le_from(data, offset)[0] - if codec_id in CODECS: - codec = CODECS[codec_id] - offset = next_offset - - return offset, type_, name, desc, codec - - def parse(self, asf, data): - super(CodecListObject, self).parse(asf, data) - - offset = 16 - count, offset = cdata.uint32_le_from(data, offset) - for i in xrange(count): - try: - offset, type_, name, desc, codec = \ - self._parse_entry(data, offset) - except cdata.error: - raise ASFError("invalid codec entry") - - # go with the first audio entry - if type_ == 2: - name = name.strip() - desc = desc.strip() - asf.info.codec_type = codec - asf.info.codec_name = name - asf.info.codec_description = desc - return - - -@BaseObject._register -class PaddingObject(BaseObject): - """Padding object""" - - GUID = guid2bytes("1806D474-CADF-4509-A4BA-9AABCB96AAE8") - - -@BaseObject._register -class StreamBitratePropertiesObject(BaseObject): - """Stream bitrate properties""" - - GUID = guid2bytes("7BF875CE-468D-11D1-8D82-006097C9A2B2") - - -@BaseObject._register -class ContentEncryptionObject(BaseObject): - """Content encryption""" - - GUID = guid2bytes("2211B3FB-BD23-11D2-B4B7-00A0C955FC6E") - - -@BaseObject._register -class ExtendedContentEncryptionObject(BaseObject): - """Extended content encryption""" - - GUID = guid2bytes("298AE614-2622-4C17-B935-DAE07EE9289C") - - -@BaseObject._register -class HeaderExtensionObject(BaseObject): - """Header extension.""" - - GUID = guid2bytes("5FBF03B5-A92E-11CF-8EE3-00C00C205365") - - def parse(self, asf, data): - super(HeaderExtensionObject, self).parse(asf, data) - datasize, = struct.unpack("<I", data[18:22]) - datapos = 0 - while datapos < datasize: - guid, size = struct.unpack( - "<16sQ", data[22 + datapos:22 + datapos + 24]) - obj = BaseObject._get_object(guid) - obj.parse(asf, data[22 + datapos + 24:22 + datapos + size]) - self.objects.append(obj) - datapos += size - - def render(self, asf): - data = bytearray() - for obj in self.objects: - # some files have the padding in the extension header, but we - # want to add it at the end of the top level header. Just - # skip padding at this level. 
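            # (render_full re-adds a single PaddingObject at the top
            # level, sized via the padding callback.)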
- if obj.GUID == PaddingObject.GUID: - continue - data += obj.render(asf) - return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) + - b"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" + - b"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" + - b"\x06\x00" + struct.pack("<I", len(data)) + data) - - -@BaseObject._register -class MetadataObject(BaseObject): - """Metadata description.""" - - GUID = guid2bytes("C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA") - - def parse(self, asf, data): - super(MetadataObject, self).parse(asf, data) - num_attributes, = struct.unpack("<H", data[0:2]) - pos = 2 - for i in xrange(num_attributes): - (reserved, stream, name_length, value_type, - value_length) = struct.unpack("<HHHHI", data[pos:pos + 12]) - pos += 12 - name = data[pos:pos + name_length] - name = name.decode("utf-16-le").strip("\x00") - pos += name_length - value = data[pos:pos + value_length] - pos += value_length - args = {'data': value, 'stream': stream} - if value_type == 2: - args['dword'] = False - attr = ASFBaseAttribute._get_type(value_type)(**args) - asf._tags.setdefault(self.GUID, []).append((name, attr)) - - def render(self, asf): - attrs = asf.to_metadata.items() - data = b"".join([attr.render_m(name) for (name, attr) in attrs]) - return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) + - data) - - -@BaseObject._register -class MetadataLibraryObject(BaseObject): - """Metadata library description.""" - - GUID = guid2bytes("44231C94-9498-49D1-A141-1D134E457054") - - def parse(self, asf, data): - super(MetadataLibraryObject, self).parse(asf, data) - num_attributes, = struct.unpack("<H", data[0:2]) - pos = 2 - for i in xrange(num_attributes): - (language, stream, name_length, value_type, - value_length) = struct.unpack("<HHHHI", data[pos:pos + 12]) - pos += 12 - name = data[pos:pos + name_length] - name = name.decode("utf-16-le").strip("\x00") - pos += name_length - value = data[pos:pos + value_length] - pos += value_length - args = {'data': value, 'language': language, 'stream': stream} - if value_type == 2: - args['dword'] = False - attr = ASFBaseAttribute._get_type(value_type)(**args) - asf._tags.setdefault(self.GUID, []).append((name, attr)) - - def render(self, asf): - attrs = asf.to_metadata_library - data = b"".join([attr.render_ml(name) for (name, attr) in attrs]) - return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) + - data) diff --git a/resources/lib/libraries/mutagen/asf/_util.py b/resources/lib/libraries/mutagen/asf/_util.py deleted file mode 100644 index 42154bff..00000000 --- a/resources/lib/libraries/mutagen/asf/_util.py +++ /dev/null @@ -1,315 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2005-2006 Joe Wreschnig -# Copyright (C) 2006-2007 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. 
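The guid2bytes/bytes2guid pair below implements the mixed-endian GUID wire
format: the first three fields are serialized little-endian, the last two
big-endian. The two functions are exact inverses; a round-trip sketch::

    s = "75B22630-668E-11CF-A6D9-00AA0062CE6C"  # the ASF header GUID
    assert bytes2guid(guid2bytes(s)) == s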
- -import struct - -from mutagen._util import MutagenError - - -class error(IOError, MutagenError): - """Error raised by :mod:`mutagen.asf`""" - - -class ASFError(error): - pass - - -class ASFHeaderError(error): - pass - - -def guid2bytes(s): - """Converts a GUID to the serialized bytes representation""" - - assert isinstance(s, str) - assert len(s) == 36 - - p = struct.pack - return b"".join([ - p("<IHH", int(s[:8], 16), int(s[9:13], 16), int(s[14:18], 16)), - p(">H", int(s[19:23], 16)), - p(">Q", int(s[24:], 16))[2:], - ]) - - -def bytes2guid(s): - """Converts a serialized GUID to a text GUID""" - - assert isinstance(s, bytes) - - u = struct.unpack - v = [] - v.extend(u("<IHH", s[:8])) - v.extend(u(">HQ", s[8:10] + b"\x00\x00" + s[10:])) - return "%08X-%04X-%04X-%04X-%012X" % tuple(v) - - -# Names from http://windows.microsoft.com/en-za/windows7/c00d10d1-[0-9A-F]{1,4} -CODECS = { - 0x0000: u"Unknown Wave Format", - 0x0001: u"Microsoft PCM Format", - 0x0002: u"Microsoft ADPCM Format", - 0x0003: u"IEEE Float", - 0x0004: u"Compaq Computer VSELP", - 0x0005: u"IBM CVSD", - 0x0006: u"Microsoft CCITT A-Law", - 0x0007: u"Microsoft CCITT u-Law", - 0x0008: u"Microsoft DTS", - 0x0009: u"Microsoft DRM", - 0x000A: u"Windows Media Audio 9 Voice", - 0x000B: u"Windows Media Audio 10 Voice", - 0x000C: u"OGG Vorbis", - 0x000D: u"FLAC", - 0x000E: u"MOT AMR", - 0x000F: u"Nice Systems IMBE", - 0x0010: u"OKI ADPCM", - 0x0011: u"Intel IMA ADPCM", - 0x0012: u"Videologic MediaSpace ADPCM", - 0x0013: u"Sierra Semiconductor ADPCM", - 0x0014: u"Antex Electronics G.723 ADPCM", - 0x0015: u"DSP Solutions DIGISTD", - 0x0016: u"DSP Solutions DIGIFIX", - 0x0017: u"Dialogic OKI ADPCM", - 0x0018: u"MediaVision ADPCM", - 0x0019: u"Hewlett-Packard CU codec", - 0x001A: u"Hewlett-Packard Dynamic Voice", - 0x0020: u"Yamaha ADPCM", - 0x0021: u"Speech Compression SONARC", - 0x0022: u"DSP Group True Speech", - 0x0023: u"Echo Speech EchoSC1", - 0x0024: u"Ahead Inc. Audiofile AF36", - 0x0025: u"Audio Processing Technology APTX", - 0x0026: u"Ahead Inc. AudioFile AF10", - 0x0027: u"Aculab Prosody 1612", - 0x0028: u"Merging Technologies S.A. 
LRC", - 0x0030: u"Dolby Labs AC2", - 0x0031: u"Microsoft GSM 6.10", - 0x0032: u"Microsoft MSNAudio", - 0x0033: u"Antex Electronics ADPCME", - 0x0034: u"Control Resources VQLPC", - 0x0035: u"DSP Solutions Digireal", - 0x0036: u"DSP Solutions DigiADPCM", - 0x0037: u"Control Resources CR10", - 0x0038: u"Natural MicroSystems VBXADPCM", - 0x0039: u"Crystal Semiconductor IMA ADPCM", - 0x003A: u"Echo Speech EchoSC3", - 0x003B: u"Rockwell ADPCM", - 0x003C: u"Rockwell DigiTalk", - 0x003D: u"Xebec Multimedia Solutions", - 0x0040: u"Antex Electronics G.721 ADPCM", - 0x0041: u"Antex Electronics G.728 CELP", - 0x0042: u"Intel G.723", - 0x0043: u"Intel G.723.1", - 0x0044: u"Intel G.729 Audio", - 0x0045: u"Sharp G.726 Audio", - 0x0050: u"Microsoft MPEG-1", - 0x0052: u"InSoft RT24", - 0x0053: u"InSoft PAC", - 0x0055: u"MP3 - MPEG Layer III", - 0x0059: u"Lucent G.723", - 0x0060: u"Cirrus Logic", - 0x0061: u"ESS Technology ESPCM", - 0x0062: u"Voxware File-Mode", - 0x0063: u"Canopus Atrac", - 0x0064: u"APICOM G.726 ADPCM", - 0x0065: u"APICOM G.722 ADPCM", - 0x0066: u"Microsoft DSAT", - 0x0067: u"Microsoft DSAT Display", - 0x0069: u"Voxware Byte Aligned", - 0x0070: u"Voxware AC8", - 0x0071: u"Voxware AC10", - 0x0072: u"Voxware AC16", - 0x0073: u"Voxware AC20", - 0x0074: u"Voxware RT24 MetaVoice", - 0x0075: u"Voxware RT29 MetaSound", - 0x0076: u"Voxware RT29HW", - 0x0077: u"Voxware VR12", - 0x0078: u"Voxware VR18", - 0x0079: u"Voxware TQ40", - 0x007A: u"Voxware SC3", - 0x007B: u"Voxware SC3", - 0x0080: u"Softsound", - 0x0081: u"Voxware TQ60", - 0x0082: u"Microsoft MSRT24", - 0x0083: u"AT&T Labs G.729A", - 0x0084: u"Motion Pixels MVI MV12", - 0x0085: u"DataFusion Systems G.726", - 0x0086: u"DataFusion Systems GSM610", - 0x0088: u"Iterated Systems ISIAudio", - 0x0089: u"Onlive", - 0x008A: u"Multitude FT SX20", - 0x008B: u"Infocom ITS ACM G.721", - 0x008C: u"Convedia G.729", - 0x008D: u"Congruency Audio", - 0x0091: u"Siemens Business Communications SBC24", - 0x0092: u"Sonic Foundry Dolby AC3 SPDIF", - 0x0093: u"MediaSonic G.723", - 0x0094: u"Aculab Prosody 8KBPS", - 0x0097: u"ZyXEL ADPCM", - 0x0098: u"Philips LPCBB", - 0x0099: u"Studer Professional Audio AG Packed", - 0x00A0: u"Malden Electronics PHONYTALK", - 0x00A1: u"Racal Recorder GSM", - 0x00A2: u"Racal Recorder G720.a", - 0x00A3: u"Racal Recorder G723.1", - 0x00A4: u"Racal Recorder Tetra ACELP", - 0x00B0: u"NEC AAC", - 0x00FF: u"CoreAAC Audio", - 0x0100: u"Rhetorex ADPCM", - 0x0101: u"BeCubed Software IRAT", - 0x0111: u"Vivo G.723", - 0x0112: u"Vivo Siren", - 0x0120: u"Philips CELP", - 0x0121: u"Philips Grundig", - 0x0123: u"Digital G.723", - 0x0125: u"Sanyo ADPCM", - 0x0130: u"Sipro Lab Telecom ACELP.net", - 0x0131: u"Sipro Lab Telecom ACELP.4800", - 0x0132: u"Sipro Lab Telecom ACELP.8V3", - 0x0133: u"Sipro Lab Telecom ACELP.G.729", - 0x0134: u"Sipro Lab Telecom ACELP.G.729A", - 0x0135: u"Sipro Lab Telecom ACELP.KELVIN", - 0x0136: u"VoiceAge AMR", - 0x0140: u"Dictaphone G.726 ADPCM", - 0x0141: u"Dictaphone CELP68", - 0x0142: u"Dictaphone CELP54", - 0x0150: u"Qualcomm PUREVOICE", - 0x0151: u"Qualcomm HALFRATE", - 0x0155: u"Ring Zero Systems TUBGSM", - 0x0160: u"Windows Media Audio Standard", - 0x0161: u"Windows Media Audio 9 Standard", - 0x0162: u"Windows Media Audio 9 Professional", - 0x0163: u"Windows Media Audio 9 Lossless", - 0x0164: u"Windows Media Audio Pro over SPDIF", - 0x0170: u"Unisys NAP ADPCM", - 0x0171: u"Unisys NAP ULAW", - 0x0172: u"Unisys NAP ALAW", - 0x0173: u"Unisys NAP 16K", - 0x0174: u"Sycom ACM SYC008", - 0x0175: u"Sycom ACM SYC701 
G725", - 0x0176: u"Sycom ACM SYC701 CELP54", - 0x0177: u"Sycom ACM SYC701 CELP68", - 0x0178: u"Knowledge Adventure ADPCM", - 0x0180: u"Fraunhofer IIS MPEG-2 AAC", - 0x0190: u"Digital Theater Systems DTS", - 0x0200: u"Creative Labs ADPCM", - 0x0202: u"Creative Labs FastSpeech8", - 0x0203: u"Creative Labs FastSpeech10", - 0x0210: u"UHER informatic GmbH ADPCM", - 0x0215: u"Ulead DV Audio", - 0x0216: u"Ulead DV Audio", - 0x0220: u"Quarterdeck", - 0x0230: u"I-link Worldwide ILINK VC", - 0x0240: u"Aureal Semiconductor RAW SPORT", - 0x0249: u"Generic Passthru", - 0x0250: u"Interactive Products HSX", - 0x0251: u"Interactive Products RPELP", - 0x0260: u"Consistent Software CS2", - 0x0270: u"Sony SCX", - 0x0271: u"Sony SCY", - 0x0272: u"Sony ATRAC3", - 0x0273: u"Sony SPC", - 0x0280: u"Telum Audio", - 0x0281: u"Telum IA Audio", - 0x0285: u"Norcom Voice Systems ADPCM", - 0x0300: u"Fujitsu TOWNS SND", - 0x0350: u"Micronas SC4 Speech", - 0x0351: u"Micronas CELP833", - 0x0400: u"Brooktree BTV Digital", - 0x0401: u"Intel Music Coder", - 0x0402: u"Intel Audio", - 0x0450: u"QDesign Music", - 0x0500: u"On2 AVC0 Audio", - 0x0501: u"On2 AVC1 Audio", - 0x0680: u"AT&T Labs VME VMPCM", - 0x0681: u"AT&T Labs TPC", - 0x08AE: u"ClearJump Lightwave Lossless", - 0x1000: u"Olivetti GSM", - 0x1001: u"Olivetti ADPCM", - 0x1002: u"Olivetti CELP", - 0x1003: u"Olivetti SBC", - 0x1004: u"Olivetti OPR", - 0x1100: u"Lernout & Hauspie", - 0x1101: u"Lernout & Hauspie CELP", - 0x1102: u"Lernout & Hauspie SBC8", - 0x1103: u"Lernout & Hauspie SBC12", - 0x1104: u"Lernout & Hauspie SBC16", - 0x1400: u"Norris Communication", - 0x1401: u"ISIAudio", - 0x1500: u"AT&T Labs Soundspace Music Compression", - 0x1600: u"Microsoft MPEG ADTS AAC", - 0x1601: u"Microsoft MPEG RAW AAC", - 0x1608: u"Nokia MPEG ADTS AAC", - 0x1609: u"Nokia MPEG RAW AAC", - 0x181C: u"VoxWare MetaVoice RT24", - 0x1971: u"Sonic Foundry Lossless", - 0x1979: u"Innings Telecom ADPCM", - 0x1FC4: u"NTCSoft ALF2CD ACM", - 0x2000: u"Dolby AC3", - 0x2001: u"DTS", - 0x4143: u"Divio AAC", - 0x4201: u"Nokia Adaptive Multi-Rate", - 0x4243: u"Divio G.726", - 0x4261: u"ITU-T H.261", - 0x4263: u"ITU-T H.263", - 0x4264: u"ITU-T H.264", - 0x674F: u"Ogg Vorbis Mode 1", - 0x6750: u"Ogg Vorbis Mode 2", - 0x6751: u"Ogg Vorbis Mode 3", - 0x676F: u"Ogg Vorbis Mode 1+", - 0x6770: u"Ogg Vorbis Mode 2+", - 0x6771: u"Ogg Vorbis Mode 3+", - 0x7000: u"3COM NBX Audio", - 0x706D: u"FAAD AAC Audio", - 0x77A1: u"True Audio Lossless Audio", - 0x7A21: u"GSM-AMR CBR 3GPP Audio", - 0x7A22: u"GSM-AMR VBR 3GPP Audio", - 0xA100: u"Comverse Infosys G723.1", - 0xA101: u"Comverse Infosys AVQSBC", - 0xA102: u"Comverse Infosys SBC", - 0xA103: u"Symbol Technologies G729a", - 0xA104: u"VoiceAge AMR WB", - 0xA105: u"Ingenient Technologies G.726", - 0xA106: u"ISO/MPEG-4 Advanced Audio Coding (AAC)", - 0xA107: u"Encore Software Ltd's G.726", - 0xA108: u"ZOLL Medical Corporation ASAO", - 0xA109: u"Speex Voice", - 0xA10A: u"Vianix MASC Speech Compression", - 0xA10B: u"Windows Media 9 Spectrum Analyzer Output", - 0xA10C: u"Media Foundation Spectrum Analyzer Output", - 0xA10D: u"GSM 6.10 (Full-Rate) Speech", - 0xA10E: u"GSM 6.20 (Half-Rate) Speech", - 0xA10F: u"GSM 6.60 (Enchanced Full-Rate) Speech", - 0xA110: u"GSM 6.90 (Adaptive Multi-Rate) Speech", - 0xA111: u"GSM Adaptive Multi-Rate WideBand Speech", - 0xA112: u"Polycom G.722", - 0xA113: u"Polycom G.728", - 0xA114: u"Polycom G.729a", - 0xA115: u"Polycom Siren", - 0xA116: u"Global IP Sound ILBC", - 0xA117: u"Radio Time Time Shifted Radio", - 0xA118: u"Nice Systems ACA", 
- 0xA119: u"Nice Systems ADPCM", - 0xA11A: u"Vocord Group ITU-T G.721", - 0xA11B: u"Vocord Group ITU-T G.726", - 0xA11C: u"Vocord Group ITU-T G.722.1", - 0xA11D: u"Vocord Group ITU-T G.728", - 0xA11E: u"Vocord Group ITU-T G.729", - 0xA11F: u"Vocord Group ITU-T G.729a", - 0xA120: u"Vocord Group ITU-T G.723.1", - 0xA121: u"Vocord Group LBC", - 0xA122: u"Nice G.728", - 0xA123: u"France Telecom G.729 ACM Audio", - 0xA124: u"CODIAN Audio", - 0xCC12: u"Intel YUV12 Codec", - 0xCFCC: u"Digital Processing Systems Perception Motion JPEG", - 0xD261: u"DEC H.261", - 0xD263: u"DEC H.263", - 0xFFFE: u"Extensible Wave Format", - 0xFFFF: u"Unregistered", -} diff --git a/resources/lib/libraries/mutagen/easyid3.py b/resources/lib/libraries/mutagen/easyid3.py deleted file mode 100644 index f8dd2de0..00000000 --- a/resources/lib/libraries/mutagen/easyid3.py +++ /dev/null @@ -1,534 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Easier access to ID3 tags. - -EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear -more like Vorbis or APEv2 tags. -""" - -import mutagen.id3 - -from ._compat import iteritems, text_type, PY2 -from mutagen import Metadata -from mutagen._util import DictMixin, dict_match -from mutagen.id3 import ID3, error, delete, ID3FileType - - -__all__ = ['EasyID3', 'Open', 'delete'] - - -class EasyID3KeyError(KeyError, ValueError, error): - """Raised when trying to get/set an invalid key. - - Subclasses both KeyError and ValueError for API compatibility, - catching KeyError is preferred. - """ - - -class EasyID3(DictMixin, Metadata): - """A file with an ID3 tag. - - Like Vorbis comments, EasyID3 keys are case-insensitive ASCII - strings. Only a subset of ID3 frames are supported by default. Use - EasyID3.RegisterKey and its wrappers to support more. - - You can also set the GetFallback, SetFallback, and DeleteFallback - to generic key getter/setter/deleter functions, which are called - if no specific handler is registered for a key. Additionally, - ListFallback can be used to supply an arbitrary list of extra - keys. These can be set on EasyID3 or on individual instances after - creation. - - To use an EasyID3 class with mutagen.mp3.MP3:: - - from mutagen.mp3 import EasyMP3 as MP3 - MP3(filename) - - Because many of the attributes are constructed on the fly, things - like the following will not work:: - - ezid3["performer"].append("Joe") - - Instead, you must do:: - - values = ezid3["performer"] - values.append("Joe") - ezid3["performer"] = values - - """ - - Set = {} - Get = {} - Delete = {} - List = {} - - # For compatibility. - valid_keys = Get - - GetFallback = None - SetFallback = None - DeleteFallback = None - ListFallback = None - - @classmethod - def RegisterKey(cls, key, - getter=None, setter=None, deleter=None, lister=None): - """Register a new key mapping. - - A key mapping is four functions, a getter, setter, deleter, - and lister. The key may be either a string or a glob pattern. - - The getter, deleted, and lister receive an ID3 instance and - the requested key name. The setter also receives the desired - value, which will be a list of strings. - - The getter, setter, and deleter are used to implement __getitem__, - __setitem__, and __delitem__. - - The lister is used to implement keys(). 
It should return a - list of keys that are actually in the ID3 instance, provided - by its associated getter. - """ - key = key.lower() - if getter is not None: - cls.Get[key] = getter - if setter is not None: - cls.Set[key] = setter - if deleter is not None: - cls.Delete[key] = deleter - if lister is not None: - cls.List[key] = lister - - @classmethod - def RegisterTextKey(cls, key, frameid): - """Register a text key. - - If the key you need to register is a simple one-to-one mapping - of ID3 frame name to EasyID3 key, then you can use this - function:: - - EasyID3.RegisterTextKey("title", "TIT2") - """ - def getter(id3, key): - return list(id3[frameid]) - - def setter(id3, key, value): - try: - frame = id3[frameid] - except KeyError: - id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value)) - else: - frame.encoding = 3 - frame.text = value - - def deleter(id3, key): - del(id3[frameid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterTXXXKey(cls, key, desc): - """Register a user-defined text frame key. - - Some ID3 tags are stored in TXXX frames, which allow a - freeform 'description' which acts as a subkey, - e.g. TXXX:BARCODE.:: - - EasyID3.RegisterTXXXKey('barcode', 'BARCODE'). - """ - frameid = "TXXX:" + desc - - def getter(id3, key): - return list(id3[frameid]) - - def setter(id3, key, value): - try: - frame = id3[frameid] - except KeyError: - enc = 0 - # Store 8859-1 if we can, per MusicBrainz spec. - for v in value: - if v and max(v) > u'\x7f': - enc = 3 - break - - id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc)) - else: - frame.text = value - - def deleter(id3, key): - del(id3[frameid]) - - cls.RegisterKey(key, getter, setter, deleter) - - def __init__(self, filename=None): - self.__id3 = ID3() - if filename is not None: - self.load(filename) - - load = property(lambda s: s.__id3.load, - lambda s, v: setattr(s.__id3, 'load', v)) - - def save(self, *args, **kwargs): - # ignore v2_version until we support 2.3 here - kwargs.pop("v2_version", None) - self.__id3.save(*args, **kwargs) - - delete = property(lambda s: s.__id3.delete, - lambda s, v: setattr(s.__id3, 'delete', v)) - - filename = property(lambda s: s.__id3.filename, - lambda s, fn: setattr(s.__id3, 'filename', fn)) - - size = property(lambda s: s.__id3.size, - lambda s, fn: setattr(s.__id3, 'size', s)) - - def __getitem__(self, key): - key = key.lower() - func = dict_match(self.Get, key, self.GetFallback) - if func is not None: - return func(self.__id3, key) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def __setitem__(self, key, value): - key = key.lower() - if PY2: - if isinstance(value, basestring): - value = [value] - else: - if isinstance(value, text_type): - value = [value] - func = dict_match(self.Set, key, self.SetFallback) - if func is not None: - return func(self.__id3, key, value) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def __delitem__(self, key): - key = key.lower() - func = dict_match(self.Delete, key, self.DeleteFallback) - if func is not None: - return func(self.__id3, key) - else: - raise EasyID3KeyError("%r is not a valid key" % key) - - def keys(self): - keys = [] - for key in self.Get.keys(): - if key in self.List: - keys.extend(self.List[key](self.__id3, key)) - elif key in self: - keys.append(key) - if self.ListFallback is not None: - keys.extend(self.ListFallback(self.__id3, "")) - return keys - - def pprint(self): - """Print tag key=value pairs.""" - strings = [] - for key in sorted(self.keys()): - 
values = self[key] - for value in values: - strings.append("%s=%s" % (key, value)) - return "\n".join(strings) - - -Open = EasyID3 - - -def genre_get(id3, key): - return id3["TCON"].genres - - -def genre_set(id3, key, value): - try: - frame = id3["TCON"] - except KeyError: - id3.add(mutagen.id3.TCON(encoding=3, text=value)) - else: - frame.encoding = 3 - frame.genres = value - - -def genre_delete(id3, key): - del(id3["TCON"]) - - -def date_get(id3, key): - return [stamp.text for stamp in id3["TDRC"].text] - - -def date_set(id3, key, value): - id3.add(mutagen.id3.TDRC(encoding=3, text=value)) - - -def date_delete(id3, key): - del(id3["TDRC"]) - - -def original_date_get(id3, key): - return [stamp.text for stamp in id3["TDOR"].text] - - -def original_date_set(id3, key, value): - id3.add(mutagen.id3.TDOR(encoding=3, text=value)) - - -def original_date_delete(id3, key): - del(id3["TDOR"]) - - -def performer_get(id3, key): - people = [] - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - raise KeyError(key) - for role, person in mcl.people: - if role == wanted_role: - people.append(person) - if people: - return people - else: - raise KeyError(key) - - -def performer_set(id3, key, value): - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - mcl = mutagen.id3.TMCL(encoding=3, people=[]) - id3.add(mcl) - mcl.encoding = 3 - people = [p for p in mcl.people if p[0] != wanted_role] - for v in value: - people.append((wanted_role, v)) - mcl.people = people - - -def performer_delete(id3, key): - wanted_role = key.split(":", 1)[1] - try: - mcl = id3["TMCL"] - except KeyError: - raise KeyError(key) - people = [p for p in mcl.people if p[0] != wanted_role] - if people == mcl.people: - raise KeyError(key) - elif people: - mcl.people = people - else: - del(id3["TMCL"]) - - -def performer_list(id3, key): - try: - mcl = id3["TMCL"] - except KeyError: - return [] - else: - return list(set("performer:" + p[0] for p in mcl.people)) - - -def musicbrainz_trackid_get(id3, key): - return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')] - - -def musicbrainz_trackid_set(id3, key, value): - if len(value) != 1: - raise ValueError("only one track ID may be set per song") - value = value[0].encode('ascii') - try: - frame = id3["UFID:http://musicbrainz.org"] - except KeyError: - frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value) - id3.add(frame) - else: - frame.data = value - - -def musicbrainz_trackid_delete(id3, key): - del(id3["UFID:http://musicbrainz.org"]) - - -def website_get(id3, key): - urls = [frame.url for frame in id3.getall("WOAR")] - if urls: - return urls - else: - raise EasyID3KeyError(key) - - -def website_set(id3, key, value): - id3.delall("WOAR") - for v in value: - id3.add(mutagen.id3.WOAR(url=v)) - - -def website_delete(id3, key): - id3.delall("WOAR") - - -def gain_get(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - raise EasyID3KeyError(key) - else: - return [u"%+f dB" % frame.gain] - - -def gain_set(id3, key, value): - if len(value) != 1: - raise ValueError( - "there must be exactly one gain value, not %r.", value) - gain = float(value[0].split()[0]) - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1) - id3.add(frame) - frame.gain = gain - - -def gain_delete(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - pass - else: - if frame.peak: - frame.gain = 0.0 - else: - del(id3["RVA2:" + 
key[11:-5]]) - - -def peak_get(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - raise EasyID3KeyError(key) - else: - return [u"%f" % frame.peak] - - -def peak_set(id3, key, value): - if len(value) != 1: - raise ValueError( - "there must be exactly one peak value, not %r.", value) - peak = float(value[0]) - if peak >= 2 or peak < 0: - raise ValueError("peak must be => 0 and < 2.") - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1) - id3.add(frame) - frame.peak = peak - - -def peak_delete(id3, key): - try: - frame = id3["RVA2:" + key[11:-5]] - except KeyError: - pass - else: - if frame.gain: - frame.peak = 0.0 - else: - del(id3["RVA2:" + key[11:-5]]) - - -def peakgain_list(id3, key): - keys = [] - for frame in id3.getall("RVA2"): - keys.append("replaygain_%s_gain" % frame.desc) - keys.append("replaygain_%s_peak" % frame.desc) - return keys - -for frameid, key in iteritems({ - "TALB": "album", - "TBPM": "bpm", - "TCMP": "compilation", # iTunes extension - "TCOM": "composer", - "TCOP": "copyright", - "TENC": "encodedby", - "TEXT": "lyricist", - "TLEN": "length", - "TMED": "media", - "TMOO": "mood", - "TIT2": "title", - "TIT3": "version", - "TPE1": "artist", - "TPE2": "performer", - "TPE3": "conductor", - "TPE4": "arranger", - "TPOS": "discnumber", - "TPUB": "organization", - "TRCK": "tracknumber", - "TOLY": "author", - "TSO2": "albumartistsort", # iTunes extension - "TSOA": "albumsort", - "TSOC": "composersort", # iTunes extension - "TSOP": "artistsort", - "TSOT": "titlesort", - "TSRC": "isrc", - "TSST": "discsubtitle", - "TLAN": "language", -}): - EasyID3.RegisterTextKey(key, frameid) - -EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete) -EasyID3.RegisterKey("date", date_get, date_set, date_delete) -EasyID3.RegisterKey("originaldate", original_date_get, original_date_set, - original_date_delete) -EasyID3.RegisterKey( - "performer:*", performer_get, performer_set, performer_delete, - performer_list) -EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get, - musicbrainz_trackid_set, musicbrainz_trackid_delete) -EasyID3.RegisterKey("website", website_get, website_set, website_delete) -EasyID3.RegisterKey( - "replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list) -EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete) - -# At various times, information for this came from -# http://musicbrainz.org/docs/specs/metadata_tags.html -# http://bugs.musicbrainz.org/ticket/1383 -# http://musicbrainz.org/doc/MusicBrainzTag -for desc, key in iteritems({ - u"MusicBrainz Artist Id": "musicbrainz_artistid", - u"MusicBrainz Album Id": "musicbrainz_albumid", - u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid", - u"MusicBrainz TRM Id": "musicbrainz_trmid", - u"MusicIP PUID": "musicip_puid", - u"MusicMagic Fingerprint": "musicip_fingerprint", - u"MusicBrainz Album Status": "musicbrainz_albumstatus", - u"MusicBrainz Album Type": "musicbrainz_albumtype", - u"MusicBrainz Album Release Country": "releasecountry", - u"MusicBrainz Disc Id": "musicbrainz_discid", - u"ASIN": "asin", - u"ALBUMARTISTSORT": "albumartistsort", - u"BARCODE": "barcode", - u"CATALOGNUMBER": "catalognumber", - u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid", - u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid", - u"MusicBrainz Work Id": "musicbrainz_workid", - u"Acoustid Fingerprint": "acoustid_fingerprint", - u"Acoustid Id": "acoustid_id", -}): - 
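# A minimal sketch of the interface produced by the registrations above,
# assuming a hypothetical file "song.mp3" that already carries an ID3 tag:
from mutagen.easyid3 import EasyID3

audio = EasyID3("song.mp3")
audio["genre"] = [u"Jazz"]                     # stored as a TCON frame
audio["date"] = [u"2004-04-02"]                # stored as TDRC
audio["performer:guitar"] = [u"Someone"]       # one TMCL role/person pair
audio["replaygain_track_gain"] = [u"-6.5 dB"]  # stored as RVA2:track
audio.save()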
    EasyID3.RegisterTXXXKey(key, desc)
-
-
-class EasyID3FileType(ID3FileType):
-    """Like ID3FileType, but uses EasyID3 for tags."""
-    ID3 = EasyID3
diff --git a/resources/lib/libraries/mutagen/easymp4.py b/resources/lib/libraries/mutagen/easymp4.py
deleted file mode 100644
index b965f37d..00000000
--- a/resources/lib/libraries/mutagen/easymp4.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2009 Joe Wreschnig
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of version 2 of the GNU General Public License as
-# published by the Free Software Foundation.
-
-from mutagen import Metadata
-from mutagen._util import DictMixin, dict_match
-from mutagen.mp4 import MP4, MP4Tags, error, delete
-from ._compat import PY2, text_type, PY3
-
-
-__all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"]
-
-
-class EasyMP4KeyError(error, KeyError, ValueError):
-    pass
-
-
-class EasyMP4Tags(DictMixin, Metadata):
-    """A file with MPEG-4 iTunes metadata.
-
-    Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII
-    strings, and values are a list of Unicode strings (and these lists
-    are always of length 0 or 1).
-
-    If you need access to the full MP4 metadata feature set, you should use
-    MP4, not EasyMP4.
-    """
-
-    Set = {}
-    Get = {}
-    Delete = {}
-    List = {}
-
-    def __init__(self, *args, **kwargs):
-        self.__mp4 = MP4Tags(*args, **kwargs)
-        self.load = self.__mp4.load
-        self.save = self.__mp4.save
-        self.delete = self.__mp4.delete
-        self._padding = self.__mp4._padding
-
-    filename = property(lambda s: s.__mp4.filename,
-                        lambda s, fn: setattr(s.__mp4, 'filename', fn))
-
-    @classmethod
-    def RegisterKey(cls, key,
-                    getter=None, setter=None, deleter=None, lister=None):
-        """Register a new key mapping.
-
-        A key mapping is four functions, a getter, setter, deleter,
-        and lister. The key may be either a string or a glob pattern.
-
-        The getter, deleter, and lister receive an MP4Tags instance
-        and the requested key name. The setter also receives the
-        desired value, which will be a list of strings.
-
-        The getter, setter, and deleter are used to implement __getitem__,
-        __setitem__, and __delitem__.
-
-        The lister is used to implement keys(). It should return a
-        list of keys that are actually in the MP4 instance, provided
-        by its associated getter.
-        """
-        key = key.lower()
-        if getter is not None:
-            cls.Get[key] = getter
-        if setter is not None:
-            cls.Set[key] = setter
-        if deleter is not None:
-            cls.Delete[key] = deleter
-        if lister is not None:
-            cls.List[key] = lister
-
-    @classmethod
-    def RegisterTextKey(cls, key, atomid):
-        """Register a text key.
-
-        If the key you need to register is a simple one-to-one mapping
-        of MP4 atom name to EasyMP4Tags key, then you can use this
-        function::
-
-            EasyMP4Tags.RegisterTextKey("artist", "\xa9ART")
-        """
-        def getter(tags, key):
-            return tags[atomid]
-
-        def setter(tags, key, value):
-            tags[atomid] = value
-
-        def deleter(tags, key):
-            del(tags[atomid])
-
-        cls.RegisterKey(key, getter, setter, deleter)
-
-    @classmethod
-    def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
-        """Register a scalar integer key.
- """ - - def getter(tags, key): - return list(map(text_type, tags[atomid])) - - def setter(tags, key, value): - clamp = lambda x: int(min(max(min_value, x), max_value)) - tags[atomid] = [clamp(v) for v in map(int, value)] - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterIntPairKey(cls, key, atomid, min_value=0, - max_value=(2 ** 16) - 1): - def getter(tags, key): - ret = [] - for (track, total) in tags[atomid]: - if total: - ret.append(u"%d/%d" % (track, total)) - else: - ret.append(text_type(track)) - return ret - - def setter(tags, key, value): - clamp = lambda x: int(min(max(min_value, x), max_value)) - data = [] - for v in value: - try: - tracks, total = v.split("/") - tracks = clamp(int(tracks)) - total = clamp(int(total)) - except (ValueError, TypeError): - tracks = clamp(int(v)) - total = min_value - data.append((tracks, total)) - tags[atomid] = data - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - @classmethod - def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"): - """Register a text key. - - If the key you need to register is a simple one-to-one mapping - of MP4 freeform atom (----) and name to EasyMP4Tags key, then - you can use this function:: - - EasyMP4Tags.RegisterFreeformKey( - "musicbrainz_artistid", "MusicBrainz Artist Id") - """ - atomid = "----:" + mean + ":" + name - - def getter(tags, key): - return [s.decode("utf-8", "replace") for s in tags[atomid]] - - def setter(tags, key, value): - encoded = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("%r not str" % v) - v = v.decode("utf-8") - encoded.append(v.encode("utf-8")) - tags[atomid] = encoded - - def deleter(tags, key): - del(tags[atomid]) - - cls.RegisterKey(key, getter, setter, deleter) - - def __getitem__(self, key): - key = key.lower() - func = dict_match(self.Get, key) - if func is not None: - return func(self.__mp4, key) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def __setitem__(self, key, value): - key = key.lower() - - if PY2: - if isinstance(value, basestring): - value = [value] - else: - if isinstance(value, text_type): - value = [value] - - func = dict_match(self.Set, key) - if func is not None: - return func(self.__mp4, key, value) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def __delitem__(self, key): - key = key.lower() - func = dict_match(self.Delete, key) - if func is not None: - return func(self.__mp4, key) - else: - raise EasyMP4KeyError("%r is not a valid key" % key) - - def keys(self): - keys = [] - for key in self.Get.keys(): - if key in self.List: - keys.extend(self.List[key](self.__mp4, key)) - elif key in self: - keys.append(key) - return keys - - def pprint(self): - """Print tag key=value pairs.""" - strings = [] - for key in sorted(self.keys()): - values = self[key] - for value in values: - strings.append("%s=%s" % (key, value)) - return "\n".join(strings) - -for atomid, key in { - '\xa9nam': 'title', - '\xa9alb': 'album', - '\xa9ART': 'artist', - 'aART': 'albumartist', - '\xa9day': 'date', - '\xa9cmt': 'comment', - 'desc': 'description', - '\xa9grp': 'grouping', - '\xa9gen': 'genre', - 'cprt': 'copyright', - 'soal': 'albumsort', - 'soaa': 'albumartistsort', - 'soar': 'artistsort', - 'sonm': 'titlesort', - 'soco': 'composersort', -}.items(): - EasyMP4Tags.RegisterTextKey(key, atomid) - -for name, key in { - 'MusicBrainz Artist Id': 'musicbrainz_artistid', - 
'MusicBrainz Track Id': 'musicbrainz_trackid', - 'MusicBrainz Album Id': 'musicbrainz_albumid', - 'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid', - 'MusicIP PUID': 'musicip_puid', - 'MusicBrainz Album Status': 'musicbrainz_albumstatus', - 'MusicBrainz Album Type': 'musicbrainz_albumtype', - 'MusicBrainz Release Country': 'releasecountry', -}.items(): - EasyMP4Tags.RegisterFreeformKey(key, name) - -for name, key in { - "tmpo": "bpm", -}.items(): - EasyMP4Tags.RegisterIntKey(key, name) - -for name, key in { - "trkn": "tracknumber", - "disk": "discnumber", -}.items(): - EasyMP4Tags.RegisterIntPairKey(key, name) - - -class EasyMP4(MP4): - """Like :class:`MP4 <mutagen.mp4.MP4>`, - but uses :class:`EasyMP4Tags` for tags. - - :ivar info: :class:`MP4Info <mutagen.mp4.MP4Info>` - :ivar tags: :class:`EasyMP4Tags` - """ - - MP4Tags = EasyMP4Tags - - Get = EasyMP4Tags.Get - Set = EasyMP4Tags.Set - Delete = EasyMP4Tags.Delete - List = EasyMP4Tags.List - RegisterTextKey = EasyMP4Tags.RegisterTextKey - RegisterKey = EasyMP4Tags.RegisterKey diff --git a/resources/lib/libraries/mutagen/flac.py b/resources/lib/libraries/mutagen/flac.py deleted file mode 100644 index e6cd1cf7..00000000 --- a/resources/lib/libraries/mutagen/flac.py +++ /dev/null @@ -1,876 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""Read and write FLAC Vorbis comments and stream information. - -Read more about FLAC at http://flac.sourceforge.net. - -FLAC supports arbitrary metadata blocks. The two most interesting ones -are the FLAC stream information block, and the Vorbis comment block; -these are also the only ones Mutagen can currently read. - -This module does not handle Ogg FLAC files. - -Based off documentation available at -http://flac.sourceforge.net/format.html -""" - -__all__ = ["FLAC", "Open", "delete"] - -import struct -from ._vorbis import VCommentDict -import mutagen - -from ._compat import cBytesIO, endswith, chr_, xrange -from mutagen._util import resize_bytes, MutagenError, get_size -from mutagen._tags import PaddingInfo -from mutagen.id3 import BitPaddedInt -from functools import reduce - - -class error(IOError, MutagenError): - pass - - -class FLACNoHeaderError(error): - pass - - -class FLACVorbisError(ValueError, error): - pass - - -def to_int_be(data): - """Convert an arbitrarily-long string to a long using big-endian - byte order.""" - return reduce(lambda a, b: (a << 8) + b, bytearray(data), 0) - - -class StrictFileObject(object): - """Wraps a file-like object and raises an exception if the requested - amount of data to read isn't returned.""" - - def __init__(self, fileobj): - self._fileobj = fileobj - for m in ["close", "tell", "seek", "write", "name"]: - if hasattr(fileobj, m): - setattr(self, m, getattr(fileobj, m)) - - def read(self, size=-1): - data = self._fileobj.read(size) - if size >= 0 and len(data) != size: - raise error("file said %d bytes, read %d bytes" % ( - size, len(data))) - return data - - def tryread(self, *args): - return self._fileobj.read(*args) - - -class MetadataBlock(object): - """A generic block of FLAC metadata. - - This class is extended by specific used as an ancestor for more specific - blocks, and also as a container for data blobs of unknown blocks. 
- - Attributes: - - * data -- raw binary data for this block - """ - - _distrust_size = False - """For block types setting this, we don't trust the size field and - use the size of the content instead.""" - - _invalid_overflow_size = -1 - """In case the real size was bigger than what is representable by the - 24 bit size field, we save the wrong specified size here. This can - only be set if _distrust_size is True""" - - _MAX_SIZE = 2 ** 24 - 1 - - def __init__(self, data): - """Parse the given data string or file-like as a metadata block. - The metadata header should not be included.""" - if data is not None: - if not isinstance(data, StrictFileObject): - if isinstance(data, bytes): - data = cBytesIO(data) - elif not hasattr(data, 'read'): - raise TypeError( - "StreamInfo requires string data or a file-like") - data = StrictFileObject(data) - self.load(data) - - def load(self, data): - self.data = data.read() - - def write(self): - return self.data - - @classmethod - def _writeblock(cls, block, is_last=False): - """Returns the block content + header. - - Raises error. - """ - - data = bytearray() - code = (block.code | 128) if is_last else block.code - datum = block.write() - size = len(datum) - if size > cls._MAX_SIZE: - if block._distrust_size and block._invalid_overflow_size != -1: - # The original size of this block was (1) wrong and (2) - # the real size doesn't allow us to save the file - # according to the spec (too big for 24 bit uint). Instead - # simply write back the original wrong size.. at least - # we don't make the file more "broken" as it is. - size = block._invalid_overflow_size - else: - raise error("block is too long to write") - assert not size > cls._MAX_SIZE - length = struct.pack(">I", size)[-3:] - data.append(code) - data += length - data += datum - return data - - @classmethod - def _writeblocks(cls, blocks, available, cont_size, padding_func): - """Render metadata block as a byte string.""" - - # write everything except padding - data = bytearray() - for block in blocks: - if isinstance(block, Padding): - continue - data += cls._writeblock(block) - blockssize = len(data) - - # take the padding overhead into account. we always add one - # to make things simple. - padding_block = Padding() - blockssize += len(cls._writeblock(padding_block)) - - # finally add a padding block - info = PaddingInfo(available - blockssize, cont_size) - padding_block.length = min(info._get_padding(padding_func), - cls._MAX_SIZE) - data += cls._writeblock(padding_block, is_last=True) - - return data - - -class StreamInfo(MetadataBlock, mutagen.StreamInfo): - """FLAC stream information. - - This contains information about the audio data in the FLAC file. - Unlike most stream information objects in Mutagen, changes to this - one will rewritten to the file when it is saved. Unless you are - actually changing the audio stream itself, don't change any - attributes of this block. 
- - Attributes: - - * min_blocksize -- minimum audio block size - * max_blocksize -- maximum audio block size - * sample_rate -- audio sample rate in Hz - * channels -- audio channels (1 for mono, 2 for stereo) - * bits_per_sample -- bits per sample - * total_samples -- total samples in file - * length -- audio length in seconds - """ - - code = 0 - - def __eq__(self, other): - try: - return (self.min_blocksize == other.min_blocksize and - self.max_blocksize == other.max_blocksize and - self.sample_rate == other.sample_rate and - self.channels == other.channels and - self.bits_per_sample == other.bits_per_sample and - self.total_samples == other.total_samples) - except: - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - self.min_blocksize = int(to_int_be(data.read(2))) - self.max_blocksize = int(to_int_be(data.read(2))) - self.min_framesize = int(to_int_be(data.read(3))) - self.max_framesize = int(to_int_be(data.read(3))) - # first 16 bits of sample rate - sample_first = to_int_be(data.read(2)) - # last 4 bits of sample rate, 3 of channels, first 1 of bits/sample - sample_channels_bps = to_int_be(data.read(1)) - # last 4 of bits/sample, 36 of total samples - bps_total = to_int_be(data.read(5)) - - sample_tail = sample_channels_bps >> 4 - self.sample_rate = int((sample_first << 4) + sample_tail) - if not self.sample_rate: - raise error("A sample rate value of 0 is invalid") - self.channels = int(((sample_channels_bps >> 1) & 7) + 1) - bps_tail = bps_total >> 36 - bps_head = (sample_channels_bps & 1) << 4 - self.bits_per_sample = int(bps_head + bps_tail + 1) - self.total_samples = bps_total & 0xFFFFFFFFF - self.length = self.total_samples / float(self.sample_rate) - - self.md5_signature = to_int_be(data.read(16)) - - def write(self): - f = cBytesIO() - f.write(struct.pack(">I", self.min_blocksize)[-2:]) - f.write(struct.pack(">I", self.max_blocksize)[-2:]) - f.write(struct.pack(">I", self.min_framesize)[-3:]) - f.write(struct.pack(">I", self.max_framesize)[-3:]) - - # first 16 bits of sample rate - f.write(struct.pack(">I", self.sample_rate >> 4)[-2:]) - # 4 bits sample, 3 channel, 1 bps - byte = (self.sample_rate & 0xF) << 4 - byte += ((self.channels - 1) & 7) << 1 - byte += ((self.bits_per_sample - 1) >> 4) & 1 - f.write(chr_(byte)) - # 4 bits of bps, 4 of sample count - byte = ((self.bits_per_sample - 1) & 0xF) << 4 - byte += (self.total_samples >> 32) & 0xF - f.write(chr_(byte)) - # last 32 of sample count - f.write(struct.pack(">I", self.total_samples & 0xFFFFFFFF)) - # MD5 signature - sig = self.md5_signature - f.write(struct.pack( - ">4I", (sig >> 96) & 0xFFFFFFFF, (sig >> 64) & 0xFFFFFFFF, - (sig >> 32) & 0xFFFFFFFF, sig & 0xFFFFFFFF)) - return f.getvalue() - - def pprint(self): - return u"FLAC, %.2f seconds, %d Hz" % (self.length, self.sample_rate) - - -class SeekPoint(tuple): - """A single seek point in a FLAC file. - - Placeholder seek points have first_sample of 0xFFFFFFFFFFFFFFFFL, - and byte_offset and num_samples undefined. Seek points must be - sorted in ascending order by first_sample number. Seek points must - be unique by first_sample number, except for placeholder - points. Placeholder points must occur last in the table and there - may be any number of them. 
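# Reading the stream-information fields listed above, assuming a
# hypothetical file "song.flac":
from mutagen.flac import FLAC

audio = FLAC("song.flac")
info = audio.info
print("%d Hz, %d channels, %.2f seconds" %
      (info.sample_rate, info.channels, info.length))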
- - Attributes: - - * first_sample -- sample number of first sample in the target frame - * byte_offset -- offset from first frame to target frame - * num_samples -- number of samples in target frame - """ - - def __new__(cls, first_sample, byte_offset, num_samples): - return super(cls, SeekPoint).__new__( - cls, (first_sample, byte_offset, num_samples)) - - first_sample = property(lambda self: self[0]) - byte_offset = property(lambda self: self[1]) - num_samples = property(lambda self: self[2]) - - -class SeekTable(MetadataBlock): - """Read and write FLAC seek tables. - - Attributes: - - * seekpoints -- list of SeekPoint objects - """ - - __SEEKPOINT_FORMAT = '>QQH' - __SEEKPOINT_SIZE = struct.calcsize(__SEEKPOINT_FORMAT) - - code = 3 - - def __init__(self, data): - self.seekpoints = [] - super(SeekTable, self).__init__(data) - - def __eq__(self, other): - try: - return (self.seekpoints == other.seekpoints) - except (AttributeError, TypeError): - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - self.seekpoints = [] - sp = data.tryread(self.__SEEKPOINT_SIZE) - while len(sp) == self.__SEEKPOINT_SIZE: - self.seekpoints.append(SeekPoint( - *struct.unpack(self.__SEEKPOINT_FORMAT, sp))) - sp = data.tryread(self.__SEEKPOINT_SIZE) - - def write(self): - f = cBytesIO() - for seekpoint in self.seekpoints: - packed = struct.pack( - self.__SEEKPOINT_FORMAT, - seekpoint.first_sample, seekpoint.byte_offset, - seekpoint.num_samples) - f.write(packed) - return f.getvalue() - - def __repr__(self): - return "<%s seekpoints=%r>" % (type(self).__name__, self.seekpoints) - - -class VCFLACDict(VCommentDict): - """Read and write FLAC Vorbis comments. - - FLACs don't use the framing bit at the end of the comment block. - So this extends VCommentDict to not use the framing bit. - """ - - code = 4 - _distrust_size = True - - def load(self, data, errors='replace', framing=False): - super(VCFLACDict, self).load(data, errors=errors, framing=framing) - - def write(self, framing=False): - return super(VCFLACDict, self).write(framing=framing) - - -class CueSheetTrackIndex(tuple): - """Index for a track in a cuesheet. - - For CD-DA, an index_number of 0 corresponds to the track - pre-gap. The first index in a track must have a number of 0 or 1, - and subsequently, index_numbers must increase by 1. Index_numbers - must be unique within a track. And index_offset must be evenly - divisible by 588 samples. - - Attributes: - - * index_number -- index point number - * index_offset -- offset in samples from track start - """ - - def __new__(cls, index_number, index_offset): - return super(cls, CueSheetTrackIndex).__new__( - cls, (index_number, index_offset)) - - index_number = property(lambda self: self[0]) - index_offset = property(lambda self: self[1]) - - -class CueSheetTrack(object): - """A track in a cuesheet. - - For CD-DA, track_numbers must be 1-99, or 170 for the - lead-out. Track_numbers must be unique within a cue sheet. There - must be atleast one index in every track except the lead-out track - which must have none. 
- - Attributes: - - * track_number -- track number - * start_offset -- track offset in samples from start of FLAC stream - * isrc -- ISRC code - * type -- 0 for audio, 1 for digital data - * pre_emphasis -- true if the track is recorded with pre-emphasis - * indexes -- list of CueSheetTrackIndex objects - """ - - def __init__(self, track_number, start_offset, isrc='', type_=0, - pre_emphasis=False): - self.track_number = track_number - self.start_offset = start_offset - self.isrc = isrc - self.type = type_ - self.pre_emphasis = pre_emphasis - self.indexes = [] - - def __eq__(self, other): - try: - return (self.track_number == other.track_number and - self.start_offset == other.start_offset and - self.isrc == other.isrc and - self.type == other.type and - self.pre_emphasis == other.pre_emphasis and - self.indexes == other.indexes) - except (AttributeError, TypeError): - return False - - __hash__ = object.__hash__ - - def __repr__(self): - return (("<%s number=%r, offset=%d, isrc=%r, type=%r, " - "pre_emphasis=%r, indexes=%r)>") % - (type(self).__name__, self.track_number, self.start_offset, - self.isrc, self.type, self.pre_emphasis, self.indexes)) - - -class CueSheet(MetadataBlock): - """Read and write FLAC embedded cue sheets. - - Number of tracks should be from 1 to 100. There should always be - exactly one lead-out track and that track must be the last track - in the cue sheet. - - Attributes: - - * media_catalog_number -- media catalog number in ASCII - * lead_in_samples -- number of lead-in samples - * compact_disc -- true if the cuesheet corresponds to a compact disc - * tracks -- list of CueSheetTrack objects - * lead_out -- lead-out as CueSheetTrack or None if lead-out was not found - """ - - __CUESHEET_FORMAT = '>128sQB258xB' - __CUESHEET_SIZE = struct.calcsize(__CUESHEET_FORMAT) - __CUESHEET_TRACK_FORMAT = '>QB12sB13xB' - __CUESHEET_TRACK_SIZE = struct.calcsize(__CUESHEET_TRACK_FORMAT) - __CUESHEET_TRACKINDEX_FORMAT = '>QB3x' - __CUESHEET_TRACKINDEX_SIZE = struct.calcsize(__CUESHEET_TRACKINDEX_FORMAT) - - code = 5 - - media_catalog_number = b'' - lead_in_samples = 88200 - compact_disc = True - - def __init__(self, data): - self.tracks = [] - super(CueSheet, self).__init__(data) - - def __eq__(self, other): - try: - return (self.media_catalog_number == other.media_catalog_number and - self.lead_in_samples == other.lead_in_samples and - self.compact_disc == other.compact_disc and - self.tracks == other.tracks) - except (AttributeError, TypeError): - return False - - __hash__ = MetadataBlock.__hash__ - - def load(self, data): - header = data.read(self.__CUESHEET_SIZE) - media_catalog_number, lead_in_samples, flags, num_tracks = \ - struct.unpack(self.__CUESHEET_FORMAT, header) - self.media_catalog_number = media_catalog_number.rstrip(b'\0') - self.lead_in_samples = lead_in_samples - self.compact_disc = bool(flags & 0x80) - self.tracks = [] - for i in xrange(num_tracks): - track = data.read(self.__CUESHEET_TRACK_SIZE) - start_offset, track_number, isrc_padded, flags, num_indexes = \ - struct.unpack(self.__CUESHEET_TRACK_FORMAT, track) - isrc = isrc_padded.rstrip(b'\0') - type_ = (flags & 0x80) >> 7 - pre_emphasis = bool(flags & 0x40) - val = CueSheetTrack( - track_number, start_offset, isrc, type_, pre_emphasis) - for j in xrange(num_indexes): - index = data.read(self.__CUESHEET_TRACKINDEX_SIZE) - index_offset, index_number = struct.unpack( - self.__CUESHEET_TRACKINDEX_FORMAT, index) - val.indexes.append( - CueSheetTrackIndex(index_number, index_offset)) - self.tracks.append(val) - - 
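# Inspecting a parsed cue sheet, assuming a hypothetical "album.flac"
# that actually embeds one (FLAC.cuesheet is None otherwise):
from mutagen.flac import FLAC

audio = FLAC("album.flac")
if audio.cuesheet is not None:
    for track in audio.cuesheet.tracks:
        print(track.track_number, track.start_offset, track.isrc)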
    def write(self):
-        f = cBytesIO()
-        flags = 0
-        if self.compact_disc:
-            flags |= 0x80
-        packed = struct.pack(
-            self.__CUESHEET_FORMAT, self.media_catalog_number,
-            self.lead_in_samples, flags, len(self.tracks))
-        f.write(packed)
-        for track in self.tracks:
-            track_flags = 0
-            track_flags |= (track.type & 1) << 7
-            if track.pre_emphasis:
-                track_flags |= 0x40
-            track_packed = struct.pack(
-                self.__CUESHEET_TRACK_FORMAT, track.start_offset,
-                track.track_number, track.isrc, track_flags,
-                len(track.indexes))
-            f.write(track_packed)
-            for index in track.indexes:
-                index_packed = struct.pack(
-                    self.__CUESHEET_TRACKINDEX_FORMAT,
-                    index.index_offset, index.index_number)
-                f.write(index_packed)
-        return f.getvalue()
-
-    def __repr__(self):
-        return (("<%s media_catalog_number=%r, lead_in=%r, compact_disc=%r, "
-                 "tracks=%r>") %
-                (type(self).__name__, self.media_catalog_number,
-                 self.lead_in_samples, self.compact_disc, self.tracks))
-
-
-class Picture(MetadataBlock):
-    """Read and write FLAC embedded pictures.
-
-    Attributes:
-
-    * type -- picture type (same as types for ID3 APIC frames)
-    * mime -- MIME type of the picture
-    * desc -- picture's description
-    * width -- width in pixels
-    * height -- height in pixels
-    * depth -- color depth in bits-per-pixel
-    * colors -- number of colors for indexed palettes (like GIF),
-      0 for non-indexed
-    * data -- picture data
-
-    To create a picture from file (in order to add to a FLAC file),
-    instantiate this object without passing anything to the constructor and
-    then set the properties manually::
-
-        pic = Picture()
-
-        with open("Folder.jpg", "rb") as f:
-            pic.data = f.read()
-
-        pic.type = id3.PictureType.COVER_FRONT
-        pic.mime = u"image/jpeg"
-        pic.width = 500
-        pic.height = 500
-        pic.depth = 16  # color depth
-    """
-
-    code = 6
-    _distrust_size = True
-
-    def __init__(self, data=None):
-        self.type = 0
-        self.mime = u''
-        self.desc = u''
-        self.width = 0
-        self.height = 0
-        self.depth = 0
-        self.colors = 0
-        self.data = b''
-        super(Picture, self).__init__(data)
-
-    def __eq__(self, other):
-        try:
-            return (self.type == other.type and
-                    self.mime == other.mime and
-                    self.desc == other.desc and
-                    self.width == other.width and
-                    self.height == other.height and
-                    self.depth == other.depth and
-                    self.colors == other.colors and
-                    self.data == other.data)
-        except (AttributeError, TypeError):
-            return False
-
-    __hash__ = MetadataBlock.__hash__
-
-    def load(self, data):
-        self.type, length = struct.unpack('>2I', data.read(8))
-        self.mime = data.read(length).decode('UTF-8', 'replace')
-        length, = struct.unpack('>I', data.read(4))
-        self.desc = data.read(length).decode('UTF-8', 'replace')
-        (self.width, self.height, self.depth,
-         self.colors, length) = struct.unpack('>5I', data.read(20))
-        self.data = data.read(length)
-
-    def write(self):
-        f = cBytesIO()
-        mime = self.mime.encode('UTF-8')
-        f.write(struct.pack('>2I', self.type, len(mime)))
-        f.write(mime)
-        desc = self.desc.encode('UTF-8')
-        f.write(struct.pack('>I', len(desc)))
-        f.write(desc)
-        f.write(struct.pack('>5I', self.width, self.height, self.depth,
-                            self.colors, len(self.data)))
-        f.write(self.data)
-        return f.getvalue()
-
-    def __repr__(self):
-        return "<%s '%s' (%d bytes)>" % (type(self).__name__, self.mime,
                                         len(self.data))
-
-
-class Padding(MetadataBlock):
-    """Empty padding space for metadata blocks.
-
-    To avoid rewriting the entire FLAC file when editing comments,
-    metadata is often padded. Padding should occur at the end, and no
-    more than one padding block should be in any FLAC file.
- """ - - code = 1 - - def __init__(self, data=b""): - super(Padding, self).__init__(data) - - def load(self, data): - self.length = len(data.read()) - - def write(self): - try: - return b"\x00" * self.length - # On some 64 bit platforms this won't generate a MemoryError - # or OverflowError since you might have enough RAM, but it - # still generates a ValueError. On other 64 bit platforms, - # this will still succeed for extremely large values. - # Those should never happen in the real world, and if they - # do, writeblocks will catch it. - except (OverflowError, ValueError, MemoryError): - raise error("cannot write %d bytes" % self.length) - - def __eq__(self, other): - return isinstance(other, Padding) and self.length == other.length - - __hash__ = MetadataBlock.__hash__ - - def __repr__(self): - return "<%s (%d bytes)>" % (type(self).__name__, self.length) - - -class FLAC(mutagen.FileType): - """A FLAC audio file. - - Attributes: - - * cuesheet -- CueSheet object, if any - * seektable -- SeekTable object, if any - * pictures -- list of embedded pictures - """ - - _mimes = ["audio/x-flac", "application/x-flac"] - - info = None - """A `StreamInfo`""" - - tags = None - """A `VCommentDict`""" - - METADATA_BLOCKS = [StreamInfo, Padding, None, SeekTable, VCFLACDict, - CueSheet, Picture] - """Known metadata block types, indexed by ID.""" - - @staticmethod - def score(filename, fileobj, header_data): - return (header_data.startswith(b"fLaC") + - endswith(filename.lower(), ".flac") * 3) - - def __read_metadata_block(self, fileobj): - byte = ord(fileobj.read(1)) - size = to_int_be(fileobj.read(3)) - code = byte & 0x7F - last_block = bool(byte & 0x80) - - try: - block_type = self.METADATA_BLOCKS[code] or MetadataBlock - except IndexError: - block_type = MetadataBlock - - if block_type._distrust_size: - # Some jackass is writing broken Metadata block length - # for Vorbis comment blocks, and the FLAC reference - # implementaton can parse them (mostly by accident), - # so we have to too. Instead of parsing the size - # given, parse an actual Vorbis comment, leaving - # fileobj in the right position. - # http://code.google.com/p/mutagen/issues/detail?id=52 - # ..same for the Picture block: - # http://code.google.com/p/mutagen/issues/detail?id=106 - start = fileobj.tell() - block = block_type(fileobj) - real_size = fileobj.tell() - start - if real_size > MetadataBlock._MAX_SIZE: - block._invalid_overflow_size = size - else: - data = fileobj.read(size) - block = block_type(data) - block.code = code - - if block.code == VCFLACDict.code: - if self.tags is None: - self.tags = block - else: - raise FLACVorbisError("> 1 Vorbis comment block found") - elif block.code == CueSheet.code: - if self.cuesheet is None: - self.cuesheet = block - else: - raise error("> 1 CueSheet block found") - elif block.code == SeekTable.code: - if self.seektable is None: - self.seektable = block - else: - raise error("> 1 SeekTable block found") - self.metadata_blocks.append(block) - return not last_block - - def add_tags(self): - """Add a Vorbis comment block to the file.""" - if self.tags is None: - self.tags = VCFLACDict() - self.metadata_blocks.append(self.tags) - else: - raise FLACVorbisError("a Vorbis comment already exists") - - add_vorbiscomment = add_tags - - def delete(self, filename=None): - """Remove Vorbis comments from a file. - - If no filename is given, the one most recently loaded is used. 
- """ - if filename is None: - filename = self.filename - - if self.tags is not None: - self.metadata_blocks.remove(self.tags) - self.save(padding=lambda x: 0) - self.metadata_blocks.append(self.tags) - self.tags.clear() - - vc = property(lambda s: s.tags, doc="Alias for tags; don't use this.") - - def load(self, filename): - """Load file information from a filename.""" - - self.metadata_blocks = [] - self.tags = None - self.cuesheet = None - self.seektable = None - self.filename = filename - fileobj = StrictFileObject(open(filename, "rb")) - try: - self.__check_header(fileobj) - while self.__read_metadata_block(fileobj): - pass - finally: - fileobj.close() - - try: - self.metadata_blocks[0].length - except (AttributeError, IndexError): - raise FLACNoHeaderError("Stream info block not found") - - @property - def info(self): - return self.metadata_blocks[0] - - def add_picture(self, picture): - """Add a new picture to the file.""" - self.metadata_blocks.append(picture) - - def clear_pictures(self): - """Delete all pictures from the file.""" - - blocks = [b for b in self.metadata_blocks if b.code != Picture.code] - self.metadata_blocks = blocks - - @property - def pictures(self): - """List of embedded pictures""" - - return [b for b in self.metadata_blocks if b.code == Picture.code] - - def save(self, filename=None, deleteid3=False, padding=None): - """Save metadata blocks to a file. - - If no filename is given, the one most recently loaded is used. - """ - - if filename is None: - filename = self.filename - - with open(filename, 'rb+') as f: - header = self.__check_header(f) - audio_offset = self.__find_audio_offset(f) - # "fLaC" and maybe ID3 - available = audio_offset - header - - # Delete ID3v2 - if deleteid3 and header > 4: - available += header - 4 - header = 4 - - content_size = get_size(f) - audio_offset - assert content_size >= 0 - data = MetadataBlock._writeblocks( - self.metadata_blocks, available, content_size, padding) - data_size = len(data) - - resize_bytes(f, available, data_size, header) - f.seek(header - 4) - f.write(b"fLaC") - f.write(data) - - # Delete ID3v1 - if deleteid3: - try: - f.seek(-128, 2) - except IOError: - pass - else: - if f.read(3) == b"TAG": - f.seek(-128, 2) - f.truncate() - - def __find_audio_offset(self, fileobj): - byte = 0x00 - while not (byte & 0x80): - byte = ord(fileobj.read(1)) - size = to_int_be(fileobj.read(3)) - try: - block_type = self.METADATA_BLOCKS[byte & 0x7F] - except IndexError: - block_type = None - - if block_type and block_type._distrust_size: - # See comments in read_metadata_block; the size can't - # be trusted for Vorbis comment blocks and Picture block - block_type(fileobj) - else: - fileobj.read(size) - return fileobj.tell() - - def __check_header(self, fileobj): - """Returns the offset of the flac block start - (skipping id3 tags if found). The passed fileobj will be advanced to - that offset as well. 
- """ - - size = 4 - header = fileobj.read(4) - if header != b"fLaC": - size = None - if header[:3] == b"ID3": - size = 14 + BitPaddedInt(fileobj.read(6)[2:]) - fileobj.seek(size - 4) - if fileobj.read(4) != b"fLaC": - size = None - if size is None: - raise FLACNoHeaderError( - "%r is not a valid FLAC file" % fileobj.name) - return size - - -Open = FLAC - - -def delete(filename): - """Remove tags from a file.""" - FLAC(filename).delete() diff --git a/resources/lib/libraries/mutagen/id3/__init__.py b/resources/lib/libraries/mutagen/id3/__init__.py deleted file mode 100644 index 9aef865b..00000000 --- a/resources/lib/libraries/mutagen/id3/__init__.py +++ /dev/null @@ -1,1093 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# 2006 Lukas Lalinsky -# 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""ID3v2 reading and writing. - -This is based off of the following references: - -* http://id3.org/id3v2.4.0-structure -* http://id3.org/id3v2.4.0-frames -* http://id3.org/id3v2.3.0 -* http://id3.org/id3v2-00 -* http://id3.org/ID3v1 - -Its largest deviation from the above (versions 2.3 and 2.2) is that it -will not interpret the / characters as a separator, and will almost -always accept null separators to generate multi-valued text frames. - -Because ID3 frame structure differs between frame types, each frame is -implemented as a different class (e.g. TIT2 as mutagen.id3.TIT2). Each -frame's documentation contains a list of its attributes. - -Since this file's documentation is a little unwieldy, you are probably -interested in the :class:`ID3` class to start with. -""" - -__all__ = ['ID3', 'ID3FileType', 'Frames', 'Open', 'delete'] - -import struct -import errno - -from struct import unpack, pack, error as StructError - -import mutagen -from mutagen._util import insert_bytes, delete_bytes, DictProxy, enum -from mutagen._tags import PaddingInfo -from .._compat import chr_, PY3 - -from ._util import * -from ._frames import * -from ._specs import * - - -@enum -class ID3v1SaveOptions(object): - - REMOVE = 0 - """ID3v1 tags will be removed""" - - UPDATE = 1 - """ID3v1 tags will be updated but not added""" - - CREATE = 2 - """ID3v1 tags will be created and/or updated""" - - -def _fullread(fileobj, size): - """Read a certain number of bytes from the source file. - - Raises ValueError on invalid size input or EOFError/IOError. 
- """ - - if size < 0: - raise ValueError('Requested bytes (%s) less than zero' % size) - data = fileobj.read(size) - if len(data) != size: - raise EOFError("Not enough data to read") - return data - - -class ID3Header(object): - - _V24 = (2, 4, 0) - _V23 = (2, 3, 0) - _V22 = (2, 2, 0) - _V11 = (1, 1) - - f_unsynch = property(lambda s: bool(s._flags & 0x80)) - f_extended = property(lambda s: bool(s._flags & 0x40)) - f_experimental = property(lambda s: bool(s._flags & 0x20)) - f_footer = property(lambda s: bool(s._flags & 0x10)) - - def __init__(self, fileobj=None): - """Raises ID3NoHeaderError, ID3UnsupportedVersionError or error""" - - if fileobj is None: - # for testing - self._flags = 0 - return - - fn = getattr(fileobj, "name", "<unknown>") - try: - data = _fullread(fileobj, 10) - except EOFError: - raise ID3NoHeaderError("%s: too small" % fn) - - id3, vmaj, vrev, flags, size = unpack('>3sBBB4s', data) - self._flags = flags - self.size = BitPaddedInt(size) + 10 - self.version = (2, vmaj, vrev) - - if id3 != b'ID3': - raise ID3NoHeaderError("%r doesn't start with an ID3 tag" % fn) - - if vmaj not in [2, 3, 4]: - raise ID3UnsupportedVersionError("%r ID3v2.%d not supported" - % (fn, vmaj)) - - if not BitPaddedInt.has_valid_padding(size): - raise error("Header size not synchsafe") - - if (self.version >= self._V24) and (flags & 0x0f): - raise error( - "%r has invalid flags %#02x" % (fn, flags)) - elif (self._V23 <= self.version < self._V24) and (flags & 0x1f): - raise error( - "%r has invalid flags %#02x" % (fn, flags)) - - if self.f_extended: - try: - extsize_data = _fullread(fileobj, 4) - except EOFError: - raise error("%s: too small" % fn) - - if PY3: - frame_id = extsize_data.decode("ascii", "replace") - else: - frame_id = extsize_data - - if frame_id in Frames: - # Some tagger sets the extended header flag but - # doesn't write an extended header; in this case, the - # ID3 data follows immediately. Since no extended - # header is going to be long enough to actually match - # a frame, and if it's *not* a frame we're going to be - # completely lost anyway, this seems to be the most - # correct check. - # http://code.google.com/p/quodlibet/issues/detail?id=126 - self._flags ^= 0x40 - extsize = 0 - fileobj.seek(-4, 1) - elif self.version >= self._V24: - # "Where the 'Extended header size' is the size of the whole - # extended header, stored as a 32 bit synchsafe integer." - extsize = BitPaddedInt(extsize_data) - 4 - if not BitPaddedInt.has_valid_padding(extsize_data): - raise error( - "Extended header size not synchsafe") - else: - # "Where the 'Extended header size', currently 6 or 10 bytes, - # excludes itself." - extsize = unpack('>L', extsize_data)[0] - - try: - self._extdata = _fullread(fileobj, extsize) - except EOFError: - raise error("%s: too small" % fn) - - -class ID3(DictProxy, mutagen.Metadata): - """A file with an ID3v2 tag. - - Attributes: - - * version -- ID3 tag version as a tuple - * unknown_frames -- raw frame data of any unknown frames found - * size -- the total size of the ID3 tag, including the header - """ - - __module__ = "mutagen.id3" - - PEDANTIC = True - """Deprecated. 
Doesn't have any effect""" - - filename = None - - def __init__(self, *args, **kwargs): - self.unknown_frames = [] - self.__unknown_version = None - self._header = None - self._version = (2, 4, 0) - super(ID3, self).__init__(*args, **kwargs) - - @property - def version(self): - """ID3 tag version as a tuple (of the loaded file)""" - - if self._header is not None: - return self._header.version - return self._version - - @version.setter - def version(self, value): - self._version = value - - @property - def f_unsynch(self): - if self._header is not None: - return self._header.f_unsynch - return False - - @property - def f_extended(self): - if self._header is not None: - return self._header.f_extended - return False - - @property - def size(self): - if self._header is not None: - return self._header.size - return 0 - - def _pre_load_header(self, fileobj): - # XXX: for aiff to adjust the offset.. - pass - - def load(self, filename, known_frames=None, translate=True, v2_version=4): - """Load tags from a filename. - - Keyword arguments: - - * filename -- filename to load tag data from - * known_frames -- dict mapping frame IDs to Frame objects - * translate -- Update all tags to ID3v2.3/4 internally. If you - intend to save, this must be true or you have to - call update_to_v23() / update_to_v24() manually. - * v2_version -- if update_to_v23 or update_to_v24 get called (3 or 4) - - Example of loading a custom frame:: - - my_frames = dict(mutagen.id3.Frames) - class XMYF(Frame): ... - my_frames["XMYF"] = XMYF - mutagen.id3.ID3(filename, known_frames=my_frames) - """ - - if v2_version not in (3, 4): - raise ValueError("Only 3 and 4 possible for v2_version") - - self.filename = filename - self.unknown_frames = [] - self.__known_frames = known_frames - self._header = None - self._padding = 0 # for testing - - with open(filename, 'rb') as fileobj: - self._pre_load_header(fileobj) - - try: - self._header = ID3Header(fileobj) - except (ID3NoHeaderError, ID3UnsupportedVersionError): - frames, offset = _find_id3v1(fileobj) - if frames is None: - raise - - self.version = ID3Header._V11 - for v in frames.values(): - self.add(v) - else: - frames = self.__known_frames - if frames is None: - if self.version >= ID3Header._V23: - frames = Frames - elif self.version >= ID3Header._V22: - frames = Frames_2_2 - - try: - data = _fullread(fileobj, self.size - 10) - except (ValueError, EOFError, IOError) as e: - raise error(e) - - for frame in self.__read_frames(data, frames=frames): - if isinstance(frame, Frame): - self.add(frame) - else: - self.unknown_frames.append(frame) - self.__unknown_version = self.version[:2] - - if translate: - if v2_version == 3: - self.update_to_v23() - else: - self.update_to_v24() - - def getall(self, key): - """Return all frames with a given name (the list may be empty). - - This is best explained by examples:: - - id3.getall('TIT2') == [id3['TIT2']] - id3.getall('TTTT') == [] - id3.getall('TXXX') == [TXXX(desc='woo', text='bar'), - TXXX(desc='baz', text='quuuux'), ...] - - Since this is based on the frame's HashKey, which is - colon-separated, you can use it to do things like - ``getall('COMM:MusicMatch')`` or ``getall('TXXX:QuodLibet:')``. 
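# Sketch of the getall/setall accessors described above, assuming a
# hypothetical "song.mp3" with an existing tag:
from mutagen.id3 import ID3, TXXX

tags = ID3("song.mp3")
for frame in tags.getall("TXXX"):
    print(frame.HashKey, frame.text)
tags.setall("TXXX", [TXXX(encoding=3, desc=u"origin", text=[u"rip"])])
tags.save()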
- """ - if key in self: - return [self[key]] - else: - key = key + ":" - return [v for s, v in self.items() if s.startswith(key)] - - def delall(self, key): - """Delete all tags of a given kind; see getall.""" - if key in self: - del(self[key]) - else: - key = key + ":" - for k in list(self.keys()): - if k.startswith(key): - del(self[k]) - - def setall(self, key, values): - """Delete frames of the given type and add frames in 'values'.""" - self.delall(key) - for tag in values: - self[tag.HashKey] = tag - - def pprint(self): - """Return tags in a human-readable format. - - "Human-readable" is used loosely here. The format is intended - to mirror that used for Vorbis or APEv2 output, e.g. - - ``TIT2=My Title`` - - However, ID3 frames can have multiple keys: - - ``POPM=user@example.org=3 128/255`` - """ - frames = sorted(Frame.pprint(s) for s in self.values()) - return "\n".join(frames) - - def loaded_frame(self, tag): - """Deprecated; use the add method.""" - # turn 2.2 into 2.3/2.4 tags - if len(type(tag).__name__) == 3: - tag = type(tag).__base__(tag) - self[tag.HashKey] = tag - - # add = loaded_frame (and vice versa) break applications that - # expect to be able to override loaded_frame (e.g. Quod Libet), - # as does making loaded_frame call add. - def add(self, frame): - """Add a frame to the tag.""" - return self.loaded_frame(frame) - - def __read_frames(self, data, frames): - assert self.version >= ID3Header._V22 - - if self.version < ID3Header._V24 and self.f_unsynch: - try: - data = unsynch.decode(data) - except ValueError: - pass - - if self.version >= ID3Header._V23: - if self.version < ID3Header._V24: - bpi = int - else: - bpi = _determine_bpi(data, frames) - - while data: - header = data[:10] - try: - name, size, flags = unpack('>4sLH', header) - except struct.error: - return # not enough header - if name.strip(b'\x00') == b'': - return - - size = bpi(size) - framedata = data[10:10 + size] - data = data[10 + size:] - self._padding = len(data) - if size == 0: - continue # drop empty frames - - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue - - try: - # someone writes 2.3 frames with 2.2 names - if name[-1] == "\x00": - tag = Frames_2_2[name[:-1]] - name = tag.__base__.__name__ - - tag = frames[name] - except KeyError: - if is_valid_frame_id(name): - yield header + framedata - else: - try: - yield tag._fromData(self._header, flags, framedata) - except NotImplementedError: - yield header + framedata - except ID3JunkFrameError: - pass - elif self.version >= ID3Header._V22: - while data: - header = data[0:6] - try: - name, size = unpack('>3s3s', header) - except struct.error: - return # not enough header - size, = struct.unpack('>L', b'\x00' + size) - if name.strip(b'\x00') == b'': - return - - framedata = data[6:6 + size] - data = data[6 + size:] - self._padding = len(data) - if size == 0: - continue # drop empty frames - - if PY3: - try: - name = name.decode('ascii') - except UnicodeDecodeError: - continue - - try: - tag = frames[name] - except KeyError: - if is_valid_frame_id(name): - yield header + framedata - else: - try: - yield tag._fromData(self._header, 0, framedata) - except (ID3EncryptionUnsupportedError, - NotImplementedError): - yield header + framedata - except ID3JunkFrameError: - pass - - def _prepare_data(self, fileobj, start, available, v2_version, v23_sep, - pad_func): - if v2_version == 3: - version = ID3Header._V23 - elif v2_version == 4: - version = ID3Header._V24 - else: - raise ValueError("Only 3 or 4 allowed for 
v2_version") - - # Sort frames by 'importance' - order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"] - order = dict((b, a) for a, b in enumerate(order)) - last = len(order) - frames = sorted(self.items(), - key=lambda a: (order.get(a[0][:4], last), a[0])) - - framedata = [self.__save_frame(frame, version=version, v23_sep=v23_sep) - for (key, frame) in frames] - - # only write unknown frames if they were loaded from the version - # we are saving with or upgraded to it - if self.__unknown_version == version[:2]: - framedata.extend(data for data in self.unknown_frames - if len(data) > 10) - - needed = sum(map(len, framedata)) + 10 - - fileobj.seek(0, 2) - trailing_size = fileobj.tell() - start - - info = PaddingInfo(available - needed, trailing_size) - new_padding = info._get_padding(pad_func) - if new_padding < 0: - raise error("invalid padding") - new_size = needed + new_padding - - new_framesize = BitPaddedInt.to_str(new_size - 10, width=4) - header = pack('>3sBBB4s', b'ID3', v2_version, 0, 0, new_framesize) - - data = bytearray(header) - for frame in framedata: - data += frame - assert new_size >= len(data) - data += (new_size - len(data)) * b'\x00' - assert new_size == len(data) - - return data - - def save(self, filename=None, v1=1, v2_version=4, v23_sep='/', - padding=None): - """Save changes to a file. - - Args: - filename: - Filename to save the tag to. If no filename is given, - the one most recently loaded is used. - v1 (ID3v1SaveOptions): - if 0, ID3v1 tags will be removed. - if 1, ID3v1 tags will be updated but not added. - if 2, ID3v1 tags will be created and/or updated - v2 (int): - version of ID3v2 tags (3 or 4). - v23_sep (str): - the separator used to join multiple text values - if v2_version == 3. Defaults to '/' but if it's None - will be the ID3v2v2.4 null separator. - padding (function): - A function taking a PaddingInfo which should - return the amount of padding to use. If None (default) - will default to something reasonable. - - By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 - tags, you must call method update_to_v23 before saving the file. - - The lack of a way to update only an ID3v1 tag is intentional. - - Can raise id3.error. - """ - - if filename is None: - filename = self.filename - - try: - f = open(filename, 'rb+') - except IOError as err: - from errno import ENOENT - if err.errno != ENOENT: - raise - f = open(filename, 'ab') # create, then reopen - f = open(filename, 'rb+') - - try: - try: - header = ID3Header(f) - except ID3NoHeaderError: - old_size = 0 - else: - old_size = header.size - - data = self._prepare_data( - f, 0, old_size, v2_version, v23_sep, padding) - new_size = len(data) - - if (old_size < new_size): - insert_bytes(f, new_size - old_size, old_size) - elif (old_size > new_size): - delete_bytes(f, old_size - new_size, new_size) - f.seek(0) - f.write(data) - - self.__save_v1(f, v1) - - finally: - f.close() - - def __save_v1(self, f, v1): - tag, offset = _find_id3v1(f) - has_v1 = tag is not None - - f.seek(offset, 2) - if v1 == ID3v1SaveOptions.UPDATE and has_v1 or \ - v1 == ID3v1SaveOptions.CREATE: - f.write(MakeID3v1(self)) - else: - f.truncate() - - def delete(self, filename=None, delete_v1=True, delete_v2=True): - """Remove tags from a file. - - If no filename is given, the one most recently loaded is used. 
- - Keyword arguments: - - * delete_v1 -- delete any ID3v1 tag - * delete_v2 -- delete any ID3v2 tag - """ - if filename is None: - filename = self.filename - delete(filename, delete_v1, delete_v2) - self.clear() - - def __save_frame(self, frame, name=None, version=ID3Header._V24, - v23_sep=None): - flags = 0 - if isinstance(frame, TextFrame): - if len(str(frame)) == 0: - return b'' - - if version == ID3Header._V23: - framev23 = frame._get_v23_frame(sep=v23_sep) - framedata = framev23._writeData() - else: - framedata = frame._writeData() - - usize = len(framedata) - if usize > 2048: - # Disabled as this causes iTunes and other programs - # to fail to find these frames, which usually includes - # e.g. APIC. - # framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib') - # flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN - pass - - if version == ID3Header._V24: - bits = 7 - elif version == ID3Header._V23: - bits = 8 - else: - raise ValueError - - datasize = BitPaddedInt.to_str(len(framedata), width=4, bits=bits) - - if name is not None: - assert isinstance(name, bytes) - frame_name = name - else: - frame_name = type(frame).__name__ - if PY3: - frame_name = frame_name.encode("ascii") - - header = pack('>4s4sH', frame_name, datasize, flags) - return header + framedata - - def __update_common(self): - """Updates done by both v23 and v24 update""" - - if "TCON" in self: - # Get rid of "(xx)Foobr" format. - self["TCON"].genres = self["TCON"].genres - - # ID3v2.2 LNK frames are just way too different to upgrade. - for frame in self.getall("LINK"): - if len(frame.frameid) != 4: - del self[frame.HashKey] - - mimes = {"PNG": "image/png", "JPG": "image/jpeg"} - for pic in self.getall("APIC"): - if pic.mime in mimes: - newpic = APIC( - encoding=pic.encoding, mime=mimes[pic.mime], - type=pic.type, desc=pic.desc, data=pic.data) - self.add(newpic) - - def update_to_v24(self): - """Convert older tags into an ID3v2.4 tag. - - This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to - TDRC). If you intend to save tags, you must call this function - at some point; it is called by default when loading the tag. - """ - - self.__update_common() - - if self.__unknown_version == (2, 3): - # convert unknown 2.3 frames (flags/size) to 2.4 - converted = [] - for frame in self.unknown_frames: - try: - name, size, flags = unpack('>4sLH', frame[:10]) - except struct.error: - continue - - try: - frame = BinaryFrame._fromData( - self._header, flags, frame[10:]) - except (error, NotImplementedError): - continue - - converted.append(self.__save_frame(frame, name=name)) - self.unknown_frames[:] = converted - self.__unknown_version = (2, 4) - - # TDAT, TYER, and TIME have been turned into TDRC. - try: - date = text_type(self.get("TYER", "")) - if date.strip(u"\x00"): - self.pop("TYER") - dat = text_type(self.get("TDAT", "")) - if dat.strip("\x00"): - self.pop("TDAT") - date = "%s-%s-%s" % (date, dat[2:], dat[:2]) - time = text_type(self.get("TIME", "")) - if time.strip("\x00"): - self.pop("TIME") - date += "T%s:%s:00" % (time[:2], time[2:]) - if "TDRC" not in self: - self.add(TDRC(encoding=0, text=date)) - except UnicodeDecodeError: - # Old ID3 tags have *lots* of Unicode problems, so if TYER - # is bad, just chuck the frames. - pass - - # TORY can be the first part of a TDOR. - if "TORY" in self: - f = self.pop("TORY") - if "TDOR" not in self: - try: - self.add(TDOR(encoding=0, text=str(f))) - except UnicodeDecodeError: - pass - - # IPLS is now TIPL. 
- if "IPLS" in self: - f = self.pop("IPLS") - if "TIPL" not in self: - self.add(TIPL(encoding=f.encoding, people=f.people)) - - # These can't be trivially translated to any ID3v2.4 tags, or - # should have been removed already. - for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME", "CRM"]: - if key in self: - del(self[key]) - - def update_to_v23(self): - """Convert older (and newer) tags into an ID3v2.3 tag. - - This updates incompatible ID3v2 frames to ID3v2.3 ones. If you - intend to save tags as ID3v2.3, you must call this function - at some point. - - If you want to to go off spec and include some v2.4 frames - in v2.3, remove them before calling this and add them back afterwards. - """ - - self.__update_common() - - # we could downgrade unknown v2.4 frames here, but given that - # the main reason to save v2.3 is compatibility and this - # might increase the chance of some parser breaking.. better not - - # TMCL, TIPL -> TIPL - if "TIPL" in self or "TMCL" in self: - people = [] - if "TIPL" in self: - f = self.pop("TIPL") - people.extend(f.people) - if "TMCL" in self: - f = self.pop("TMCL") - people.extend(f.people) - if "IPLS" not in self: - self.add(IPLS(encoding=f.encoding, people=people)) - - # TDOR -> TORY - if "TDOR" in self: - f = self.pop("TDOR") - if f.text: - d = f.text[0] - if d.year and "TORY" not in self: - self.add(TORY(encoding=f.encoding, text="%04d" % d.year)) - - # TDRC -> TYER, TDAT, TIME - if "TDRC" in self: - f = self.pop("TDRC") - if f.text: - d = f.text[0] - if d.year and "TYER" not in self: - self.add(TYER(encoding=f.encoding, text="%04d" % d.year)) - if d.month and d.day and "TDAT" not in self: - self.add(TDAT(encoding=f.encoding, - text="%02d%02d" % (d.day, d.month))) - if d.hour and d.minute and "TIME" not in self: - self.add(TIME(encoding=f.encoding, - text="%02d%02d" % (d.hour, d.minute))) - - # New frames added in v2.4 - v24_frames = [ - 'ASPI', 'EQU2', 'RVA2', 'SEEK', 'SIGN', 'TDEN', 'TDOR', - 'TDRC', 'TDRL', 'TDTG', 'TIPL', 'TMCL', 'TMOO', 'TPRO', - 'TSOA', 'TSOP', 'TSOT', 'TSST', - ] - - for key in v24_frames: - if key in self: - del(self[key]) - - -def delete(filename, delete_v1=True, delete_v2=True): - """Remove tags from a file. - - Keyword arguments: - - * delete_v1 -- delete any ID3v1 tag - * delete_v2 -- delete any ID3v2 tag - """ - - with open(filename, 'rb+') as f: - - if delete_v1: - tag, offset = _find_id3v1(f) - if tag is not None: - f.seek(offset, 2) - f.truncate() - - # technically an insize=0 tag is invalid, but we delete it anyway - # (primarily because we used to write it) - if delete_v2: - f.seek(0, 0) - idata = f.read(10) - try: - id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata) - except struct.error: - id3, insize = b'', -1 - insize = BitPaddedInt(insize) - if id3 == b'ID3' and insize >= 0: - delete_bytes(f, insize + 10, 0) - - -# support open(filename) as interface -Open = ID3 - - -def _determine_bpi(data, frames, EMPTY=b"\x00" * 10): - """Takes id3v2.4 frame data and determines if ints or bitpaddedints - should be used for parsing. Needed because iTunes used to write - normal ints for frame sizes. 
- """ - - # count number of tags found as BitPaddedInt and how far past - o = 0 - asbpi = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - bpioff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - size = BitPaddedInt(size) - o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue - if name in frames: - asbpi += 1 - else: - bpioff = o - len(data) - - # count number of tags found as int and how far past - o = 0 - asint = 0 - while o < len(data) - 10: - part = data[o:o + 10] - if part == EMPTY: - intoff = -((len(data) - o) % 10) - break - name, size, flags = unpack('>4sLH', part) - o += 10 + size - if PY3: - try: - name = name.decode("ascii") - except UnicodeDecodeError: - continue - if name in frames: - asint += 1 - else: - intoff = o - len(data) - - # if more tags as int, or equal and bpi is past and int is not - if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)): - return int - return BitPaddedInt - - -def _find_id3v1(fileobj): - """Returns a tuple of (id3tag, offset_to_end) or (None, 0) - - offset mainly because we used to write too short tags in some cases and - we need the offset to delete them. - """ - - # id3v1 is always at the end (after apev2) - - extra_read = b"APETAGEX".index(b"TAG") - - try: - fileobj.seek(-128 - extra_read, 2) - except IOError as e: - if e.errno == errno.EINVAL: - # If the file is too small, might be ok since we wrote too small - # tags at some point. let's see how the parsing goes.. - fileobj.seek(0, 0) - else: - raise - - data = fileobj.read(128 + extra_read) - try: - idx = data.index(b"TAG") - except ValueError: - return (None, 0) - else: - # FIXME: make use of the apev2 parser here - # if TAG is part of APETAGEX assume this is an APEv2 tag - try: - ape_idx = data.index(b"APETAGEX") - except ValueError: - pass - else: - if idx == ape_idx + extra_read: - return (None, 0) - - tag = ParseID3v1(data[idx:]) - if tag is None: - return (None, 0) - - offset = idx - len(data) - return (tag, offset) - - -# ID3v1.1 support. -def ParseID3v1(data): - """Parse an ID3v1 tag, returning a list of ID3v2.4 frames. - - Returns a {frame_name: frame} dict or None. - """ - - try: - data = data[data.index(b"TAG"):] - except ValueError: - return None - if 128 < len(data) or len(data) < 124: - return None - - # Issue #69 - Previous versions of Mutagen, when encountering - # out-of-spec TDRC and TYER frames of less than four characters, - # wrote only the characters available - e.g. "1" or "" - into the - # year field. To parse those, reduce the size of the year field. - # Amazingly, "0s" works as a struct format string. 
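To make the comment above concrete: the %ds substitution yields a year field between 0 and 4 bytes wide, and struct.calcsize confirms both extremes (a sketch, standard library only):

    import struct

    # Spec-complete 128-byte ID3v1 tag: the year field is 4 bytes wide.
    assert struct.calcsize("3s30s30s30s%ds29sBB" % 4) == 128
    # Fully truncated year: "0s" is a valid zero-length struct field.
    assert struct.calcsize("3s30s30s30s0s29sBB") == 124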
- unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124) - - try: - tag, title, artist, album, year, comment, track, genre = unpack( - unpack_fmt, data) - except StructError: - return None - - if tag != b"TAG": - return None - - def fix(data): - return data.split(b"\x00")[0].strip().decode('latin1') - - title, artist, album, year, comment = map( - fix, [title, artist, album, year, comment]) - - frames = {} - if title: - frames["TIT2"] = TIT2(encoding=0, text=title) - if artist: - frames["TPE1"] = TPE1(encoding=0, text=[artist]) - if album: - frames["TALB"] = TALB(encoding=0, text=album) - if year: - frames["TDRC"] = TDRC(encoding=0, text=year) - if comment: - frames["COMM"] = COMM( - encoding=0, lang="eng", desc="ID3v1 Comment", text=comment) - # Don't read a track number if it looks like the comment was - # padded with spaces instead of nulls (thanks, WinAmp). - if track and ((track != 32) or (data[-3] == b'\x00'[0])): - frames["TRCK"] = TRCK(encoding=0, text=str(track)) - if genre != 255: - frames["TCON"] = TCON(encoding=0, text=str(genre)) - return frames - - -def MakeID3v1(id3): - """Return an ID3v1.1 tag string from a dict of ID3v2.4 frames.""" - - v1 = {} - - for v2id, name in {"TIT2": "title", "TPE1": "artist", - "TALB": "album"}.items(): - if v2id in id3: - text = id3[v2id].text[0].encode('latin1', 'replace')[:30] - else: - text = b"" - v1[name] = text + (b"\x00" * (30 - len(text))) - - if "COMM" in id3: - cmnt = id3["COMM"].text[0].encode('latin1', 'replace')[:28] - else: - cmnt = b"" - v1["comment"] = cmnt + (b"\x00" * (29 - len(cmnt))) - - if "TRCK" in id3: - try: - v1["track"] = chr_(+id3["TRCK"]) - except ValueError: - v1["track"] = b"\x00" - else: - v1["track"] = b"\x00" - - if "TCON" in id3: - try: - genre = id3["TCON"].genres[0] - except IndexError: - pass - else: - if genre in TCON.GENRES: - v1["genre"] = chr_(TCON.GENRES.index(genre)) - if "genre" not in v1: - v1["genre"] = b"\xff" - - if "TDRC" in id3: - year = text_type(id3["TDRC"]).encode('ascii') - elif "TYER" in id3: - year = text_type(id3["TYER"]).encode('ascii') - else: - year = b"" - v1["year"] = (year + b"\x00\x00\x00\x00")[:4] - - return ( - b"TAG" + - v1["title"] + - v1["artist"] + - v1["album"] + - v1["year"] + - v1["comment"] + - v1["track"] + - v1["genre"] - ) - - -class ID3FileType(mutagen.FileType): - """An unknown type of file with ID3 tags.""" - - ID3 = ID3 - - class _Info(mutagen.StreamInfo): - length = 0 - - def __init__(self, fileobj, offset): - pass - - @staticmethod - def pprint(): - return "Unknown format with ID3 tag" - - @staticmethod - def score(filename, fileobj, header_data): - return header_data.startswith(b"ID3") - - def add_tags(self, ID3=None): - """Add an empty ID3 tag to the file. - - A custom tag reader may be used in instead of the default - mutagen.id3.ID3 object, e.g. an EasyID3 reader. - """ - if ID3 is None: - ID3 = self.ID3 - if self.tags is None: - self.ID3 = ID3 - self.tags = ID3() - else: - raise error("an ID3 tag already exists") - - def load(self, filename, ID3=None, **kwargs): - """Load stream and tag information from a file. - - A custom tag reader may be used in instead of the default - mutagen.id3.ID3 object, e.g. an EasyID3 reader. - """ - - if ID3 is None: - ID3 = self.ID3 - else: - # If this was initialized with EasyID3, remember that for - # when tags are auto-instantiated in add_tags. 
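A quick round trip through the two ID3v1 helpers above (a sketch, assuming this vendored mutagen is importable; the field values are arbitrary examples). The body of load() continues below.

    from mutagen.id3 import TIT2, TPE1, MakeID3v1, ParseID3v1

    raw = MakeID3v1({"TIT2": TIT2(encoding=0, text=["Title"]),
                     "TPE1": TPE1(encoding=0, text=["Artist"])})
    assert len(raw) == 128 and raw.startswith(b"TAG")
    assert str(ParseID3v1(raw)["TIT2"]) == "Title"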
- self.ID3 = ID3 - self.filename = filename - try: - self.tags = ID3(filename, **kwargs) - except ID3NoHeaderError: - self.tags = None - - if self.tags is not None: - try: - offset = self.tags.size - except AttributeError: - offset = None - else: - offset = None - - with open(filename, "rb") as fileobj: - self.info = self._Info(fileobj, offset) diff --git a/resources/lib/libraries/mutagen/id3/__pycache__/__init__.cpython-35.pyc b/resources/lib/libraries/mutagen/id3/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index f423db1a..00000000 Binary files a/resources/lib/libraries/mutagen/id3/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/id3/__pycache__/_frames.cpython-35.pyc b/resources/lib/libraries/mutagen/id3/__pycache__/_frames.cpython-35.pyc deleted file mode 100644 index 0331df59..00000000 Binary files a/resources/lib/libraries/mutagen/id3/__pycache__/_frames.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/id3/__pycache__/_specs.cpython-35.pyc b/resources/lib/libraries/mutagen/id3/__pycache__/_specs.cpython-35.pyc deleted file mode 100644 index 9af16aa1..00000000 Binary files a/resources/lib/libraries/mutagen/id3/__pycache__/_specs.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/id3/__pycache__/_util.cpython-35.pyc b/resources/lib/libraries/mutagen/id3/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index c2c75dad..00000000 Binary files a/resources/lib/libraries/mutagen/id3/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/id3/_frames.py b/resources/lib/libraries/mutagen/id3/_frames.py deleted file mode 100644 index c185cef3..00000000 --- a/resources/lib/libraries/mutagen/id3/_frames.py +++ /dev/null @@ -1,1925 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import zlib -from struct import unpack - -from ._util import ID3JunkFrameError, ID3EncryptionUnsupportedError, unsynch -from ._specs import ( - BinaryDataSpec, StringSpec, Latin1TextSpec, EncodedTextSpec, ByteSpec, - EncodingSpec, ASPIIndexSpec, SizedIntegerSpec, IntegerSpec, - VolumeAdjustmentsSpec, VolumePeakSpec, VolumeAdjustmentSpec, - ChannelSpec, MultiSpec, SynchronizedTextSpec, KeyEventSpec, TimeStampSpec, - EncodedNumericPartTextSpec, EncodedNumericTextSpec, SpecError) -from .._compat import text_type, string_types, swap_to_string, iteritems, izip - - -def is_valid_frame_id(frame_id): - return frame_id.isalnum() and frame_id.isupper() - - -def _bytes2key(b): - assert isinstance(b, bytes) - - return b.decode("latin1") - - -class Frame(object): - """Fundamental unit of ID3 data. - - ID3 tags are split into frames. Each frame has a potentially - different structure, and so this base class is not very featureful. 
- """ - - FLAG23_ALTERTAG = 0x8000 - FLAG23_ALTERFILE = 0x4000 - FLAG23_READONLY = 0x2000 - FLAG23_COMPRESS = 0x0080 - FLAG23_ENCRYPT = 0x0040 - FLAG23_GROUP = 0x0020 - - FLAG24_ALTERTAG = 0x4000 - FLAG24_ALTERFILE = 0x2000 - FLAG24_READONLY = 0x1000 - FLAG24_GROUPID = 0x0040 - FLAG24_COMPRESS = 0x0008 - FLAG24_ENCRYPT = 0x0004 - FLAG24_UNSYNCH = 0x0002 - FLAG24_DATALEN = 0x0001 - - _framespec = [] - - def __init__(self, *args, **kwargs): - if len(args) == 1 and len(kwargs) == 0 and \ - isinstance(args[0], type(self)): - other = args[0] - # ask the sub class to fill in our data - other._to_other(self) - else: - for checker, val in izip(self._framespec, args): - setattr(self, checker.name, checker.validate(self, val)) - for checker in self._framespec[len(args):]: - try: - validated = checker.validate( - self, kwargs.get(checker.name, None)) - except ValueError as e: - raise ValueError("%s: %s" % (checker.name, e)) - setattr(self, checker.name, validated) - - def _to_other(self, other): - # this impl covers subclasses with the same framespec - if other._framespec is not self._framespec: - raise ValueError - - for checker in other._framespec: - setattr(other, checker.name, getattr(self, checker.name)) - - def _get_v23_frame(self, **kwargs): - """Returns a frame copy which is suitable for writing into a v2.3 tag. - - kwargs get passed to the specs. - """ - - new_kwargs = {} - for checker in self._framespec: - name = checker.name - value = getattr(self, name) - new_kwargs[name] = checker._validate23(self, value, **kwargs) - return type(self)(**new_kwargs) - - @property - def HashKey(self): - """An internal key used to ensure frame uniqueness in a tag""" - - return self.FrameID - - @property - def FrameID(self): - """ID3v2 three or four character frame ID""" - - return type(self).__name__ - - def __repr__(self): - """Python representation of a frame. - - The string returned is a valid Python expression to construct - a copy of this frame. - """ - kw = [] - for attr in self._framespec: - # so repr works during __init__ - if hasattr(self, attr.name): - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - return '%s(%s)' % (type(self).__name__, ', '.join(kw)) - - def _readData(self, data): - """Raises ID3JunkFrameError; Returns leftover data""" - - for reader in self._framespec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - raise ID3JunkFrameError("no data left") - setattr(self, reader.name, value) - - return data - - def _writeData(self): - data = [] - for writer in self._framespec: - data.append(writer.write(self, getattr(self, writer.name))) - return b''.join(data) - - def pprint(self): - """Return a human-readable representation of the frame.""" - return "%s=%s" % (type(self).__name__, self._pprint()) - - def _pprint(self): - return "[unrepresentable data]" - - @classmethod - def _fromData(cls, id3, tflags, data): - """Construct this ID3 frame from raw string data. - - Raises: - - ID3JunkFrameError in case parsing failed - NotImplementedError in case parsing isn't implemented - ID3EncryptionUnsupportedError in case the frame is encrypted. - """ - - if id3.version >= id3._V24: - if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN): - # The data length int is syncsafe in 2.4 (but not 2.3). - # However, we don't actually need the data length int, - # except to work around a QL 0.12 bug, and in that case - # all we need are the raw bytes. 
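As the constructor above shows, passing an existing frame of the same type copies it through _to_other(); a small usage sketch (assuming mutagen is importable). The _fromData() body picks up again below.

    from mutagen.id3 import TIT2

    original = TIT2(encoding=3, text=["Example"])
    duplicate = TIT2(original)   # copy-construct through _to_other()
    assert duplicate == original and duplicate.HashKey == "TIT2"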
- datalen_bytes = data[:4] - data = data[4:] - if tflags & Frame.FLAG24_UNSYNCH or id3.f_unsynch: - try: - data = unsynch.decode(data) - except ValueError: - # Some things write synch-unsafe data with either the frame - # or global unsynch flag set. Try to load them as is. - # https://bitbucket.org/lazka/mutagen/issue/210 - # https://bitbucket.org/lazka/mutagen/issue/223 - pass - if tflags & Frame.FLAG24_ENCRYPT: - raise ID3EncryptionUnsupportedError - if tflags & Frame.FLAG24_COMPRESS: - try: - data = zlib.decompress(data) - except zlib.error as err: - # the initial mutagen that went out with QL 0.12 did not - # write the 4 bytes of uncompressed size. Compensate. - data = datalen_bytes + data - try: - data = zlib.decompress(data) - except zlib.error as err: - raise ID3JunkFrameError( - 'zlib: %s: %r' % (err, data)) - - elif id3.version >= id3._V23: - if tflags & Frame.FLAG23_COMPRESS: - usize, = unpack('>L', data[:4]) - data = data[4:] - if tflags & Frame.FLAG23_ENCRYPT: - raise ID3EncryptionUnsupportedError - if tflags & Frame.FLAG23_COMPRESS: - try: - data = zlib.decompress(data) - except zlib.error as err: - raise ID3JunkFrameError('zlib: %s: %r' % (err, data)) - - frame = cls() - frame._readData(data) - return frame - - def __hash__(self): - raise TypeError("Frame objects are unhashable") - - -class FrameOpt(Frame): - """A frame with optional parts. - - Some ID3 frames have optional data; this class extends Frame to - provide support for those parts. - """ - - _optionalspec = [] - - def __init__(self, *args, **kwargs): - super(FrameOpt, self).__init__(*args, **kwargs) - for spec in self._optionalspec: - if spec.name in kwargs: - validated = spec.validate(self, kwargs[spec.name]) - setattr(self, spec.name, validated) - else: - break - - def _to_other(self, other): - super(FrameOpt, self)._to_other(other) - - # this impl covers subclasses with the same optionalspec - if other._optionalspec is not self._optionalspec: - raise ValueError - - for checker in other._optionalspec: - if hasattr(self, checker.name): - setattr(other, checker.name, getattr(self, checker.name)) - - def _readData(self, data): - """Raises ID3JunkFrameError; Returns leftover data""" - - for reader in self._framespec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - raise ID3JunkFrameError("no data left") - setattr(self, reader.name, value) - - if data: - for reader in self._optionalspec: - if len(data): - try: - value, data = reader.read(self, data) - except SpecError as e: - raise ID3JunkFrameError(e) - else: - break - setattr(self, reader.name, value) - - return data - - def _writeData(self): - data = [] - for writer in self._framespec: - data.append(writer.write(self, getattr(self, writer.name))) - for writer in self._optionalspec: - try: - data.append(writer.write(self, getattr(self, writer.name))) - except AttributeError: - break - return b''.join(data) - - def __repr__(self): - kw = [] - for attr in self._framespec: - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - for attr in self._optionalspec: - if hasattr(self, attr.name): - kw.append('%s=%r' % (attr.name, getattr(self, attr.name))) - return '%s(%s)' % (type(self).__name__, ', '.join(kw)) - - -@swap_to_string -class TextFrame(Frame): - """Text strings. - - Text frames support casts to unicode or str objects, as well as - list-like indexing, extend, and append. - - Iterating over a TextFrame iterates over its strings, not its - characters. 
- - Text frames have a 'text' attribute which is the list of strings, - and an 'encoding' attribute; 0 for ISO-8859 1, 1 UTF-16, 2 for - UTF-16BE, and 3 for UTF-8. If you don't want to worry about - encodings, just set it to 3. - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'), - ] - - def __bytes__(self): - return text_type(self).encode('utf-8') - - def __str__(self): - return u'\u0000'.join(self.text) - - def __eq__(self, other): - if isinstance(other, bytes): - return bytes(self) == other - elif isinstance(other, text_type): - return text_type(self) == other - return self.text == other - - __hash__ = Frame.__hash__ - - def __getitem__(self, item): - return self.text[item] - - def __iter__(self): - return iter(self.text) - - def append(self, value): - """Append a string.""" - - return self.text.append(value) - - def extend(self, value): - """Extend the list by appending all strings from the given list.""" - - return self.text.extend(value) - - def _pprint(self): - return " / ".join(self.text) - - -class NumericTextFrame(TextFrame): - """Numerical text strings. - - The numeric value of these frames can be gotten with unary plus, e.g.:: - - frame = TLEN('12345') - length = +frame - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedNumericTextSpec('text'), sep=u'\u0000'), - ] - - def __pos__(self): - """Return the numerical value of the string.""" - return int(self.text[0]) - - -class NumericPartTextFrame(TextFrame): - """Multivalue numerical text strings. - - These strings indicate 'part (e.g. track) X of Y', and unary plus - returns the first value:: - - frame = TRCK('4/15') - track = +frame # track == 4 - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', EncodedNumericPartTextSpec('text'), sep=u'\u0000'), - ] - - def __pos__(self): - return int(self.text[0].split("/")[0]) - - -@swap_to_string -class TimeStampTextFrame(TextFrame): - """A list of time stamps. - - The 'text' attribute in this frame is a list of ID3TimeStamp - objects, not a list of strings. - """ - - _framespec = [ - EncodingSpec('encoding'), - MultiSpec('text', TimeStampSpec('stamp'), sep=u','), - ] - - def __bytes__(self): - return text_type(self).encode('utf-8') - - def __str__(self): - return u','.join([stamp.text for stamp in self.text]) - - def _pprint(self): - return u" / ".join([stamp.text for stamp in self.text]) - - -@swap_to_string -class UrlFrame(Frame): - """A frame containing a URL string. - - The ID3 specification is silent about IRIs and normalized URL - forms. Mutagen assumes all URLs in files are encoded as Latin 1, - but string conversion of this frame returns a UTF-8 representation - for compatibility with other string conversions. - - The only sane way to handle URLs in MP3s is to restrict them to - ASCII. 
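The numeric frame types above are easiest to see in action (a sketch, assuming mutagen is importable; the values are arbitrary):

    from mutagen.id3 import TRCK, TLEN

    track = TRCK(encoding=3, text=["4/15"])
    assert +track == 4 and track[0] == "4/15"   # NumericPartTextFrame

    length = TLEN(encoding=3, text=["12345"])
    assert +length == 12345                     # NumericTextFrame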
- """ - - _framespec = [Latin1TextSpec('url')] - - def __bytes__(self): - return self.url.encode('utf-8') - - def __str__(self): - return self.url - - def __eq__(self, other): - return self.url == other - - __hash__ = Frame.__hash__ - - def _pprint(self): - return self.url - - -class UrlFrameU(UrlFrame): - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.url) - - -class TALB(TextFrame): - "Album" - - -class TBPM(NumericTextFrame): - "Beats per minute" - - -class TCOM(TextFrame): - "Composer" - - -class TCON(TextFrame): - """Content type (Genre) - - ID3 has several ways genres can be represented; for convenience, - use the 'genres' property rather than the 'text' attribute. - """ - - from mutagen._constants import GENRES - GENRES = GENRES - - def __get_genres(self): - genres = [] - import re - genre_re = re.compile(r"((?:\((?P<id>[0-9]+|RX|CR)\))*)(?P<str>.+)?") - for value in self.text: - # 255 possible entries in id3v1 - if value.isdigit() and int(value) < 256: - try: - genres.append(self.GENRES[int(value)]) - except IndexError: - genres.append(u"Unknown") - elif value == "CR": - genres.append(u"Cover") - elif value == "RX": - genres.append(u"Remix") - elif value: - newgenres = [] - genreid, dummy, genrename = genre_re.match(value).groups() - - if genreid: - for gid in genreid[1:-1].split(")("): - if gid.isdigit() and int(gid) < len(self.GENRES): - gid = text_type(self.GENRES[int(gid)]) - newgenres.append(gid) - elif gid == "CR": - newgenres.append(u"Cover") - elif gid == "RX": - newgenres.append(u"Remix") - else: - newgenres.append(u"Unknown") - - if genrename: - # "Unescaping" the first parenthesis - if genrename.startswith("(("): - genrename = genrename[1:] - if genrename not in newgenres: - newgenres.append(genrename) - - genres.extend(newgenres) - - return genres - - def __set_genres(self, genres): - if isinstance(genres, string_types): - genres = [genres] - self.text = [self.__decode(g) for g in genres] - - def __decode(self, value): - if isinstance(value, bytes): - enc = EncodedTextSpec._encodings[self.encoding][0] - return value.decode(enc) - else: - return value - - genres = property(__get_genres, __set_genres, None, - "A list of genres parsed from the raw text data.") - - def _pprint(self): - return " / ".join(self.genres) - - -class TCOP(TextFrame): - "Copyright (c)" - - -class TCMP(NumericTextFrame): - "iTunes Compilation Flag" - - -class TDAT(TextFrame): - "Date of recording (DDMM)" - - -class TDEN(TimeStampTextFrame): - "Encoding Time" - - -class TDES(TextFrame): - "iTunes Podcast Description" - - -class TDOR(TimeStampTextFrame): - "Original Release Time" - - -class TDLY(NumericTextFrame): - "Audio Delay (ms)" - - -class TDRC(TimeStampTextFrame): - "Recording Time" - - -class TDRL(TimeStampTextFrame): - "Release Time" - - -class TDTG(TimeStampTextFrame): - "Tagging Time" - - -class TENC(TextFrame): - "Encoder" - - -class TEXT(TextFrame): - "Lyricist" - - -class TFLT(TextFrame): - "File type" - - -class TGID(TextFrame): - "iTunes Podcast Identifier" - - -class TIME(TextFrame): - "Time of recording (HHMM)" - - -class TIT1(TextFrame): - "Content group description" - - -class TIT2(TextFrame): - "Title" - - -class TIT3(TextFrame): - "Subtitle/Description refinement" - - -class TKEY(TextFrame): - "Starting Key" - - -class TLAN(TextFrame): - "Audio Languages" - - -class TLEN(NumericTextFrame): - "Audio Length (ms)" - - -class TMED(TextFrame): - "Source Media Type" - - -class TMOO(TextFrame): - "Mood" - - -class TOAL(TextFrame): - "Original Album" - - -class 
TOFN(TextFrame):
-    "Original Filename"
-
-
-class TOLY(TextFrame):
-    "Original Lyricist"
-
-
-class TOPE(TextFrame):
-    "Original Artist/Performer"
-
-
-class TORY(NumericTextFrame):
-    "Original Release Year"
-
-
-class TOWN(TextFrame):
-    "Owner/Licensee"
-
-
-class TPE1(TextFrame):
-    "Lead Artist/Performer/Soloist/Group"
-
-
-class TPE2(TextFrame):
-    "Band/Orchestra/Accompaniment"
-
-
-class TPE3(TextFrame):
-    "Conductor"
-
-
-class TPE4(TextFrame):
-    "Interpreter/Remixer/Modifier"
-
-
-class TPOS(NumericPartTextFrame):
-    "Part of set"
-
-
-class TPRO(TextFrame):
-    "Produced (P)"
-
-
-class TPUB(TextFrame):
-    "Publisher"
-
-
-class TRCK(NumericPartTextFrame):
-    "Track Number"
-
-
-class TRDA(TextFrame):
-    "Recording Dates"
-
-
-class TRSN(TextFrame):
-    "Internet Radio Station Name"
-
-
-class TRSO(TextFrame):
-    "Internet Radio Station Owner"
-
-
-class TSIZ(NumericTextFrame):
-    "Size of audio data (bytes)"
-
-
-class TSO2(TextFrame):
-    "iTunes Album Artist Sort"
-
-
-class TSOA(TextFrame):
-    "Album Sort Order key"
-
-
-class TSOC(TextFrame):
-    "iTunes Composer Sort"
-
-
-class TSOP(TextFrame):
-    "Performer Sort Order key"
-
-
-class TSOT(TextFrame):
-    "Title Sort Order key"
-
-
-class TSRC(TextFrame):
-    "International Standard Recording Code (ISRC)"
-
-
-class TSSE(TextFrame):
-    "Encoder settings"
-
-
-class TSST(TextFrame):
-    "Set Subtitle"
-
-
-class TYER(NumericTextFrame):
-    "Year of recording"
-
-
-class TXXX(TextFrame):
-    """User-defined text data.
-
-    TXXX frames have a 'desc' attribute which is set to any Unicode
-    value (though the encoding of the text and the description must be
-    the same). Many taggers use this frame to store freeform keys.
-    """
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        EncodedTextSpec('desc'),
-        MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'),
-    ]
-
-    @property
-    def HashKey(self):
-        return '%s:%s' % (self.FrameID, self.desc)
-
-    def _pprint(self):
-        return "%s=%s" % (self.desc, " / ".join(self.text))
-
-
-class WCOM(UrlFrameU):
-    "Commercial Information"
-
-
-class WCOP(UrlFrame):
-    "Copyright Information"
-
-
-class WFED(UrlFrame):
-    "iTunes Podcast Feed"
-
-
-class WOAF(UrlFrame):
-    "Official File Information"
-
-
-class WOAR(UrlFrameU):
-    "Official Artist/Performer Information"
-
-
-class WOAS(UrlFrame):
-    "Official Source Information"
-
-
-class WORS(UrlFrame):
-    "Official Internet Radio Information"
-
-
-class WPAY(UrlFrame):
-    "Payment Information"
-
-
-class WPUB(UrlFrame):
-    "Official Publisher Information"
-
-
-class WXXX(UrlFrame):
-    """User-defined URL data.
-
-    Like TXXX, this has a freeform description associated with it.
-    """
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        EncodedTextSpec('desc'),
-        Latin1TextSpec('url'),
-    ]
-
-    @property
-    def HashKey(self):
-        return '%s:%s' % (self.FrameID, self.desc)
-
-
-class PairedTextFrame(Frame):
-    """Paired text strings.
-
-    Some ID3 frames pair text strings, to associate names with a more
-    specific involvement in the song. The 'people' attribute of these
-    frames contains a list of pairs::
-
-        [['trumpet', 'Miles Davis'], ['bass', 'Paul Chambers']]
-
-    Like text frames, these frames also have an encoding attribute.
-    """
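Because TXXX and WXXX above key their HashKey on 'desc', any number of them can coexist in one tag; a sketch (assuming mutagen is importable; the desc/text values are arbitrary examples). PairedTextFrame's spec continues below.

    from mutagen.id3 import ID3, TXXX

    tags = ID3()
    tags.add(TXXX(encoding=3, desc="replaygain_track_gain", text=["-6.5 dB"]))
    tags.add(TXXX(encoding=3, desc="MusicBrainz Album Id", text=["example-id"]))
    assert sorted(tags.keys()) == ["TXXX:MusicBrainz Album Id",
                                   "TXXX:replaygain_track_gain"]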
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        MultiSpec('people',
-                  EncodedTextSpec('involvement'),
-                  EncodedTextSpec('person'))
-    ]
-
-    def __eq__(self, other):
-        return self.people == other
-
-    __hash__ = Frame.__hash__
-
-
-class TIPL(PairedTextFrame):
-    "Involved People List"
-
-
-class TMCL(PairedTextFrame):
-    "Musicians Credits List"
-
-
-class IPLS(TIPL):
-    "Involved People List"
-
-
-class BinaryFrame(Frame):
-    """Binary data
-
-    The 'data' attribute contains the raw byte string.
-    """
-
-    _framespec = [BinaryDataSpec('data')]
-
-    def __eq__(self, other):
-        return self.data == other
-
-    __hash__ = Frame.__hash__
-
-
-class MCDI(BinaryFrame):
-    "Binary dump of CD's TOC"
-
-
-class ETCO(Frame):
-    """Event timing codes."""
-
-    _framespec = [
-        ByteSpec("format"),
-        KeyEventSpec("events"),
-    ]
-
-    def __eq__(self, other):
-        return self.events == other
-
-    __hash__ = Frame.__hash__
-
-
-class MLLT(Frame):
-    """MPEG location lookup table.
-
-    This frame's attributes may be changed in the future based on
-    feedback from real-world use.
-    """
-
-    _framespec = [
-        SizedIntegerSpec('frames', 2),
-        SizedIntegerSpec('bytes', 3),
-        SizedIntegerSpec('milliseconds', 3),
-        ByteSpec('bits_for_bytes'),
-        ByteSpec('bits_for_milliseconds'),
-        BinaryDataSpec('data'),
-    ]
-
-    def __eq__(self, other):
-        return self.data == other
-
-    __hash__ = Frame.__hash__
-
-
-class SYTC(Frame):
-    """Synchronised tempo codes.
-
-    This frame's attributes may be changed in the future based on
-    feedback from real-world use.
-    """
-
-    _framespec = [
-        ByteSpec("format"),
-        BinaryDataSpec("data"),
-    ]
-
-    def __eq__(self, other):
-        return self.data == other
-
-    __hash__ = Frame.__hash__
-
-
-@swap_to_string
-class USLT(Frame):
-    """Unsynchronised lyrics/text transcription.
-
-    Lyrics have a three letter ISO language code ('lang'), a
-    description ('desc'), and a block of plain text ('text').
-    """
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        StringSpec('lang', 3),
-        EncodedTextSpec('desc'),
-        EncodedTextSpec('text'),
-    ]
-
-    @property
-    def HashKey(self):
-        return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
-
-    def __bytes__(self):
-        return self.text.encode('utf-8')
-
-    def __str__(self):
-        return self.text
-
-    def __eq__(self, other):
-        return self.text == other
-
-    __hash__ = Frame.__hash__
-
-
-@swap_to_string
-class SYLT(Frame):
-    """Synchronised lyrics/text."""
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        StringSpec('lang', 3),
-        ByteSpec('format'),
-        ByteSpec('type'),
-        EncodedTextSpec('desc'),
-        SynchronizedTextSpec('text'),
-    ]
-
-    @property
-    def HashKey(self):
-        return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
-
-    def __eq__(self, other):
-        return str(self) == other
-
-    __hash__ = Frame.__hash__
-
-    def __str__(self):
-        return u"".join(text for (text, time) in self.text)
-
-    def __bytes__(self):
-        return text_type(self).encode("utf-8")
-
-
-class COMM(TextFrame):
-    """User comment.
-
-    User comment frames have a description, like TXXX, and also a three
-    letter ISO language code in the 'lang' attribute.
-    """
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        StringSpec('lang', 3),
-        EncodedTextSpec('desc'),
-        MultiSpec('text', EncodedTextSpec('text'), sep=u'\u0000'),
-    ]
-
-    @property
-    def HashKey(self):
-        return '%s:%s:%s' % (self.FrameID, self.desc, self.lang)
-
-    def _pprint(self):
-        return "%s=%s=%s" % (self.desc, self.lang, " / ".join(self.text))
-
-
-class RVA2(Frame):
-    """Relative volume adjustment (2).
-
-    This frame is used to implement volume scaling, and in
-    particular, normalization using ReplayGain.
-
-    Attributes:
-
-    * desc -- description or context of this adjustment
-    * channel -- audio channel to adjust (master is 1)
-    * gain -- a + or - dB gain relative to some reference level
-    * peak -- peak of the audio as a floating point number, [0, 1]
-
-    When storing ReplayGain tags, use descriptions of 'album' and
-    'track' on channel 1.
-    """
-
-    _framespec = [
-        Latin1TextSpec('desc'),
-        ChannelSpec('channel'),
-        VolumeAdjustmentSpec('gain'),
-        VolumePeakSpec('peak'),
-    ]
-
-    _channels = ["Other", "Master volume", "Front right", "Front left",
-                 "Back right", "Back left", "Front centre", "Back centre",
-                 "Subwoofer"]
-
-    @property
-    def HashKey(self):
-        return '%s:%s' % (self.FrameID, self.desc)
-
-    def __eq__(self, other):
-        try:
-            return ((str(self) == other) or
-                    (self.desc == other.desc and
-                     self.channel == other.channel and
-                     self.gain == other.gain and
-                     self.peak == other.peak))
-        except AttributeError:
-            return False
-
-    __hash__ = Frame.__hash__
-
-    def __str__(self):
-        return "%s: %+0.4f dB/%0.4f" % (
-            self._channels[self.channel], self.gain, self.peak)
-
-
-class EQU2(Frame):
-    """Equalisation (2).
-
-    Attributes:
-    method -- interpolation method (0 = band, 1 = linear)
-    desc -- identifying description
-    adjustments -- list of (frequency, vol_adjustment) pairs
-    """
-
-    _framespec = [
-        ByteSpec("method"),
-        Latin1TextSpec("desc"),
-        VolumeAdjustmentsSpec("adjustments"),
-    ]
-
-    def __eq__(self, other):
-        return self.adjustments == other
-
-    __hash__ = Frame.__hash__
-
-    @property
-    def HashKey(self):
-        return '%s:%s' % (self.FrameID, self.desc)
-
-
-# class RVAD: unsupported
-# class EQUA: unsupported
-
-
-class RVRB(Frame):
-    """Reverb."""
-
-    _framespec = [
-        SizedIntegerSpec('left', 2),
-        SizedIntegerSpec('right', 2),
-        ByteSpec('bounce_left'),
-        ByteSpec('bounce_right'),
-        ByteSpec('feedback_ltl'),
-        ByteSpec('feedback_ltr'),
-        ByteSpec('feedback_rtr'),
-        ByteSpec('feedback_rtl'),
-        ByteSpec('premix_ltr'),
-        ByteSpec('premix_rtl'),
-    ]
-
-    def __eq__(self, other):
-        return (self.left, self.right) == other
-
-    __hash__ = Frame.__hash__
-
-
-class APIC(Frame):
-    """Attached (or linked) Picture.
-
-    Attributes:
-
-    * encoding -- text encoding for the description
-    * mime -- a MIME type (e.g. image/jpeg) or '-->' if the data is a URI
-    * type -- the source of the image (3 is the album front cover)
-    * desc -- a text description of the image
-    * data -- raw image data, as a byte string
-
-    Mutagen will automatically compress large images when saving tags.
-    """
-
-    _framespec = [
-        EncodingSpec('encoding'),
-        Latin1TextSpec('mime'),
-        ByteSpec('type'),
-        EncodedTextSpec('desc'),
-        BinaryDataSpec('data'),
-    ]
-
-    def __eq__(self, other):
-        return self.data == other
-
-    __hash__ = Frame.__hash__
-
-    @property
-    def HashKey(self):
-        return '%s:%s' % (self.FrameID, self.desc)
-
-    def _validate_from_22(self, other, checker):
-        if checker.name == "mime":
-            self.mime = other.mime.decode("ascii", "ignore")
-        else:
-            super(APIC, self)._validate_from_22(other, checker)
-
-    def _pprint(self):
-        return "%s (%s, %d bytes)" % (
-            self.desc, self.mime, len(self.data))
-
-
-class PCNT(Frame):
-    """Play counter.
-
-    The 'count' attribute contains the (recorded) number of times this
-    file has been played.
-
-    This frame is basically obsoleted by POPM.
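The RVA2 frame above is how ReplayGain usually ends up in ID3; a sketch of constructing one (assuming mutagen is importable; the gain/peak values are arbitrary examples):

    from mutagen.id3 import RVA2

    rg = RVA2(desc="track", channel=1, gain=-4.25, peak=0.977)
    assert rg.HashKey == "RVA2:track"
    print(rg)   # Master volume: -4.2500 dB/0.9770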
- """ - - _framespec = [IntegerSpec('count')] - - def __eq__(self, other): - return self.count == other - - __hash__ = Frame.__hash__ - - def __pos__(self): - return self.count - - def _pprint(self): - return text_type(self.count) - - -class POPM(FrameOpt): - """Popularimeter. - - This frame keys a rating (out of 255) and a play count to an email - address. - - Attributes: - - * email -- email this POPM frame is for - * rating -- rating from 0 to 255 - * count -- number of times the files has been played (optional) - """ - - _framespec = [ - Latin1TextSpec('email'), - ByteSpec('rating'), - ] - - _optionalspec = [IntegerSpec('count')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.email) - - def __eq__(self, other): - return self.rating == other - - __hash__ = FrameOpt.__hash__ - - def __pos__(self): - return self.rating - - def _pprint(self): - return "%s=%r %r/255" % ( - self.email, getattr(self, 'count', None), self.rating) - - -class GEOB(Frame): - """General Encapsulated Object. - - A blob of binary data, that is not a picture (those go in APIC). - - Attributes: - - * encoding -- encoding of the description - * mime -- MIME type of the data or '-->' if the data is a URI - * filename -- suggested filename if extracted - * desc -- text description of the data - * data -- raw data, as a byte string - """ - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('mime'), - EncodedTextSpec('filename'), - EncodedTextSpec('desc'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.desc) - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -class RBUF(FrameOpt): - """Recommended buffer size. - - Attributes: - - * size -- recommended buffer size in bytes - * info -- if ID3 tags may be elsewhere in the file (optional) - * offset -- the location of the next ID3 tag, if any - - Mutagen will not find the next tag itself. - """ - - _framespec = [SizedIntegerSpec('size', 3)] - - _optionalspec = [ - ByteSpec('info'), - SizedIntegerSpec('offset', 4), - ] - - def __eq__(self, other): - return self.size == other - - __hash__ = FrameOpt.__hash__ - - def __pos__(self): - return self.size - - -@swap_to_string -class AENC(FrameOpt): - """Audio encryption. - - Attributes: - - * owner -- key identifying this encryption type - * preview_start -- unencrypted data block offset - * preview_length -- number of unencrypted blocks - * data -- data required for decryption (optional) - - Mutagen cannot decrypt files. - """ - - _framespec = [ - Latin1TextSpec('owner'), - SizedIntegerSpec('preview_start', 2), - SizedIntegerSpec('preview_length', 2), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.owner) - - def __bytes__(self): - return self.owner.encode('utf-8') - - def __str__(self): - return self.owner - - def __eq__(self, other): - return self.owner == other - - __hash__ = FrameOpt.__hash__ - - -class LINK(FrameOpt): - """Linked information. 
- - Attributes: - - * frameid -- the ID of the linked frame - * url -- the location of the linked frame - * data -- further ID information for the frame - """ - - _framespec = [ - StringSpec('frameid', 4), - Latin1TextSpec('url'), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - try: - return "%s:%s:%s:%s" % ( - self.FrameID, self.frameid, self.url, _bytes2key(self.data)) - except AttributeError: - return "%s:%s:%s" % (self.FrameID, self.frameid, self.url) - - def __eq__(self, other): - try: - return (self.frameid, self.url, self.data) == other - except AttributeError: - return (self.frameid, self.url) == other - - __hash__ = FrameOpt.__hash__ - - -class POSS(Frame): - """Position synchronisation frame - - Attribute: - - * format -- format of the position attribute (frames or milliseconds) - * position -- current position of the file - """ - - _framespec = [ - ByteSpec('format'), - IntegerSpec('position'), - ] - - def __pos__(self): - return self.position - - def __eq__(self, other): - return self.position == other - - __hash__ = Frame.__hash__ - - -class UFID(Frame): - """Unique file identifier. - - Attributes: - - * owner -- format/type of identifier - * data -- identifier - """ - - _framespec = [ - Latin1TextSpec('owner'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.owner) - - def __eq__(s, o): - if isinstance(o, UFI): - return s.owner == o.owner and s.data == o.data - else: - return s.data == o - - __hash__ = Frame.__hash__ - - def _pprint(self): - return "%s=%r" % (self.owner, self.data) - - -@swap_to_string -class USER(Frame): - """Terms of use. - - Attributes: - - * encoding -- text encoding - * lang -- ISO three letter language code - * text -- licensing terms for the audio - """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('lang', 3), - EncodedTextSpec('text'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.lang) - - def __bytes__(self): - return self.text.encode('utf-8') - - def __str__(self): - return self.text - - def __eq__(self, other): - return self.text == other - - __hash__ = Frame.__hash__ - - def _pprint(self): - return "%r=%s" % (self.lang, self.text) - - -@swap_to_string -class OWNE(Frame): - """Ownership frame.""" - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('price'), - StringSpec('date', 8), - EncodedTextSpec('seller'), - ] - - def __bytes__(self): - return self.seller.encode('utf-8') - - def __str__(self): - return self.seller - - def __eq__(self, other): - return self.seller == other - - __hash__ = Frame.__hash__ - - -class COMR(FrameOpt): - """Commercial frame.""" - - _framespec = [ - EncodingSpec('encoding'), - Latin1TextSpec('price'), - StringSpec('valid_until', 8), - Latin1TextSpec('contact'), - ByteSpec('format'), - EncodedTextSpec('seller'), - EncodedTextSpec('desc'), - ] - - _optionalspec = [ - Latin1TextSpec('mime'), - BinaryDataSpec('logo'), - ] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, _bytes2key(self._writeData())) - - def __eq__(self, other): - return self._writeData() == other._writeData() - - __hash__ = FrameOpt.__hash__ - - -@swap_to_string -class ENCR(Frame): - """Encryption method registration. - - The standard does not allow multiple ENCR frames with the same owner - or the same method. Mutagen only verifies that the owner is unique. 
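UFID above is the usual home for stable identifiers such as MusicBrainz track IDs; a sketch (assuming mutagen is importable; the data bytes are an arbitrary example):

    from mutagen.id3 import UFID

    u = UFID(owner="http://musicbrainz.org", data=b"0123456789abcdef")
    assert u.HashKey == "UFID:http://musicbrainz.org"
    assert u == b"0123456789abcdef"   # equality compares the raw data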
- """ - - _framespec = [ - Latin1TextSpec('owner'), - ByteSpec('method'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return "%s:%s" % (self.FrameID, self.owner) - - def __bytes__(self): - return self.data - - def __eq__(self, other): - return self.data == other - - __hash__ = Frame.__hash__ - - -@swap_to_string -class GRID(FrameOpt): - """Group identification registration.""" - - _framespec = [ - Latin1TextSpec('owner'), - ByteSpec('group'), - ] - - _optionalspec = [BinaryDataSpec('data')] - - @property - def HashKey(self): - return '%s:%s' % (self.FrameID, self.group) - - def __pos__(self): - return self.group - - def __bytes__(self): - return self.owner.encode('utf-8') - - def __str__(self): - return self.owner - - def __eq__(self, other): - return self.owner == other or self.group == other - - __hash__ = FrameOpt.__hash__ - - -@swap_to_string -class PRIV(Frame): - """Private frame.""" - - _framespec = [ - Latin1TextSpec('owner'), - BinaryDataSpec('data'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % ( - self.FrameID, self.owner, _bytes2key(self.data)) - - def __bytes__(self): - return self.data - - def __eq__(self, other): - return self.data == other - - def _pprint(self): - return "%s=%r" % (self.owner, self.data) - - __hash__ = Frame.__hash__ - - -@swap_to_string -class SIGN(Frame): - """Signature frame.""" - - _framespec = [ - ByteSpec('group'), - BinaryDataSpec('sig'), - ] - - @property - def HashKey(self): - return '%s:%s:%s' % (self.FrameID, self.group, _bytes2key(self.sig)) - - def __bytes__(self): - return self.sig - - def __eq__(self, other): - return self.sig == other - - __hash__ = Frame.__hash__ - - -class SEEK(Frame): - """Seek frame. - - Mutagen does not find tags at seek offsets. - """ - - _framespec = [IntegerSpec('offset')] - - def __pos__(self): - return self.offset - - def __eq__(self, other): - return self.offset == other - - __hash__ = Frame.__hash__ - - -class ASPI(Frame): - """Audio seek point index. - - Attributes: S, L, N, b, and Fi. For the meaning of these, see - the ID3v2.4 specification. Fi is a list of integers. 
-    """
-    _framespec = [
-        SizedIntegerSpec("S", 4),
-        SizedIntegerSpec("L", 4),
-        SizedIntegerSpec("N", 2),
-        ByteSpec("b"),
-        ASPIIndexSpec("Fi"),
-    ]
-
-    def __eq__(self, other):
-        return self.Fi == other
-
-    __hash__ = Frame.__hash__
-
-
-# ID3v2.2 frames
-class UFI(UFID):
-    "Unique File Identifier"
-
-
-class TT1(TIT1):
-    "Content group description"
-
-
-class TT2(TIT2):
-    "Title"
-
-
-class TT3(TIT3):
-    "Subtitle/Description refinement"
-
-
-class TP1(TPE1):
-    "Lead Artist/Performer/Soloist/Group"
-
-
-class TP2(TPE2):
-    "Band/Orchestra/Accompaniment"
-
-
-class TP3(TPE3):
-    "Conductor"
-
-
-class TP4(TPE4):
-    "Interpreter/Remixer/Modifier"
-
-
-class TCM(TCOM):
-    "Composer"
-
-
-class TXT(TEXT):
-    "Lyricist"
-
-
-class TLA(TLAN):
-    "Audio Language(s)"
-
-
-class TCO(TCON):
-    "Content Type (Genre)"
-
-
-class TAL(TALB):
-    "Album"
-
-
-class TPA(TPOS):
-    "Part of set"
-
-
-class TRK(TRCK):
-    "Track Number"
-
-
-class TRC(TSRC):
-    "International Standard Recording Code (ISRC)"
-
-
-class TYE(TYER):
-    "Year of recording"
-
-
-class TDA(TDAT):
-    "Date of recording (DDMM)"
-
-
-class TIM(TIME):
-    "Time of recording (HHMM)"
-
-
-class TRD(TRDA):
-    "Recording Dates"
-
-
-class TMT(TMED):
-    "Source Media Type"
-
-
-class TFT(TFLT):
-    "File Type"
-
-
-class TBP(TBPM):
-    "Beats per minute"
-
-
-class TCP(TCMP):
-    "iTunes Compilation Flag"
-
-
-class TCR(TCOP):
-    "Copyright (C)"
-
-
-class TPB(TPUB):
-    "Publisher"
-
-
-class TEN(TENC):
-    "Encoder"
-
-
-class TSS(TSSE):
-    "Encoder settings"
-
-
-class TOF(TOFN):
-    "Original Filename"
-
-
-class TLE(TLEN):
-    "Audio Length (ms)"
-
-
-class TSI(TSIZ):
-    "Audio Data size (bytes)"
-
-
-class TDY(TDLY):
-    "Audio Delay (ms)"
-
-
-class TKE(TKEY):
-    "Starting Key"
-
-
-class TOT(TOAL):
-    "Original Album"
-
-
-class TOA(TOPE):
-    "Original Artist/Performer"
-
-
-class TOL(TOLY):
-    "Original Lyricist"
-
-
-class TOR(TORY):
-    "Original Release Year"
-
-
-class TXX(TXXX):
-    "User-defined Text"
-
-
-class WAF(WOAF):
-    "Official File Information"
-
-
-class WAR(WOAR):
-    "Official Artist/Performer Information"
-
-
-class WAS(WOAS):
-    "Official Source Information"
-
-
-class WCM(WCOM):
-    "Commercial Information"
-
-
-class WCP(WCOP):
-    "Copyright Information"
-
-
-class WPB(WPUB):
-    "Official Publisher Information"
-
-
-class WXX(WXXX):
-    "User-defined URL"
-
-
-class IPL(IPLS):
-    "Involved people list"
-
-
-class MCI(MCDI):
-    "Binary dump of CD's TOC"
-
-
-class ETC(ETCO):
-    "Event timing codes"
-
-
-class MLL(MLLT):
-    "MPEG location lookup table"
-
-
-class STC(SYTC):
-    "Synced tempo codes"
-
-
-class ULT(USLT):
-    "Unsynchronised lyrics/text transcription"
-
-
-class SLT(SYLT):
-    "Synchronised lyrics/text"
-
-
-class COM(COMM):
-    "Comment"
-
-
-# class RVA(RVAD)
-# class EQU(EQUA)
-
-
-class REV(RVRB):
-    "Reverb"
-
-
-class PIC(APIC):
-    """Attached Picture.
-
-    The 'mime' attribute of an ID3v2.2 attached picture must be either
-    'PNG' or 'JPG'.
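Each of the ID3v2.2 classes above subclasses its v2.3/v2.4 counterpart, which is what makes the upgrade path nearly free; a sketch using the registration dicts populated at the end of this module (assuming they are importable from mutagen.id3, as in upstream mutagen):

    from mutagen.id3 import Frames, Frames_2_2

    assert issubclass(Frames_2_2["TT2"], Frames["TIT2"])
    assert issubclass(Frames_2_2["PIC"], Frames["APIC"])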
- """ - - _framespec = [ - EncodingSpec('encoding'), - StringSpec('mime', 3), - ByteSpec('type'), - EncodedTextSpec('desc'), - BinaryDataSpec('data') - ] - - def _to_other(self, other): - if not isinstance(other, APIC): - raise TypeError - - other.encoding = self.encoding - other.mime = self.mime - other.type = self.type - other.desc = self.desc - other.data = self.data - - -class GEO(GEOB): - "General Encapsulated Object" - - -class CNT(PCNT): - "Play counter" - - -class POP(POPM): - "Popularimeter" - - -class BUF(RBUF): - "Recommended buffer size" - - -class CRM(Frame): - """Encrypted meta frame""" - _framespec = [Latin1TextSpec('owner'), Latin1TextSpec('desc'), - BinaryDataSpec('data')] - - def __eq__(self, other): - return self.data == other - __hash__ = Frame.__hash__ - - -class CRA(AENC): - "Audio encryption" - - -class LNK(LINK): - """Linked information""" - - _framespec = [ - StringSpec('frameid', 3), - Latin1TextSpec('url') - ] - - _optionalspec = [BinaryDataSpec('data')] - - def _to_other(self, other): - if not isinstance(other, LINK): - raise TypeError - - other.frameid = self.frameid - other.url = self.url - if hasattr(self, "data"): - other.data = self.data - - -Frames = {} -"""All supported ID3v2.3/4 frames, keyed by frame name.""" - - -Frames_2_2 = {} -"""All supported ID3v2.2 frames, keyed by frame name.""" - - -k, v = None, None -for k, v in iteritems(globals()): - if isinstance(v, type) and issubclass(v, Frame): - v.__module__ = "mutagen.id3" - - if len(k) == 3: - Frames_2_2[k] = v - elif len(k) == 4: - Frames[k] = v - -try: - del k - del v -except NameError: - pass diff --git a/resources/lib/libraries/mutagen/id3/_specs.py b/resources/lib/libraries/mutagen/id3/_specs.py deleted file mode 100644 index 4358a65d..00000000 --- a/resources/lib/libraries/mutagen/id3/_specs.py +++ /dev/null @@ -1,635 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -import struct -from struct import unpack, pack - -from .._compat import text_type, chr_, PY3, swap_to_string, string_types, \ - xrange -from .._util import total_ordering, decode_terminated, enum, izip -from ._util import BitPaddedInt - - -@enum -class PictureType(object): - """Enumeration of image types defined by the ID3 standard for the APIC - frame, but also reused in WMA/FLAC/VorbisComment. - """ - - OTHER = 0 - """Other""" - - FILE_ICON = 1 - """32x32 pixels 'file icon' (PNG only)""" - - OTHER_FILE_ICON = 2 - """Other file icon""" - - COVER_FRONT = 3 - """Cover (front)""" - - COVER_BACK = 4 - """Cover (back)""" - - LEAFLET_PAGE = 5 - """Leaflet page""" - - MEDIA = 6 - """Media (e.g. 
label side of CD)""" - - LEAD_ARTIST = 7 - """Lead artist/lead performer/soloist""" - - ARTIST = 8 - """Artist/performer""" - - CONDUCTOR = 9 - """Conductor""" - - BAND = 10 - """Band/Orchestra""" - - COMPOSER = 11 - """Composer""" - - LYRICIST = 12 - """Lyricist/text writer""" - - RECORDING_LOCATION = 13 - """Recording Location""" - - DURING_RECORDING = 14 - """During recording""" - - DURING_PERFORMANCE = 15 - """During performance""" - - SCREEN_CAPTURE = 16 - """Movie/video screen capture""" - - FISH = 17 - """A bright coloured fish""" - - ILLUSTRATION = 18 - """Illustration""" - - BAND_LOGOTYPE = 19 - """Band/artist logotype""" - - PUBLISHER_LOGOTYPE = 20 - """Publisher/Studio logotype""" - - -class SpecError(Exception): - pass - - -class Spec(object): - - def __init__(self, name): - self.name = name - - def __hash__(self): - raise TypeError("Spec objects are unhashable") - - def _validate23(self, frame, value, **kwargs): - """Return a possibly modified value which, if written, - results in valid id3v2.3 data. - """ - - return value - - def read(self, frame, data): - """Returns the (value, left_data) or raises SpecError""" - - raise NotImplementedError - - def write(self, frame, value): - raise NotImplementedError - - def validate(self, frame, value): - """Returns the validated data or raises ValueError/TypeError""" - - raise NotImplementedError - - -class ByteSpec(Spec): - def read(self, frame, data): - return bytearray(data)[0], data[1:] - - def write(self, frame, value): - return chr_(value) - - def validate(self, frame, value): - if value is not None: - chr_(value) - return value - - -class IntegerSpec(Spec): - def read(self, frame, data): - return int(BitPaddedInt(data, bits=8)), b'' - - def write(self, frame, value): - return BitPaddedInt.to_str(value, bits=8, width=-1) - - def validate(self, frame, value): - return value - - -class SizedIntegerSpec(Spec): - def __init__(self, name, size): - self.name, self.__sz = name, size - - def read(self, frame, data): - return int(BitPaddedInt(data[:self.__sz], bits=8)), data[self.__sz:] - - def write(self, frame, value): - return BitPaddedInt.to_str(value, bits=8, width=self.__sz) - - def validate(self, frame, value): - return value - - -@enum -class Encoding(object): - """Text Encoding""" - - LATIN1 = 0 - """ISO-8859-1""" - - UTF16 = 1 - """UTF-16 with BOM""" - - UTF16BE = 2 - """UTF-16BE without BOM""" - - UTF8 = 3 - """UTF-8""" - - -class EncodingSpec(ByteSpec): - - def read(self, frame, data): - enc, data = super(EncodingSpec, self).read(frame, data) - if enc not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, - Encoding.UTF8): - raise SpecError('Invalid Encoding: %r' % enc) - return enc, data - - def validate(self, frame, value): - if value is None: - return None - if value not in (Encoding.LATIN1, Encoding.UTF16, Encoding.UTF16BE, - Encoding.UTF8): - raise ValueError('Invalid Encoding: %r' % value) - return value - - def _validate23(self, frame, value, **kwargs): - # only 0, 1 are valid in v2.3, default to utf-16 - if value not in (Encoding.LATIN1, Encoding.UTF16): - value = Encoding.UTF16 - return value - - -class StringSpec(Spec): - """A fixed size ASCII only payload.""" - - def __init__(self, name, length): - super(StringSpec, self).__init__(name) - self.len = length - - def read(s, frame, data): - chunk = data[:s.len] - try: - ascii = chunk.decode("ascii") - except UnicodeDecodeError: - raise SpecError("not ascii") - else: - if PY3: - chunk = ascii - - return chunk, data[s.len:] - - def write(s, frame, value): - if value is 
None: - return b'\x00' * s.len - else: - if PY3: - value = value.encode("ascii") - return (bytes(value) + b'\x00' * s.len)[:s.len] - - def validate(s, frame, value): - if value is None: - return None - - if PY3: - if not isinstance(value, str): - raise TypeError("%s has to be str" % s.name) - value.encode("ascii") - else: - if not isinstance(value, bytes): - value = value.encode("ascii") - - if len(value) == s.len: - return value - - raise ValueError('Invalid StringSpec[%d] data: %r' % (s.len, value)) - - -class BinaryDataSpec(Spec): - def read(self, frame, data): - return data, b'' - - def write(self, frame, value): - if value is None: - return b"" - if isinstance(value, bytes): - return value - value = text_type(value).encode("ascii") - return value - - def validate(self, frame, value): - if value is None: - return None - - if isinstance(value, bytes): - return value - elif PY3: - raise TypeError("%s has to be bytes" % self.name) - - value = text_type(value).encode("ascii") - return value - - -class EncodedTextSpec(Spec): - - _encodings = { - Encoding.LATIN1: ('latin1', b'\x00'), - Encoding.UTF16: ('utf16', b'\x00\x00'), - Encoding.UTF16BE: ('utf_16_be', b'\x00\x00'), - Encoding.UTF8: ('utf8', b'\x00'), - } - - def read(self, frame, data): - enc, term = self._encodings[frame.encoding] - try: - # allow missing termination - return decode_terminated(data, enc, strict=False) - except ValueError: - # utf-16 termination with missing BOM, or single NULL - if not data[:len(term)].strip(b"\x00"): - return u"", data[len(term):] - - # utf-16 data with single NULL, see issue 169 - try: - return decode_terminated(data + b"\x00", enc) - except ValueError: - raise SpecError("Decoding error") - - def write(self, frame, value): - enc, term = self._encodings[frame.encoding] - return value.encode(enc) + term - - def validate(self, frame, value): - return text_type(value) - - -class MultiSpec(Spec): - def __init__(self, name, *specs, **kw): - super(MultiSpec, self).__init__(name) - self.specs = specs - self.sep = kw.get('sep') - - def read(self, frame, data): - values = [] - while data: - record = [] - for spec in self.specs: - value, data = spec.read(frame, data) - record.append(value) - if len(self.specs) != 1: - values.append(record) - else: - values.append(record[0]) - return values, data - - def write(self, frame, value): - data = [] - if len(self.specs) == 1: - for v in value: - data.append(self.specs[0].write(frame, v)) - else: - for record in value: - for v, s in izip(record, self.specs): - data.append(s.write(frame, v)) - return b''.join(data) - - def validate(self, frame, value): - if value is None: - return [] - if self.sep and isinstance(value, string_types): - value = value.split(self.sep) - if isinstance(value, list): - if len(self.specs) == 1: - return [self.specs[0].validate(frame, v) for v in value] - else: - return [ - [s.validate(frame, v) for (v, s) in izip(val, self.specs)] - for val in value] - raise ValueError('Invalid MultiSpec data: %r' % value) - - def _validate23(self, frame, value, **kwargs): - if len(self.specs) != 1: - return [[s._validate23(frame, v, **kwargs) - for (v, s) in izip(val, self.specs)] - for val in value] - - spec = self.specs[0] - - # Merge single text spec multispecs only. 
-        # (TimeStampSpec being the exception, but it's not a valid v2.3 frame)
-        if not isinstance(spec, EncodedTextSpec) or \
-                isinstance(spec, TimeStampSpec):
-            return value
-
-        value = [spec._validate23(frame, v, **kwargs) for v in value]
-        if kwargs.get("sep") is not None:
-            return [spec.validate(frame, kwargs["sep"].join(value))]
-        return value
-
-
-class EncodedNumericTextSpec(EncodedTextSpec):
-    pass
-
-
-class EncodedNumericPartTextSpec(EncodedTextSpec):
-    pass
-
-
-class Latin1TextSpec(EncodedTextSpec):
-    def read(self, frame, data):
-        if b'\x00' in data:
-            data, ret = data.split(b'\x00', 1)
-        else:
-            ret = b''
-        return data.decode('latin1'), ret
-
-    def write(self, data, value):
-        return value.encode('latin1') + b'\x00'
-
-    def validate(self, frame, value):
-        return text_type(value)
-
-
-@swap_to_string
-@total_ordering
-class ID3TimeStamp(object):
-    """A time stamp in ID3v2 format.
-
-    This is a restricted form of the ISO 8601 standard; time stamps
-    take the form of:
-        YYYY-MM-DD HH:MM:SS
-    Or some partial form (YYYY-MM-DD HH, YYYY, etc.).
-
-    The 'text' attribute contains the raw text data of the time stamp.
-    """
-
-    import re
-
-    def __init__(self, text):
-        if isinstance(text, ID3TimeStamp):
-            text = text.text
-        elif not isinstance(text, text_type):
-            if PY3:
-                raise TypeError("not a str")
-            text = text.decode("utf-8")
-
-        self.text = text
-
-    __formats = ['%04d'] + ['%02d'] * 5
-    __seps = ['-', '-', ' ', ':', ':', 'x']
-
-    def get_text(self):
-        parts = [self.year, self.month, self.day,
-                 self.hour, self.minute, self.second]
-        pieces = []
-        for i, part in enumerate(parts):
-            if part is None:
-                break
-            pieces.append(self.__formats[i] % part + self.__seps[i])
-        return u''.join(pieces)[:-1]
-
-    def set_text(self, text, splitre=re.compile('[-T:/.]|\s+')):
-        year, month, day, hour, minute, second = \
-            splitre.split(text + ':::::')[:6]
-        for a in 'year month day hour minute second'.split():
-            try:
-                v = int(locals()[a])
-            except ValueError:
-                v = None
-            setattr(self, a, v)
-
-    text = property(get_text, set_text, doc="ID3v2.4 date and time.")
-
-    def __str__(self):
-        return self.text
-
-    def __bytes__(self):
-        return self.text.encode("utf-8")
-
-    def __repr__(self):
-        return repr(self.text)
-
-    def __eq__(self, other):
-        return self.text == other.text
-
-    def __lt__(self, other):
-        return self.text < other.text
-
-    __hash__ = object.__hash__
-
-    def encode(self, *args):
-        return self.text.encode(*args)
-
-
-class TimeStampSpec(EncodedTextSpec):
-    def read(self, frame, data):
-        value, data = super(TimeStampSpec, self).read(frame, data)
-        return self.validate(frame, value), data
-
-    def write(self, frame, data):
-        return super(TimeStampSpec, self).write(frame,
-                                                data.text.replace(' ', 'T'))
-
-    def validate(self, frame, value):
-        try:
-            return ID3TimeStamp(value)
-        except TypeError:
-            raise ValueError("Invalid ID3TimeStamp: %r" % value)
-
-
-class ChannelSpec(ByteSpec):
-    (OTHER, MASTER, FRONTRIGHT, FRONTLEFT, BACKRIGHT, BACKLEFT, FRONTCENTRE,
-     BACKCENTRE, SUBWOOFER) = xrange(9)
-
-
-class VolumeAdjustmentSpec(Spec):
-    def read(self, frame, data):
-        value, = unpack('>h', data[0:2])
-        return value / 512.0, data[2:]
-
-    def write(self, frame, value):
-        number = int(round(value * 512))
-        # pack only fails in 2.7, do it manually in 2.6
-        if not -32768 <= number <= 32767:
-            raise SpecError("not in range")
-        return pack('>h', number)
-
-    def validate(self, frame, value):
-        if value is not None:
-            try:
-                self.write(frame, value)
-            except SpecError:
-                raise ValueError("out of range")
-        return
value - - -class VolumePeakSpec(Spec): - def read(self, frame, data): - # http://bugs.xmms.org/attachment.cgi?id=113&action=view - peak = 0 - data_array = bytearray(data) - bits = data_array[0] - vol_bytes = min(4, (bits + 7) >> 3) - # not enough frame data - if vol_bytes + 1 > len(data): - raise SpecError("not enough frame data") - shift = ((8 - (bits & 7)) & 7) + (4 - vol_bytes) * 8 - for i in xrange(1, vol_bytes + 1): - peak *= 256 - peak += data_array[i] - peak *= 2 ** shift - return (float(peak) / (2 ** 31 - 1)), data[1 + vol_bytes:] - - def write(self, frame, value): - number = int(round(value * 32768)) - # pack only fails in 2.7, do it manually in 2.6 - if not 0 <= number <= 65535: - raise SpecError("not in range") - # always write as 16 bits for sanity. - return b"\x10" + pack('>H', number) - - def validate(self, frame, value): - if value is not None: - try: - self.write(frame, value) - except SpecError: - raise ValueError("out of range") - return value - - -class SynchronizedTextSpec(EncodedTextSpec): - def read(self, frame, data): - texts = [] - encoding, term = self._encodings[frame.encoding] - while data: - try: - value, data = decode_terminated(data, encoding) - except ValueError: - raise SpecError("decoding error") - - if len(data) < 4: - raise SpecError("not enough data") - time, = struct.unpack(">I", data[:4]) - - texts.append((value, time)) - data = data[4:] - return texts, b"" - - def write(self, frame, value): - data = [] - encoding, term = self._encodings[frame.encoding] - for text, time in value: - text = text.encode(encoding) + term - data.append(text + struct.pack(">I", time)) - return b"".join(data) - - def validate(self, frame, value): - return value - - -class KeyEventSpec(Spec): - def read(self, frame, data): - events = [] - while len(data) >= 5: - events.append(struct.unpack(">bI", data[:5])) - data = data[5:] - return events, data - - def write(self, frame, value): - return b"".join(struct.pack(">bI", *event) for event in value) - - def validate(self, frame, value): - return value - - -class VolumeAdjustmentsSpec(Spec): - # Not to be confused with VolumeAdjustmentSpec. 
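(Aside: a minimal, self-contained sketch of the on-disk layout that the read()/write() methods below implement for per-frequency relative-volume data: each pair is a big-endian unsigned short holding the frequency in half-Hz units, followed by a signed short holding the gain in 1/512 dB units. Helper names here are illustrative, not mutagen API.)

import struct

def pack_adjustments(pairs):
    # pairs: iterable of (frequency_hz, adjustment_db) floats.
    # Frequency is stored in half-Hz units (">H"), the gain in
    # 1/512 dB units (">h"), mirroring write() below.
    return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512))
                    for freq, adj in sorted(pairs))

def unpack_adjustments(data):
    # Inverse of pack_adjustments; mirrors read() below.
    out = []
    while len(data) >= 4:
        raw_freq, raw_adj = struct.unpack(">Hh", data[:4])
        data = data[4:]
        out.append((raw_freq / 2.0, raw_adj / 512.0))
    return out

assert unpack_adjustments(pack_adjustments([(1000.0, -3.0)])) == [(1000.0, -3.0)]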
- def read(self, frame, data): - adjustments = {} - while len(data) >= 4: - freq, adj = struct.unpack(">Hh", data[:4]) - data = data[4:] - freq /= 2.0 - adj /= 512.0 - adjustments[freq] = adj - adjustments = sorted(adjustments.items()) - return adjustments, data - - def write(self, frame, value): - value.sort() - return b"".join(struct.pack(">Hh", int(freq * 2), int(adj * 512)) - for (freq, adj) in value) - - def validate(self, frame, value): - return value - - -class ASPIIndexSpec(Spec): - def read(self, frame, data): - if frame.b == 16: - format = "H" - size = 2 - elif frame.b == 8: - format = "B" - size = 1 - else: - raise SpecError("invalid bit count in ASPI (%d)" % frame.b) - - indexes = data[:frame.N * size] - data = data[frame.N * size:] - try: - return list(struct.unpack(">" + format * frame.N, indexes)), data - except struct.error as e: - raise SpecError(e) - - def write(self, frame, values): - if frame.b == 16: - format = "H" - elif frame.b == 8: - format = "B" - else: - raise SpecError("frame.b must be 8 or 16") - try: - return struct.pack(">" + format * frame.N, *values) - except struct.error as e: - raise SpecError(e) - - def validate(self, frame, values): - return values diff --git a/resources/lib/libraries/mutagen/id3/_util.py b/resources/lib/libraries/mutagen/id3/_util.py deleted file mode 100644 index 29f7241d..00000000 --- a/resources/lib/libraries/mutagen/id3/_util.py +++ /dev/null @@ -1,167 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2005 Michael Urman -# 2013 Christoph Reiter -# 2014 Ben Ockmore -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -from .._compat import long_, integer_types, PY3 -from .._util import MutagenError - - -class error(MutagenError): - pass - - -class ID3NoHeaderError(error, ValueError): - pass - - -class ID3UnsupportedVersionError(error, NotImplementedError): - pass - - -class ID3EncryptionUnsupportedError(error, NotImplementedError): - pass - - -class ID3JunkFrameError(error, ValueError): - pass - - -class unsynch(object): - @staticmethod - def decode(value): - fragments = bytearray(value).split(b'\xff') - if len(fragments) > 1 and not fragments[-1]: - raise ValueError('string ended unsafe') - - for f in fragments[1:]: - if (not f) or (f[0] >= 0xE0): - raise ValueError('invalid sync-safe string') - - if f[0] == 0x00: - del f[0] - - return bytes(bytearray(b'\xff').join(fragments)) - - @staticmethod - def encode(value): - fragments = bytearray(value).split(b'\xff') - for f in fragments[1:]: - if (not f) or (f[0] >= 0xE0) or (f[0] == 0x00): - f.insert(0, 0x00) - return bytes(bytearray(b'\xff').join(fragments)) - - -class _BitPaddedMixin(object): - - def as_str(self, width=4, minwidth=4): - return self.to_str(self, self.bits, self.bigendian, width, minwidth) - - @staticmethod - def to_str(value, bits=7, bigendian=True, width=4, minwidth=4): - mask = (1 << bits) - 1 - - if width != -1: - index = 0 - bytes_ = bytearray(width) - try: - while value: - bytes_[index] = value & mask - value >>= bits - index += 1 - except IndexError: - raise ValueError('Value too wide (>%d bytes)' % width) - else: - # PCNT and POPM use growing integers - # of at least 4 bytes (=minwidth) as counters. 
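(Aside: the BitPaddedInt machinery in this file stores 7 payload bits per byte, which is how ID3v2 "sync-safe" sizes avoid byte values that could be mistaken for a 0xFF frame sync. A minimal sketch of that round trip, with illustrative helper names:)

def to_syncsafe(value, width=4, bits=7):
    # Store `bits` payload bits per byte, big-endian, as
    # BitPaddedInt.to_str() does for ID3 size fields.
    out = bytearray(width)
    for i in range(width):
        out[width - 1 - i] = value & ((1 << bits) - 1)
        value >>= bits
    if value:
        raise ValueError("value too wide")
    return bytes(out)

def from_syncsafe(data, bits=7):
    value = 0
    for byte in bytearray(data):
        value = (value << bits) | (byte & ((1 << bits) - 1))
    return value

# 257 == 0b10_0000001 -> 0x00 0x00 0x02 0x01 with 7 bits per byte
assert to_syncsafe(257) == b"\x00\x00\x02\x01"
assert from_syncsafe(b"\x00\x00\x02\x01") == 257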
- bytes_ = bytearray() - append = bytes_.append - while value: - append(value & mask) - value >>= bits - bytes_ = bytes_.ljust(minwidth, b"\x00") - - if bigendian: - bytes_.reverse() - return bytes(bytes_) - - @staticmethod - def has_valid_padding(value, bits=7): - """Whether the padding bits are all zero""" - - assert bits <= 8 - - mask = (((1 << (8 - bits)) - 1) << bits) - - if isinstance(value, integer_types): - while value: - if value & mask: - return False - value >>= 8 - elif isinstance(value, bytes): - for byte in bytearray(value): - if byte & mask: - return False - else: - raise TypeError - - return True - - -class BitPaddedInt(int, _BitPaddedMixin): - - def __new__(cls, value, bits=7, bigendian=True): - - mask = (1 << (bits)) - 1 - numeric_value = 0 - shift = 0 - - if isinstance(value, integer_types): - while value: - numeric_value += (value & mask) << shift - value >>= 8 - shift += bits - elif isinstance(value, bytes): - if bigendian: - value = reversed(value) - for byte in bytearray(value): - numeric_value += (byte & mask) << shift - shift += bits - else: - raise TypeError - - if isinstance(numeric_value, int): - self = int.__new__(BitPaddedInt, numeric_value) - else: - self = long_.__new__(BitPaddedLong, numeric_value) - - self.bits = bits - self.bigendian = bigendian - return self - -if PY3: - BitPaddedLong = BitPaddedInt -else: - class BitPaddedLong(long_, _BitPaddedMixin): - pass - - -class ID3BadUnsynchData(error, ValueError): - """Deprecated""" - - -class ID3BadCompressedData(error, ValueError): - """Deprecated""" - - -class ID3TagError(error, ValueError): - """Deprecated""" - - -class ID3Warning(error, UserWarning): - """Deprecated""" diff --git a/resources/lib/libraries/mutagen/m4a.py b/resources/lib/libraries/mutagen/m4a.py deleted file mode 100644 index 5730ace3..00000000 --- a/resources/lib/libraries/mutagen/m4a.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -""" -since 1.9: mutagen.m4a is deprecated; use mutagen.mp4 instead. -since 1.31: mutagen.m4a will no longer work; any operation that could fail - will fail now. 
-""" - -import warnings - -from mutagen import FileType, Metadata, StreamInfo -from ._util import DictProxy, MutagenError - -warnings.warn( - "mutagen.m4a is deprecated; use mutagen.mp4 instead.", - DeprecationWarning) - - -class error(IOError, MutagenError): - pass - - -class M4AMetadataError(error): - pass - - -class M4AStreamInfoError(error): - pass - - -class M4AMetadataValueError(ValueError, M4AMetadataError): - pass - - -__all__ = ['M4A', 'Open', 'delete', 'M4ACover'] - - -class M4ACover(bytes): - - FORMAT_JPEG = 0x0D - FORMAT_PNG = 0x0E - - def __new__(cls, data, imageformat=None): - self = bytes.__new__(cls, data) - if imageformat is None: - imageformat = M4ACover.FORMAT_JPEG - self.imageformat = imageformat - return self - - -class M4ATags(DictProxy, Metadata): - - def load(self, atoms, fileobj): - raise error("deprecated") - - def save(self, filename): - raise error("deprecated") - - def delete(self, filename): - raise error("deprecated") - - def pprint(self): - return u"" - - -class M4AInfo(StreamInfo): - - bitrate = 0 - - def __init__(self, atoms, fileobj): - raise error("deprecated") - - def pprint(self): - return u"" - - -class M4A(FileType): - - _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] - - def load(self, filename): - raise error("deprecated") - - def add_tags(self): - self.tags = M4ATags() - - @staticmethod - def score(filename, fileobj, header): - return 0 - - -Open = M4A - - -def delete(filename): - raise error("deprecated") diff --git a/resources/lib/libraries/mutagen/monkeysaudio.py b/resources/lib/libraries/mutagen/monkeysaudio.py deleted file mode 100644 index 0e29273f..00000000 --- a/resources/lib/libraries/mutagen/monkeysaudio.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Monkey's Audio streams with APEv2 tags. - -Monkey's Audio is a very efficient lossless audio compressor developed -by Matt Ashland. - -For more information, see http://www.monkeysaudio.com/. -""" - -__all__ = ["MonkeysAudio", "Open", "delete"] - -import struct - -from ._compat import endswith -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete -from mutagen._util import cdata - - -class MonkeysAudioHeaderError(error): - pass - - -class MonkeysAudioInfo(StreamInfo): - """Monkey's Audio stream information. 
- - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bits_per_sample -- bits per sample - * version -- Monkey's Audio stream version, as a float (eg: 3.99) - """ - - def __init__(self, fileobj): - header = fileobj.read(76) - if len(header) != 76 or not header.startswith(b"MAC "): - raise MonkeysAudioHeaderError("not a Monkey's Audio file") - self.version = cdata.ushort_le(header[4:6]) - if self.version >= 3980: - (blocks_per_frame, final_frame_blocks, total_frames, - self.bits_per_sample, self.channels, - self.sample_rate) = struct.unpack("<IIIHHI", header[56:76]) - else: - compression_level = cdata.ushort_le(header[6:8]) - self.channels, self.sample_rate = struct.unpack( - "<HI", header[10:16]) - total_frames, final_frame_blocks = struct.unpack( - "<II", header[24:32]) - if self.version >= 3950: - blocks_per_frame = 73728 * 4 - elif self.version >= 3900 or (self.version >= 3800 and - compression_level == 4): - blocks_per_frame = 73728 - else: - blocks_per_frame = 9216 - self.version /= 1000.0 - self.length = 0.0 - if (self.sample_rate != 0) and (total_frames > 0): - total_blocks = ((total_frames - 1) * blocks_per_frame + - final_frame_blocks) - self.length = float(total_blocks) / self.sample_rate - - def pprint(self): - return u"Monkey's Audio %.2f, %.2f seconds, %d Hz" % ( - self.version, self.length, self.sample_rate) - - -class MonkeysAudio(APEv2File): - _Info = MonkeysAudioInfo - _mimes = ["audio/ape", "audio/x-ape"] - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(b"MAC ") + endswith(filename.lower(), ".ape") - - -Open = MonkeysAudio diff --git a/resources/lib/libraries/mutagen/mp3.py b/resources/lib/libraries/mutagen/mp3.py deleted file mode 100644 index afb600cf..00000000 --- a/resources/lib/libraries/mutagen/mp3.py +++ /dev/null @@ -1,362 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""MPEG audio stream information and tags.""" - -import os -import struct - -from ._compat import endswith, xrange -from ._mp3util import XingHeader, XingHeaderError, VBRIHeader, VBRIHeaderError -from mutagen import StreamInfo -from mutagen._util import MutagenError, enum -from mutagen.id3 import ID3FileType, BitPaddedInt, delete - -__all__ = ["MP3", "Open", "delete", "MP3"] - - -class error(RuntimeError, MutagenError): - pass - - -class HeaderNotFoundError(error, IOError): - pass - - -class InvalidMPEGHeader(error, IOError): - pass - - -@enum -class BitrateMode(object): - - UNKNOWN = 0 - """Probably a CBR file, but not sure""" - - CBR = 1 - """Constant Bitrate""" - - VBR = 2 - """Variable Bitrate""" - - ABR = 3 - """Average Bitrate (a variant of VBR)""" - - -def _guess_xing_bitrate_mode(xing): - - if xing.lame_header: - lame = xing.lame_header - if lame.vbr_method in (1, 8): - return BitrateMode.CBR - elif lame.vbr_method in (2, 9): - return BitrateMode.ABR - elif lame.vbr_method in (3, 4, 5, 6): - return BitrateMode.VBR - # everything else undefined, continue guessing - - # info tags get only written by lame for cbr files - if xing.is_info: - return BitrateMode.CBR - - # older lame and non-lame with some variant of vbr - if xing.vbr_scale != -1 or xing.lame_version: - return BitrateMode.VBR - - return BitrateMode.UNKNOWN - - -# Mode values. 
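(Aside: the duration arithmetic in MonkeysAudioInfo above reduces to block counting: every frame holds blocks_per_frame audio blocks except the final one. A minimal standalone sketch, helper name illustrative:)

def ape_duration(total_frames, final_frame_blocks, blocks_per_frame,
                 sample_rate):
    # Mirrors MonkeysAudioInfo above.
    if sample_rate <= 0 or total_frames <= 0:
        return 0.0
    total_blocks = (total_frames - 1) * blocks_per_frame + final_frame_blocks
    return total_blocks / float(sample_rate)

# e.g. a version >= 3.95 file: 73728 * 4 blocks per frame at 44100 Hz
print("%.2f s" % ape_duration(25, 1000, 73728 * 4, 44100))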
-STEREO, JOINTSTEREO, DUALCHANNEL, MONO = xrange(4) - - -class MPEGInfo(StreamInfo): - """MPEG audio stream information - - Parse information about an MPEG audio file. This also reads the - Xing VBR header format. - - This code was implemented based on the format documentation at - http://mpgedit.org/mpgedit/mpeg_format/mpeghdr.htm. - - Useful attributes: - - * length -- audio length, in seconds - * channels -- number of audio channels - * bitrate -- audio bitrate, in bits per second - * sketchy -- if true, the file may not be valid MPEG audio - * encoder_info -- a string containing encoder name and possibly version. - In case a lame tag is present this will start with - ``"LAME "``, if unknown it is empty, otherwise the - text format is undefined. - * bitrate_mode -- a :class:`BitrateMode` - - * track_gain -- replaygain track gain (89db) or None - * track_peak -- replaygain track peak or None - * album_gain -- replaygain album gain (89db) or None - - Useless attributes: - - * version -- MPEG version (1, 2, 2.5) - * layer -- 1, 2, or 3 - * mode -- One of STEREO, JOINTSTEREO, DUALCHANNEL, or MONO (0-3) - * protected -- whether or not the file is "protected" - * padding -- whether or not audio frames are padded - * sample_rate -- audio sample rate, in Hz - """ - - # Map (version, layer) tuples to bitrates. - __BITRATE = { - (1, 1): [0, 32, 64, 96, 128, 160, 192, 224, - 256, 288, 320, 352, 384, 416, 448], - (1, 2): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 160, 192, 224, 256, 320, 384], - (1, 3): [0, 32, 40, 48, 56, 64, 80, 96, 112, - 128, 160, 192, 224, 256, 320], - (2, 1): [0, 32, 48, 56, 64, 80, 96, 112, 128, - 144, 160, 176, 192, 224, 256], - (2, 2): [0, 8, 16, 24, 32, 40, 48, 56, 64, - 80, 96, 112, 128, 144, 160], - } - - __BITRATE[(2, 3)] = __BITRATE[(2, 2)] - for i in xrange(1, 4): - __BITRATE[(2.5, i)] = __BITRATE[(2, i)] - - # Map version to sample rates. - __RATES = { - 1: [44100, 48000, 32000], - 2: [22050, 24000, 16000], - 2.5: [11025, 12000, 8000] - } - - sketchy = False - encoder_info = u"" - bitrate_mode = BitrateMode.UNKNOWN - track_gain = track_peak = album_gain = album_peak = None - - def __init__(self, fileobj, offset=None): - """Parse MPEG stream information from a file-like object. - - If an offset argument is given, it is used to start looking - for stream information and Xing headers; otherwise, ID3v2 tags - will be skipped automatically. A correct offset can make - loading files significantly faster. - """ - - try: - size = os.path.getsize(fileobj.name) - except (IOError, OSError, AttributeError): - fileobj.seek(0, 2) - size = fileobj.tell() - - # If we don't get an offset, try to skip an ID3v2 tag. - if offset is None: - fileobj.seek(0, 0) - idata = fileobj.read(10) - try: - id3, insize = struct.unpack('>3sxxx4s', idata) - except struct.error: - id3, insize = b'', 0 - insize = BitPaddedInt(insize) - if id3 == b'ID3' and insize > 0: - offset = insize + 10 - else: - offset = 0 - - # Try to find two valid headers (meaning, very likely MPEG data) - # at the given offset, 30% through the file, 60% through the file, - # and 90% through the file. - for i in [offset, 0.3 * size, 0.6 * size, 0.9 * size]: - try: - self.__try(fileobj, int(i), size - offset) - except error: - pass - else: - break - # If we can't find any two consecutive frames, try to find just - # one frame back at the original offset given. 
- else: - self.__try(fileobj, offset, size - offset, False) - self.sketchy = True - - def __try(self, fileobj, offset, real_size, check_second=True): - # This is going to be one really long function; bear with it, - # because there's not really a sane point to cut it up. - fileobj.seek(offset, 0) - - # We "know" we have an MPEG file if we find two frames that look like - # valid MPEG data. If we can't find them in 32k of reads, something - # is horribly wrong (the longest frame can only be about 4k). This - # is assuming the offset didn't lie. - data = fileobj.read(32768) - - frame_1 = data.find(b"\xff") - while 0 <= frame_1 <= (len(data) - 4): - frame_data = struct.unpack(">I", data[frame_1:frame_1 + 4])[0] - if ((frame_data >> 16) & 0xE0) != 0xE0: - frame_1 = data.find(b"\xff", frame_1 + 2) - else: - version = (frame_data >> 19) & 0x3 - layer = (frame_data >> 17) & 0x3 - protection = (frame_data >> 16) & 0x1 - bitrate = (frame_data >> 12) & 0xF - sample_rate = (frame_data >> 10) & 0x3 - padding = (frame_data >> 9) & 0x1 - # private = (frame_data >> 8) & 0x1 - self.mode = (frame_data >> 6) & 0x3 - # mode_extension = (frame_data >> 4) & 0x3 - # copyright = (frame_data >> 3) & 0x1 - # original = (frame_data >> 2) & 0x1 - # emphasis = (frame_data >> 0) & 0x3 - if (version == 1 or layer == 0 or sample_rate == 0x3 or - bitrate == 0 or bitrate == 0xF): - frame_1 = data.find(b"\xff", frame_1 + 2) - else: - break - else: - raise HeaderNotFoundError("can't sync to an MPEG frame") - - self.channels = 1 if self.mode == MONO else 2 - - # There is a serious problem here, which is that many flags - # in an MPEG header are backwards. - self.version = [2.5, None, 2, 1][version] - self.layer = 4 - layer - self.protected = not protection - self.padding = bool(padding) - - self.bitrate = self.__BITRATE[(self.version, self.layer)][bitrate] - self.bitrate *= 1000 - self.sample_rate = self.__RATES[self.version][sample_rate] - - if self.layer == 1: - frame_length = ( - (12 * self.bitrate // self.sample_rate) + padding) * 4 - frame_size = 384 - elif self.version >= 2 and self.layer == 3: - frame_length = (72 * self.bitrate // self.sample_rate) + padding - frame_size = 576 - else: - frame_length = (144 * self.bitrate // self.sample_rate) + padding - frame_size = 1152 - - if check_second: - possible = int(frame_1 + frame_length) - if possible > len(data) + 4: - raise HeaderNotFoundError("can't sync to second MPEG frame") - try: - frame_data = struct.unpack( - ">H", data[possible:possible + 2])[0] - except struct.error: - raise HeaderNotFoundError("can't sync to second MPEG frame") - if (frame_data & 0xFFE0) != 0xFFE0: - raise HeaderNotFoundError("can't sync to second MPEG frame") - - self.length = 8 * real_size / float(self.bitrate) - - # Try to find/parse the Xing header, which trumps the above length - # and bitrate calculation. 
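(Aside: the header bit-twiddling in __try() above can be exercised on a single 4-byte frame header in isolation; a minimal sketch using the same shifts, helper name illustrative:)

import struct

def parse_mpeg_header(four_bytes):
    # Mirrors the field extraction in MPEGInfo.__try() above.
    (frame_data,) = struct.unpack(">I", four_bytes)
    if (frame_data >> 21) & 0x7FF != 0x7FF:   # 11-bit frame sync
        raise ValueError("no frame sync")
    version_bits = (frame_data >> 19) & 0x3   # 0=2.5, 2=2, 3=1
    layer_bits = (frame_data >> 17) & 0x3     # layer = 4 - bits
    bitrate_idx = (frame_data >> 12) & 0xF
    samplerate_idx = (frame_data >> 10) & 0x3
    return version_bits, 4 - layer_bits, bitrate_idx, samplerate_idx

# 0xFFFB9064: MPEG-1 Layer 3, bitrate index 9 (128 kbps), 44100 Hz
print(parse_mpeg_header(b"\xff\xfb\x90\x64"))  # -> (3, 3, 9, 0)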
- - if self.layer != 3: - return - - # Xing - xing_offset = XingHeader.get_offset(self) - fileobj.seek(offset + frame_1 + xing_offset, 0) - try: - xing = XingHeader(fileobj) - except XingHeaderError: - pass - else: - lame = xing.lame_header - self.sketchy = False - self.bitrate_mode = _guess_xing_bitrate_mode(xing) - if xing.frames != -1: - samples = frame_size * xing.frames - if lame is not None: - samples -= lame.encoder_delay_start - samples -= lame.encoder_padding_end - self.length = float(samples) / self.sample_rate - if xing.bytes != -1 and self.length: - self.bitrate = int((xing.bytes * 8) / self.length) - if xing.lame_version: - self.encoder_info = u"LAME %s" % xing.lame_version - if lame is not None: - self.track_gain = lame.track_gain_adjustment - self.track_peak = lame.track_peak - self.album_gain = lame.album_gain_adjustment - return - - # VBRI - vbri_offset = VBRIHeader.get_offset(self) - fileobj.seek(offset + frame_1 + vbri_offset, 0) - try: - vbri = VBRIHeader(fileobj) - except VBRIHeaderError: - pass - else: - self.bitrate_mode = BitrateMode.VBR - self.encoder_info = u"FhG" - self.sketchy = False - self.length = float(frame_size * vbri.frames) / self.sample_rate - if self.length: - self.bitrate = int((vbri.bytes * 8) / self.length) - - def pprint(self): - info = str(self.bitrate_mode).split(".", 1)[-1] - if self.bitrate_mode == BitrateMode.UNKNOWN: - info = u"CBR?" - if self.encoder_info: - info += ", %s" % self.encoder_info - s = u"MPEG %s layer %d, %d bps (%s), %s Hz, %d chn, %.2f seconds" % ( - self.version, self.layer, self.bitrate, info, - self.sample_rate, self.channels, self.length) - if self.sketchy: - s += u" (sketchy)" - return s - - -class MP3(ID3FileType): - """An MPEG audio (usually MPEG-1 Layer 3) file. - - :ivar info: :class:`MPEGInfo` - :ivar tags: :class:`ID3 <mutagen.id3.ID3>` - """ - - _Info = MPEGInfo - - _mimes = ["audio/mpeg", "audio/mpg", "audio/x-mpeg"] - - @property - def mime(self): - l = self.info.layer - return ["audio/mp%d" % l, "audio/x-mp%d" % l] + super(MP3, self).mime - - @staticmethod - def score(filename, fileobj, header_data): - filename = filename.lower() - - return (header_data.startswith(b"ID3") * 2 + - endswith(filename, b".mp3") + - endswith(filename, b".mp2") + endswith(filename, b".mpg") + - endswith(filename, b".mpeg")) - - -Open = MP3 - - -class EasyMP3(MP3): - """Like MP3, but uses EasyID3 for tags. - - :ivar info: :class:`MPEGInfo` - :ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>` - """ - - from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 diff --git a/resources/lib/libraries/mutagen/mp4/__init__.py b/resources/lib/libraries/mutagen/mp4/__init__.py deleted file mode 100644 index bc242ee8..00000000 --- a/resources/lib/libraries/mutagen/mp4/__init__.py +++ /dev/null @@ -1,1010 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write MPEG-4 audio files with iTunes metadata. - -This module will read MPEG-4 audio information and metadata, -as found in Apple's MP4 (aka M4A, M4B, M4P) files. - -There is no official specification for this format. 
The source code
-for TagLib, FAAD, and various MPEG specifications at
-
-* http://developer.apple.com/documentation/QuickTime/QTFF/
-* http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt
-* http://standards.iso.org/ittf/PubliclyAvailableStandards/\
-c041828_ISO_IEC_14496-12_2005(E).zip
-* http://wiki.multimedia.cx/index.php?title=Apple_QuickTime
-
-were all consulted.
-"""
-
-import struct
-import sys
-
-from mutagen import FileType, Metadata, StreamInfo, PaddingInfo
-from mutagen._constants import GENRES
-from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError,
-                           hashable, enum, get_size, resize_bytes)
-from mutagen._compat import (reraise, PY2, string_types, text_type, chr_,
-                             iteritems, PY3, cBytesIO, izip, xrange)
-from ._atom import Atoms, Atom, AtomError
-from ._util import parse_full_atom
-from ._as_entry import AudioSampleEntry, ASEntryError
-
-
-class error(IOError, MutagenError):
-    pass
-
-
-class MP4MetadataError(error):
-    pass
-
-
-class MP4StreamInfoError(error):
-    pass
-
-
-class MP4MetadataValueError(ValueError, MP4MetadataError):
-    pass
-
-
-__all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType']
-
-
-@enum
-class AtomDataType(object):
-    """Enum for `dataformat` attribute of MP4FreeForm.
-
-    .. versionadded:: 1.25
-    """
-
-    IMPLICIT = 0
-    """for use with tags for which no type needs to be indicated because
-    only one type is allowed"""
-
-    UTF8 = 1
-    """without any count or null terminator"""
-
-    UTF16 = 2
-    """also known as UTF-16BE"""
-
-    SJIS = 3
-    """deprecated unless it is needed for special Japanese characters"""
-
-    HTML = 6
-    """the HTML file header specifies which HTML version"""
-
-    XML = 7
-    """the XML header must identify the DTD or schemas"""
-
-    UUID = 8
-    """also known as GUID; stored as 16 bytes in binary (valid as an ID)"""
-
-    ISRC = 9
-    """stored as UTF-8 text (valid as an ID)"""
-
-    MI3P = 10
-    """stored as UTF-8 text (valid as an ID)"""
-
-    GIF = 12
-    """(deprecated) a GIF image"""
-
-    JPEG = 13
-    """a JPEG image"""
-
-    PNG = 14
-    """PNG image"""
-
-    URL = 15
-    """absolute, in UTF-8 characters"""
-
-    DURATION = 16
-    """in milliseconds, 32-bit integer"""
-
-    DATETIME = 17
-    """in UTC, counting seconds since midnight, January 1, 1904;
-    32 or 64-bits"""
-
-    GENRES = 18
-    """a list of enumerated values"""
-
-    INTEGER = 21
-    """a signed big-endian integer with length one of { 1,2,3,4,8 } bytes"""
-
-    RIAA_PA = 24
-    """RIAA parental advisory; { -1=no, 1=yes, 0=unspecified },
-    8-bit integer"""
-
-    UPC = 25
-    """Universal Product Code, in text UTF-8 format (valid as an ID)"""
-
-    BMP = 27
-    """Windows bitmap image"""
-
-
-@hashable
-class MP4Cover(bytes):
-    """A cover artwork.
-
-    Attributes:
-
-    * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG)
-    """
-
-    FORMAT_JPEG = AtomDataType.JPEG
-    FORMAT_PNG = AtomDataType.PNG
-
-    def __new__(cls, data, *args, **kwargs):
-        return bytes.__new__(cls, data)
-
-    def __init__(self, data, imageformat=FORMAT_JPEG):
-        self.imageformat = imageformat
-
-    __hash__ = bytes.__hash__
-
-    def __eq__(self, other):
-        if not isinstance(other, MP4Cover):
-            return bytes(self) == other
-
-        return (bytes(self) == bytes(other) and
-                self.imageformat == other.imageformat)
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def __repr__(self):
-        return "%s(%r, %r)" % (
-            type(self).__name__, bytes(self),
-            AtomDataType(self.imageformat))
-
-
-@hashable
-class MP4FreeForm(bytes):
-    """A freeform value.
- - Attributes: - - * dataformat -- format of the data (see AtomDataType) - """ - - FORMAT_DATA = AtomDataType.IMPLICIT # deprecated - FORMAT_TEXT = AtomDataType.UTF8 # deprecated - - def __new__(cls, data, *args, **kwargs): - return bytes.__new__(cls, data) - - def __init__(self, data, dataformat=AtomDataType.UTF8, version=0): - self.dataformat = dataformat - self.version = version - - __hash__ = bytes.__hash__ - - def __eq__(self, other): - if not isinstance(other, MP4FreeForm): - return bytes(self) == other - - return (bytes(self) == bytes(other) and - self.dataformat == other.dataformat and - self.version == other.version) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return "%s(%r, %r)" % ( - type(self).__name__, bytes(self), - AtomDataType(self.dataformat)) - - - -def _name2key(name): - if PY2: - return name - return name.decode("latin-1") - - -def _key2name(key): - if PY2: - return key - return key.encode("latin-1") - - -def _find_padding(atom_path): - # Check for padding "free" atom - # XXX: we only use them if they are adjacent to ilst, and only one. - # and there also is a top level free atom which we could use maybe..? - - meta, ilst = atom_path[-2:] - assert meta.name == b"meta" and ilst.name == b"ilst" - index = meta.children.index(ilst) - try: - prev = meta.children[index - 1] - if prev.name == b"free": - return prev - except IndexError: - pass - - try: - next_ = meta.children[index + 1] - if next_.name == b"free": - return next_ - except IndexError: - pass - - -class MP4Tags(DictProxy, Metadata): - r"""Dictionary containing Apple iTunes metadata list key/values. - - Keys are four byte identifiers, except for freeform ('----') - keys. Values are usually unicode strings, but some atoms have a - special structure: - - Text values (multiple values per key are supported): - - * '\\xa9nam' -- track title - * '\\xa9alb' -- album - * '\\xa9ART' -- artist - * 'aART' -- album artist - * '\\xa9wrt' -- composer - * '\\xa9day' -- year - * '\\xa9cmt' -- comment - * 'desc' -- description (usually used in podcasts) - * 'purd' -- purchase date - * '\\xa9grp' -- grouping - * '\\xa9gen' -- genre - * '\\xa9lyr' -- lyrics - * 'purl' -- podcast URL - * 'egid' -- podcast episode GUID - * 'catg' -- podcast category - * 'keyw' -- podcast keywords - * '\\xa9too' -- encoded by - * 'cprt' -- copyright - * 'soal' -- album sort order - * 'soaa' -- album artist sort order - * 'soar' -- artist sort order - * 'sonm' -- title sort order - * 'soco' -- composer sort order - * 'sosn' -- show sort order - * 'tvsh' -- show name - - Boolean values: - - * 'cpil' -- part of a compilation - * 'pgap' -- part of a gapless album - * 'pcst' -- podcast (iTunes reads this only on import) - - Tuples of ints (multiple values per key are supported): - - * 'trkn' -- track number, total tracks - * 'disk' -- disc number, total discs - - Others: - - * 'tmpo' -- tempo/BPM, 16 bit int - * 'covr' -- cover artwork, list of MP4Cover objects (which are - tagged strs) - * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead. - - The freeform '----' frames use a key in the format '----:mean:name' - where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique - identifier for this frame. The value is a str, but is probably - text that can be decoded as UTF-8. Multiple values per key are - supported. - - MP4 tag data cannot exist outside of the structure of an MP4 file, - so this class should not be manually instantiated. 
-
-    Unknown non-text tags and tags that failed to parse will be written
-    back as is.
-    """
-
-    def __init__(self, *args, **kwargs):
-        self._failed_atoms = {}
-        super(MP4Tags, self).__init__(*args, **kwargs)
-
-    def load(self, atoms, fileobj):
-        try:
-            path = atoms.path(b"moov", b"udta", b"meta", b"ilst")
-        except KeyError as key:
-            raise MP4MetadataError(key)
-
-        free = _find_padding(path)
-        self._padding = free.datalength if free is not None else 0
-
-        ilst = path[-1]
-        for atom in ilst.children:
-            ok, data = atom.read(fileobj)
-            if not ok:
-                raise MP4MetadataError("Not enough data")
-
-            try:
-                if atom.name in self.__atoms:
-                    info = self.__atoms[atom.name]
-                    info[0](self, atom, data)
-                else:
-                    # unknown atom, try as text
-                    self.__parse_text(atom, data, implicit=False)
-            except MP4MetadataError:
-                # parsing failed, save them so we can write them back
-                key = _name2key(atom.name)
-                self._failed_atoms.setdefault(key, []).append(data)
-
-    def __setitem__(self, key, value):
-        if not isinstance(key, str):
-            raise TypeError("key has to be str")
-        super(MP4Tags, self).__setitem__(key, value)
-
-    @classmethod
-    def _can_load(cls, atoms):
-        return b"moov.udta.meta.ilst" in atoms
-
-    @staticmethod
-    def _key_sort(item):
-        (key, v) = item
-        # iTunes always writes the tags in order of "relevance", try
-        # to copy it as closely as possible.
-        order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb",
-                 "\xa9gen", "gnre", "trkn", "disk",
-                 "\xa9day", "cpil", "pgap", "pcst", "tmpo",
-                 "\xa9too", "----", "covr", "\xa9lyr"]
-        order = dict(izip(order, xrange(len(order))))
-        last = len(order)
-        # If there's no key-based way to distinguish, order by length.
-        # If there's still no way, go by string comparison on the
-        # values, so we at least have something deterministic.
-        return (order.get(key[:4], last), len(repr(v)), repr(v))
-
-    def save(self, filename, padding=None):
-        """Save the metadata to the given filename."""
-
-        values = []
-        items = sorted(self.items(), key=self._key_sort)
-        for key, value in items:
-            atom_name = _key2name(key)[:4]
-            if atom_name in self.__atoms:
-                render_func = self.__atoms[atom_name][1]
-            else:
-                render_func = type(self).__render_text
-
-            try:
-                values.append(render_func(self, key, value))
-            except (TypeError, ValueError) as s:
-                reraise(MP4MetadataValueError, s, sys.exc_info()[2])
-
-        for key, failed in iteritems(self._failed_atoms):
-            # don't write atoms back if we have added a new one with
-            # the same name, this excludes freeform which can have
-            # multiple atoms with the same key (most parsers seem to be able
-            # to handle that)
-            if key in self:
-                assert _key2name(key) != b"----"
-                continue
-            for data in failed:
-                values.append(Atom.render(_key2name(key), data))
-
-        data = Atom.render(b"ilst", b"".join(values))
-
-        # Find the old atoms.
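(Aside: Atom.render and __render_data used in save() above emit the classic length-prefixed MP4 atom layout. A minimal sketch covering only the common 32-bit-size case; the helper name is illustrative, not mutagen API:)

import struct

def render_atom(name, payload):
    # Classic MP4 atom: 32-bit big-endian size (including the
    # 8-byte header), 4-byte name, then the payload.
    return struct.pack(">I4s", 8 + len(payload), name) + payload

# A text ilst entry: a 'data' sub-atom with version 0, flags 1 (UTF-8),
# 4 reserved zero bytes, then the UTF-8 text.
inner = render_atom(b"data", struct.pack(">2I", 1, 0) + u"Title".encode("utf-8"))
print(repr(render_atom(b"\xa9nam", inner)))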
- with open(filename, "rb+") as fileobj: - try: - atoms = Atoms(fileobj) - except AtomError as err: - reraise(error, err, sys.exc_info()[2]) - - self.__save(fileobj, atoms, data, padding) - - def __save(self, fileobj, atoms, data, padding): - try: - path = atoms.path(b"moov", b"udta", b"meta", b"ilst") - except KeyError: - self.__save_new(fileobj, atoms, data, padding) - else: - self.__save_existing(fileobj, atoms, path, data, padding) - - def __pad_ilst(self, data, length=None): - if length is None: - length = ((len(data) + 1023) & ~1023) - len(data) - return Atom.render(b"free", b"\x00" * length) - - def __save_new(self, fileobj, atoms, ilst_data, padding_func): - hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9) - meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data - - try: - path = atoms.path(b"moov", b"udta") - except KeyError: - path = atoms.path(b"moov") - - offset = path[-1]._dataoffset - - # ignoring some atom overhead... but we don't have padding left anyway - # and padding_size is guaranteed to be less than zero - content_size = get_size(fileobj) - offset - padding_size = -len(meta_data) - assert padding_size < 0 - info = PaddingInfo(padding_size, content_size) - new_padding = info._get_padding(padding_func) - new_padding = min(0xFFFFFFFF, new_padding) - - free = Atom.render(b"free", b"\x00" * new_padding) - meta = Atom.render(b"meta", meta_data + free) - if path[-1].name != b"udta": - # moov.udta not found -- create one - data = Atom.render(b"udta", meta) - else: - data = meta - - insert_bytes(fileobj, len(data), offset) - fileobj.seek(offset) - fileobj.write(data) - self.__update_parents(fileobj, path, len(data)) - self.__update_offsets(fileobj, atoms, len(data), offset) - - def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func): - # Replace the old ilst atom. 
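(Aside: __update_parents below has to cope with both atom size encodings: a 32-bit size field, or a sentinel size of 1 meaning the real 64-bit size follows the name. A minimal standalone sketch of that distinction, helper name illustrative:)

import struct

def bump_atom_size(header, delta):
    # Mirrors the 32-bit / 64-bit branch in __update_parents below.
    size, name = struct.unpack(">I4s", header[:8])
    if size == 1:  # 64-bit atom: real size stored after the name
        (size64,) = struct.unpack(">Q", header[8:16])
        return struct.pack(">I4sQ", 1, name, size64 + delta)
    return struct.pack(">I4s", size + delta, name) + header[8:]

hdr = struct.pack(">I4s", 100, b"moov")
assert struct.unpack(">I", bump_atom_size(hdr, 16)[:4])[0] == 116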
-        ilst = path[-1]
-        offset = ilst.offset
-        length = ilst.length
-
-        # Use adjacent free atom if there is one
-        free = _find_padding(path)
-        if free is not None:
-            offset = min(offset, free.offset)
-            length += free.length
-
-        # Always add a padding atom to make things easier
-        padding_overhead = len(Atom.render(b"free", b""))
-        content_size = get_size(fileobj) - (offset + length)
-        padding_size = length - (len(ilst_data) + padding_overhead)
-        info = PaddingInfo(padding_size, content_size)
-        new_padding = info._get_padding(padding_func)
-        # Limit padding size so we can be sure the free atom overhead is as we
-        # calculated above (see Atom.render)
-        new_padding = min(0xFFFFFFFF, new_padding)
-
-        ilst_data += Atom.render(b"free", b"\x00" * new_padding)
-
-        resize_bytes(fileobj, length, len(ilst_data), offset)
-        delta = len(ilst_data) - length
-
-        fileobj.seek(offset)
-        fileobj.write(ilst_data)
-        self.__update_parents(fileobj, path[:-1], delta)
-        self.__update_offsets(fileobj, atoms, delta, offset)
-
-    def __update_parents(self, fileobj, path, delta):
-        """Update all parent atoms with the new size."""
-
-        if delta == 0:
-            return
-
-        for atom in path:
-            fileobj.seek(atom.offset)
-            size = cdata.uint_be(fileobj.read(4))
-            if size == 1:  # 64bit
-                # skip name (4B) and read size (8B)
-                size = cdata.ulonglong_be(fileobj.read(12)[4:])
-                fileobj.seek(atom.offset + 8)
-                fileobj.write(cdata.to_ulonglong_be(size + delta))
-            else:  # 32bit
-                fileobj.seek(atom.offset)
-                fileobj.write(cdata.to_uint_be(size + delta))
-
-    def __update_offset_table(self, fileobj, fmt, atom, delta, offset):
-        """Update offset table in the specified atom."""
-        if atom.offset > offset:
-            atom.offset += delta
-        fileobj.seek(atom.offset + 12)
-        data = fileobj.read(atom.length - 12)
-        fmt = fmt % cdata.uint_be(data[:4])
-        offsets = struct.unpack(fmt, data[4:])
-        offsets = [o + (0, delta)[offset < o] for o in offsets]
-        fileobj.seek(atom.offset + 16)
-        fileobj.write(struct.pack(fmt, *offsets))
-
-    def __update_tfhd(self, fileobj, atom, delta, offset):
-        if atom.offset > offset:
-            atom.offset += delta
-        fileobj.seek(atom.offset + 9)
-        data = fileobj.read(atom.length - 9)
-        flags = cdata.uint_be(b"\x00" + data[:3])
-        if flags & 1:
-            o = cdata.ulonglong_be(data[7:15])
-            if o > offset:
-                o += delta
-            fileobj.seek(atom.offset + 16)
-            fileobj.write(cdata.to_ulonglong_be(o))
-
-    def __update_offsets(self, fileobj, atoms, delta, offset):
-        """Update offset tables in all 'stco' and 'co64' atoms."""
-        if delta == 0:
-            return
-        moov = atoms[b"moov"]
-        for atom in moov.findall(b'stco', True):
-            self.__update_offset_table(fileobj, ">%dI", atom, delta, offset)
-        for atom in moov.findall(b'co64', True):
-            self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset)
-        try:
-            for atom in atoms[b"moof"].findall(b'tfhd', True):
-                self.__update_tfhd(fileobj, atom, delta, offset)
-        except KeyError:
-            pass
-
-    def __parse_data(self, atom, data):
-        pos = 0
-        while pos < atom.length - 8:
-            head = data[pos:pos + 12]
-            if len(head) != 12:
-                raise MP4MetadataError("truncated atom %r" % atom.name)
-            length, name = struct.unpack(">I4s", head[:8])
-            version = ord(head[8:9])
-            flags = struct.unpack(">I", b"\x00" + head[9:12])[0]
-            if name != b"data":
-                raise MP4MetadataError(
-                    "unexpected atom %r inside %r" % (name, atom.name))
-
-            chunk = data[pos + 16:pos + length]
-            if len(chunk) != length - 16:
-                raise MP4MetadataError("truncated atom %r" % atom.name)
-            yield version, flags, chunk
-            pos += length
-
-    def __add(self, key, value, single=False):
-        assert 
isinstance(key, str) - - if single: - self[key] = value - else: - self.setdefault(key, []).extend(value) - - def __render_data(self, key, version, flags, value): - return Atom.render(_key2name(key), b"".join([ - Atom.render( - b"data", struct.pack(">2I", version << 24 | flags, 0) + data) - for data in value])) - - def __parse_freeform(self, atom, data): - length = cdata.uint_be(data[:4]) - mean = data[12:length] - pos = length - length = cdata.uint_be(data[pos:pos + 4]) - name = data[pos + 12:pos + length] - pos += length - value = [] - while pos < atom.length - 8: - length, atom_name = struct.unpack(">I4s", data[pos:pos + 8]) - if atom_name != b"data": - raise MP4MetadataError( - "unexpected atom %r inside %r" % (atom_name, atom.name)) - - version = ord(data[pos + 8:pos + 8 + 1]) - flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0] - value.append(MP4FreeForm(data[pos + 16:pos + length], - dataformat=flags, version=version)) - pos += length - - key = _name2key(atom.name + b":" + mean + b":" + name) - self.__add(key, value) - - def __render_freeform(self, key, value): - if isinstance(value, bytes): - value = [value] - - dummy, mean, name = _key2name(key).split(b":", 2) - mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean - name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name - - data = b"" - for v in value: - flags = AtomDataType.UTF8 - version = 0 - if isinstance(v, MP4FreeForm): - flags = v.dataformat - version = v.version - - data += struct.pack( - ">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0) - data += v - - return Atom.render(b"----", mean + name + data) - - def __parse_pair(self, atom, data): - key = _name2key(atom.name) - values = [struct.unpack(">2H", d[2:6]) for - version, flags, d in self.__parse_data(atom, data)] - self.__add(key, values) - - def __render_pair(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">4H", 0, track, total, 0)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) - - def __render_pair_no_trailing(self, key, value): - data = [] - for (track, total) in value: - if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: - data.append(struct.pack(">3H", 0, track, total)) - else: - raise MP4MetadataValueError( - "invalid numeric pair %r" % ((track, total),)) - return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) - - def __parse_genre(self, atom, data): - values = [] - for version, flags, data in self.__parse_data(atom, data): - # version = 0, flags = 0 - if len(data) != 2: - raise MP4MetadataValueError("invalid genre") - genre = cdata.short_be(data) - # Translate to a freeform genre. 
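(Aside: freeform tags are addressed as '----:mean:name', and each of mean/name is serialized as its own length-prefixed sub-atom, as __parse_freeform/__render_freeform above show. A minimal sketch, helper names illustrative:)

import struct

def freeform_key(mean, name):
    # MP4Tags stores freeform atoms under "----:mean:name",
    # e.g. "----:com.apple.iTunes:REPLAYGAIN_TRACK_GAIN".
    return "----:%s:%s" % (mean, name)

def render_mean(mean):
    # Layout mirrored from __render_freeform above:
    # 32-bit length (incl. 12-byte header), b"mean", 32-bit zero, text.
    data = mean.encode("utf-8")
    return struct.pack(">I4sI", len(data) + 12, b"mean", 0) + data

print(freeform_key("com.apple.iTunes", "REPLAYGAIN_TRACK_GAIN"))
print(len(render_mean("com.apple.iTunes")))  # 16 text bytes + 12 header = 28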
- try: - genre = GENRES[genre - 1] - except IndexError: - # this will make us write it back at least - raise MP4MetadataValueError("unknown genre") - values.append(genre) - key = _name2key(b"\xa9gen") - self.__add(key, values) - - def __parse_tempo(self, atom, data): - values = [] - for version, flags, data in self.__parse_data(atom, data): - # version = 0, flags = 0 or 21 - if len(data) != 2: - raise MP4MetadataValueError("invalid tempo") - values.append(cdata.ushort_be(data)) - key = _name2key(atom.name) - self.__add(key, values) - - def __render_tempo(self, key, value): - try: - if len(value) == 0: - return self.__render_data(key, 0, AtomDataType.INTEGER, b"") - - if (min(value) < 0) or (max(value) >= 2 ** 16): - raise MP4MetadataValueError( - "invalid 16 bit integers: %r" % value) - except TypeError: - raise MP4MetadataValueError( - "tmpo must be a list of 16 bit integers") - - values = [cdata.to_ushort_be(v) for v in value] - return self.__render_data(key, 0, AtomDataType.INTEGER, values) - - def __parse_bool(self, atom, data): - for version, flags, data in self.__parse_data(atom, data): - if len(data) != 1: - raise MP4MetadataValueError("invalid bool") - - value = bool(ord(data)) - key = _name2key(atom.name) - self.__add(key, value, single=True) - - def __render_bool(self, key, value): - return self.__render_data( - key, 0, AtomDataType.INTEGER, [chr_(bool(value))]) - - def __parse_cover(self, atom, data): - values = [] - pos = 0 - while pos < atom.length - 8: - length, name, imageformat = struct.unpack(">I4sI", - data[pos:pos + 12]) - if name != b"data": - if name == b"name": - pos += length - continue - raise MP4MetadataError( - "unexpected atom %r inside 'covr'" % name) - if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG): - # Sometimes AtomDataType.IMPLICIT or simply wrong. - # In all cases it was jpeg, so default to it - imageformat = MP4Cover.FORMAT_JPEG - cover = MP4Cover(data[pos + 16:pos + length], imageformat) - values.append(cover) - pos += length - - key = _name2key(atom.name) - self.__add(key, values) - - def __render_cover(self, key, value): - atom_data = [] - for cover in value: - try: - imageformat = cover.imageformat - except AttributeError: - imageformat = MP4Cover.FORMAT_JPEG - atom_data.append(Atom.render( - b"data", struct.pack(">2I", imageformat, 0) + cover)) - return Atom.render(_key2name(key), b"".join(atom_data)) - - def __parse_text(self, atom, data, implicit=True): - # implicit = False, for parsing unknown atoms only take utf8 ones. - # For known ones we can assume the implicit are utf8 too. 
- values = [] - for version, flags, atom_data in self.__parse_data(atom, data): - if implicit: - if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8): - raise MP4MetadataError( - "Unknown atom type %r for %r" % (flags, atom.name)) - else: - if flags != AtomDataType.UTF8: - raise MP4MetadataError( - "%r is not text, ignore" % atom.name) - - try: - text = atom_data.decode("utf-8") - except UnicodeDecodeError as e: - raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e)) - - values.append(text) - - key = _name2key(atom.name) - self.__add(key, values) - - def __render_text(self, key, value, flags=AtomDataType.UTF8): - if isinstance(value, string_types): - value = [value] - - encoded = [] - for v in value: - if not isinstance(v, text_type): - if PY3: - raise TypeError("%r not str" % v) - v = v.decode("utf-8") - encoded.append(v.encode("utf-8")) - - return self.__render_data(key, 0, flags, encoded) - - def delete(self, filename): - """Remove the metadata from the given filename.""" - - self._failed_atoms.clear() - self.clear() - self.save(filename, padding=lambda x: 0) - - __atoms = { - b"----": (__parse_freeform, __render_freeform), - b"trkn": (__parse_pair, __render_pair), - b"disk": (__parse_pair, __render_pair_no_trailing), - b"gnre": (__parse_genre, None), - b"tmpo": (__parse_tempo, __render_tempo), - b"cpil": (__parse_bool, __render_bool), - b"pgap": (__parse_bool, __render_bool), - b"pcst": (__parse_bool, __render_bool), - b"covr": (__parse_cover, __render_cover), - b"purl": (__parse_text, __render_text), - b"egid": (__parse_text, __render_text), - } - - # these allow implicit flags and parse as text - for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt", - b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp", - b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too", - b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco", - b"sosn", b"tvsh"]: - __atoms[name] = (__parse_text, __render_text) - - def pprint(self): - - def to_line(key, value): - assert isinstance(key, text_type) - if isinstance(value, text_type): - return u"%s=%s" % (key, value) - return u"%s=%r" % (key, value) - - values = [] - for key, value in sorted(iteritems(self)): - if not isinstance(key, text_type): - key = key.decode("latin-1") - if key == "covr": - values.append(u"%s=%s" % (key, u", ".join( - [u"[%d bytes of data]" % len(data) for data in value]))) - elif isinstance(value, list): - for v in value: - values.append(to_line(key, v)) - else: - values.append(to_line(key, value)) - return u"\n".join(values) - - -class MP4Info(StreamInfo): - """MPEG-4 stream information. - - Attributes: - - * bitrate -- bitrate in bits per second, as an int - * length -- file length in seconds, as a float - * channels -- number of audio channels - * sample_rate -- audio sampling rate in Hz - * bits_per_sample -- bits per sample - * codec (string): - * if starting with ``"mp4a"`` uses an mp4a audio codec - (see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``) - * for everything else see a list of possible values at - http://www.mp4ra.org/codecs.html - - e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc. - * codec_description (string): - Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in - the future, use for display purposes only. 
- """ - - bitrate = 0 - channels = 0 - sample_rate = 0 - bits_per_sample = 0 - codec = u"" - codec_name = u"" - - def __init__(self, atoms, fileobj): - try: - moov = atoms[b"moov"] - except KeyError: - raise MP4StreamInfoError("not a MP4 file") - - for trak in moov.findall(b"trak"): - hdlr = trak[b"mdia", b"hdlr"] - ok, data = hdlr.read(fileobj) - if not ok: - raise MP4StreamInfoError("Not enough data") - if data[8:12] == b"soun": - break - else: - raise MP4StreamInfoError("track has no audio data") - - mdhd = trak[b"mdia", b"mdhd"] - ok, data = mdhd.read(fileobj) - if not ok: - raise MP4StreamInfoError("Not enough data") - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise MP4StreamInfoError(e) - - if version == 0: - offset = 8 - fmt = ">2I" - elif version == 1: - offset = 16 - fmt = ">IQ" - else: - raise MP4StreamInfoError("Unknown mdhd version %d" % version) - - end = offset + struct.calcsize(fmt) - unit, length = struct.unpack(fmt, data[offset:end]) - try: - self.length = float(length) / unit - except ZeroDivisionError: - self.length = 0 - - try: - atom = trak[b"mdia", b"minf", b"stbl", b"stsd"] - except KeyError: - pass - else: - self._parse_stsd(atom, fileobj) - - def _parse_stsd(self, atom, fileobj): - """Sets channels, bits_per_sample, sample_rate and optionally bitrate. - - Can raise MP4StreamInfoError. - """ - - assert atom.name == b"stsd" - - ok, data = atom.read(fileobj) - if not ok: - raise MP4StreamInfoError("Invalid stsd") - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise MP4StreamInfoError(e) - - if version != 0: - raise MP4StreamInfoError("Unsupported stsd version") - - try: - num_entries, offset = cdata.uint32_be_from(data, 0) - except cdata.error as e: - raise MP4StreamInfoError(e) - - if num_entries == 0: - return - - # look at the first entry if there is one - entry_fileobj = cBytesIO(data[offset:]) - try: - entry_atom = Atom(entry_fileobj) - except AtomError as e: - raise MP4StreamInfoError(e) - - try: - entry = AudioSampleEntry(entry_atom, entry_fileobj) - except ASEntryError as e: - raise MP4StreamInfoError(e) - else: - self.channels = entry.channels - self.bits_per_sample = entry.sample_size - self.sample_rate = entry.sample_rate - self.bitrate = entry.bitrate - self.codec = entry.codec - self.codec_description = entry.codec_description - - def pprint(self): - return "MPEG-4 audio (%s), %.2f seconds, %d bps" % ( - self.codec_description, self.length, self.bitrate) - - -class MP4(FileType): - """An MPEG-4 audio file, probably containing AAC. - - If more than one track is present in the file, the first is used. - Only audio ('soun') tracks will be read. 
- - :ivar info: :class:`MP4Info` - :ivar tags: :class:`MP4Tags` - """ - - MP4Tags = MP4Tags - - _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] - - def load(self, filename): - self.filename = filename - with open(filename, "rb") as fileobj: - try: - atoms = Atoms(fileobj) - except AtomError as err: - reraise(error, err, sys.exc_info()[2]) - - try: - self.info = MP4Info(atoms, fileobj) - except error: - raise - except Exception as err: - reraise(MP4StreamInfoError, err, sys.exc_info()[2]) - - if not MP4Tags._can_load(atoms): - self.tags = None - self._padding = 0 - else: - try: - self.tags = self.MP4Tags(atoms, fileobj) - except error: - raise - except Exception as err: - reraise(MP4MetadataError, err, sys.exc_info()[2]) - else: - self._padding = self.tags._padding - - def add_tags(self): - if self.tags is None: - self.tags = self.MP4Tags() - else: - raise error("an MP4 tag already exists") - - @staticmethod - def score(filename, fileobj, header_data): - return (b"ftyp" in header_data) + (b"mp4" in header_data) - - -Open = MP4 - - -def delete(filename): - """Remove tags from a file.""" - - MP4(filename).delete() diff --git a/resources/lib/libraries/mutagen/mp4/__pycache__/__init__.cpython-35.pyc b/resources/lib/libraries/mutagen/mp4/__pycache__/__init__.cpython-35.pyc deleted file mode 100644 index de968da9..00000000 Binary files a/resources/lib/libraries/mutagen/mp4/__pycache__/__init__.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc b/resources/lib/libraries/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc deleted file mode 100644 index 31483c46..00000000 Binary files a/resources/lib/libraries/mutagen/mp4/__pycache__/_as_entry.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/mp4/__pycache__/_atom.cpython-35.pyc b/resources/lib/libraries/mutagen/mp4/__pycache__/_atom.cpython-35.pyc deleted file mode 100644 index f4abf385..00000000 Binary files a/resources/lib/libraries/mutagen/mp4/__pycache__/_atom.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/mp4/__pycache__/_util.cpython-35.pyc b/resources/lib/libraries/mutagen/mp4/__pycache__/_util.cpython-35.pyc deleted file mode 100644 index e7df30cc..00000000 Binary files a/resources/lib/libraries/mutagen/mp4/__pycache__/_util.cpython-35.pyc and /dev/null differ diff --git a/resources/lib/libraries/mutagen/mp4/_as_entry.py b/resources/lib/libraries/mutagen/mp4/_as_entry.py deleted file mode 100644 index 306d5720..00000000 --- a/resources/lib/libraries/mutagen/mp4/_as_entry.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -from mutagen._compat import cBytesIO, xrange -from mutagen.aac import ProgramConfigElement -from mutagen._util import BitReader, BitReaderError, cdata -from mutagen._compat import text_type -from ._util import parse_full_atom -from ._atom import Atom, AtomError - - -class ASEntryError(Exception): - pass - - -class AudioSampleEntry(object): - """Parses an AudioSampleEntry atom. - - Private API. 
- - Attrs: - channels (int): number of channels - sample_size (int): sample size in bits - sample_rate (int): sample rate in Hz - bitrate (int): bits per second (0 means unknown) - codec (string): - audio codec, either 'mp4a[.*][.*]' (rfc6381) or 'alac' - codec_description (string): descriptive codec name e.g. "AAC LC+SBR" - - Can raise ASEntryError. - """ - - channels = 0 - sample_size = 0 - sample_rate = 0 - bitrate = 0 - codec = None - codec_description = None - - def __init__(self, atom, fileobj): - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("too short %r atom" % atom.name) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - # SampleEntry - r.skip(6 * 8) # reserved - r.skip(2 * 8) # data_ref_index - - # AudioSampleEntry - r.skip(8 * 8) # reserved - self.channels = r.bits(16) - self.sample_size = r.bits(16) - r.skip(2 * 8) # pre_defined - r.skip(2 * 8) # reserved - self.sample_rate = r.bits(32) >> 16 - except BitReaderError as e: - raise ASEntryError(e) - - assert r.is_aligned() - - try: - extra = Atom(fileobj) - except AtomError as e: - raise ASEntryError(e) - - self.codec = atom.name.decode("latin-1") - self.codec_description = None - - if atom.name == b"mp4a" and extra.name == b"esds": - self._parse_esds(extra, fileobj) - elif atom.name == b"alac" and extra.name == b"alac": - self._parse_alac(extra, fileobj) - elif atom.name == b"ac-3" and extra.name == b"dac3": - self._parse_dac3(extra, fileobj) - - if self.codec_description is None: - self.codec_description = self.codec.upper() - - def _parse_dac3(self, atom, fileobj): - # ETSI TS 102 366 - - assert atom.name == b"dac3" - - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % atom.name) - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - # sample_rate in AudioSampleEntry covers values in - # fscod2 and not just fscod, so ignore fscod here. 
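(Aside: the dac3 parsing below consumes fixed-width bit fields from the ETSI TS 102 366 layout. A minimal stand-in for mutagen's BitReader, enough to show how acmod and lfeon yield the channel count; Python 3 only, names illustrative:)

class TinyBitReader(object):
    # Minimal MSB-first bit reader for the fixed-width reads below.
    def __init__(self, data):
        self._value = int.from_bytes(data, "big")  # Python 3 only
        self._left = len(data) * 8

    def bits(self, count):
        self._left -= count
        return (self._value >> self._left) & ((1 << count) - 1)

# dac3 payload bits: fscod(2) bsid(5) bsmod(3) acmod(3) lfeon(1) ...
r = TinyBitReader(b"\x10\x3d\xc0")
r.bits(2 + 5 + 3)                 # skip fscod, bsid, bsmod
acmod, lfeon = r.bits(3), r.bits(1)
print([2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon)  # acmod 7 + LFE -> 6 (5.1)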
- try: - r.skip(2 + 5 + 3) # fscod, bsid, bsmod - acmod = r.bits(3) - lfeon = r.bits(1) - bit_rate_code = r.bits(5) - r.skip(5) # reserved - except BitReaderError as e: - raise ASEntryError(e) - - self.channels = [2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon - - try: - self.bitrate = [ - 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, - 224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000 - except IndexError: - pass - - def _parse_alac(self, atom, fileobj): - # https://alac.macosforge.org/trac/browser/trunk/ - # ALACMagicCookieDescription.txt - - assert atom.name == b"alac" - - ok, data = atom.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % atom.name) - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise ASEntryError(e) - - if version != 0: - raise ASEntryError("Unsupported version %d" % version) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - # for some files the AudioSampleEntry values default to 44100/2chan - # and the real info is in the alac cookie, so prefer it - r.skip(32) # frameLength - compatibleVersion = r.bits(8) - if compatibleVersion != 0: - return - self.sample_size = r.bits(8) - r.skip(8 + 8 + 8) - self.channels = r.bits(8) - r.skip(16 + 32) - self.bitrate = r.bits(32) - self.sample_rate = r.bits(32) - except BitReaderError as e: - raise ASEntryError(e) - - def _parse_esds(self, esds, fileobj): - assert esds.name == b"esds" - - ok, data = esds.read(fileobj) - if not ok: - raise ASEntryError("truncated %s atom" % esds.name) - - try: - version, flags, data = parse_full_atom(data) - except ValueError as e: - raise ASEntryError(e) - - if version != 0: - raise ASEntryError("Unsupported version %d" % version) - - fileobj = cBytesIO(data) - r = BitReader(fileobj) - - try: - tag = r.bits(8) - if tag != ES_Descriptor.TAG: - raise ASEntryError("unexpected descriptor: %d" % tag) - assert r.is_aligned() - except BitReaderError as e: - raise ASEntryError(e) - - try: - decSpecificInfo = ES_Descriptor.parse(fileobj) - except DescriptorError as e: - raise ASEntryError(e) - dec_conf_desc = decSpecificInfo.decConfigDescr - - self.bitrate = dec_conf_desc.avgBitrate - self.codec += dec_conf_desc.codec_param - self.codec_description = dec_conf_desc.codec_desc - - decSpecificInfo = dec_conf_desc.decSpecificInfo - if decSpecificInfo is not None: - if decSpecificInfo.channels != 0: - self.channels = decSpecificInfo.channels - - if decSpecificInfo.sample_rate != 0: - self.sample_rate = decSpecificInfo.sample_rate - - -class DescriptorError(Exception): - pass - - -class BaseDescriptor(object): - - TAG = None - - @classmethod - def _parse_desc_length_file(cls, fileobj): - """May raise ValueError""" - - value = 0 - for i in xrange(4): - try: - b = cdata.uint8(fileobj.read(1)) - except cdata.error as e: - raise ValueError(e) - value = (value << 7) | (b & 0x7f) - if not b >> 7: - break - else: - raise ValueError("invalid descriptor length") - - return value - - @classmethod - def parse(cls, fileobj): - """Returns a parsed instance of the called type. - The file position is right after the descriptor after this returns. 
- - Raises DescriptorError - """ - - try: - length = cls._parse_desc_length_file(fileobj) - except ValueError as e: - raise DescriptorError(e) - pos = fileobj.tell() - instance = cls(fileobj, length) - left = length - (fileobj.tell() - pos) - if left < 0: - raise DescriptorError("descriptor parsing read too much data") - fileobj.seek(left, 1) - return instance - - -class ES_Descriptor(BaseDescriptor): - - TAG = 0x3 - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - try: - self.ES_ID = r.bits(16) - self.streamDependenceFlag = r.bits(1) - self.URL_Flag = r.bits(1) - self.OCRstreamFlag = r.bits(1) - self.streamPriority = r.bits(5) - if self.streamDependenceFlag: - self.dependsOn_ES_ID = r.bits(16) - if self.URL_Flag: - URLlength = r.bits(8) - self.URLstring = r.bytes(URLlength) - if self.OCRstreamFlag: - self.OCR_ES_Id = r.bits(16) - - tag = r.bits(8) - except BitReaderError as e: - raise DescriptorError(e) - - if tag != DecoderConfigDescriptor.TAG: - raise DescriptorError("unexpected DecoderConfigDescrTag %d" % tag) - - assert r.is_aligned() - self.decConfigDescr = DecoderConfigDescriptor.parse(fileobj) - - -class DecoderConfigDescriptor(BaseDescriptor): - - TAG = 0x4 - - decSpecificInfo = None - """A DecoderSpecificInfo, optional""" - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - - try: - self.objectTypeIndication = r.bits(8) - self.streamType = r.bits(6) - self.upStream = r.bits(1) - self.reserved = r.bits(1) - self.bufferSizeDB = r.bits(24) - self.maxBitrate = r.bits(32) - self.avgBitrate = r.bits(32) - - if (self.objectTypeIndication, self.streamType) != (0x40, 0x5): - return - - # all from here is optional - if length * 8 == r.get_position(): - return - - tag = r.bits(8) - except BitReaderError as e: - raise DescriptorError(e) - - if tag == DecoderSpecificInfo.TAG: - assert r.is_aligned() - self.decSpecificInfo = DecoderSpecificInfo.parse(fileobj) - - @property - def codec_param(self): - """string""" - - param = u".%X" % self.objectTypeIndication - info = self.decSpecificInfo - if info is not None: - param += u".%d" % info.audioObjectType - return param - - @property - def codec_desc(self): - """string or None""" - - info = self.decSpecificInfo - desc = None - if info is not None: - desc = info.description - return desc - - -class DecoderSpecificInfo(BaseDescriptor): - - TAG = 0x5 - - _TYPE_NAMES = [ - None, "AAC MAIN", "AAC LC", "AAC SSR", "AAC LTP", "SBR", - "AAC scalable", "TwinVQ", "CELP", "HVXC", None, None, "TTSI", - "Main synthetic", "Wavetable synthesis", "General MIDI", - "Algorithmic Synthesis and Audio FX", "ER AAC LC", None, "ER AAC LTP", - "ER AAC scalable", "ER Twin VQ", "ER BSAC", "ER AAC LD", "ER CELP", - "ER HVXC", "ER HILN", "ER Parametric", "SSC", "PS", "MPEG Surround", - None, "Layer-1", "Layer-2", "Layer-3", "DST", "ALS", "SLS", - "SLS non-core", "ER AAC ELD", "SMR Simple", "SMR Main", "USAC", - "SAOC", "LD MPEG Surround", "USAC" - ] - - _FREQS = [ - 96000, 88200, 64000, 48000, 44100, 32000, 24000, 22050, 16000, - 12000, 11025, 8000, 7350, - ] - - @property - def description(self): - """string or None if unknown""" - - name = None - try: - name = self._TYPE_NAMES[self.audioObjectType] - except IndexError: - pass - if name is None: - return - if self.sbrPresentFlag == 1: - name += "+SBR" - if self.psPresentFlag == 1: - name += "+PS" - return text_type(name) - - @property - def sample_rate(self): - """0 means unknown""" - - if self.sbrPresentFlag == 1: - return 
self.extensionSamplingFrequency - elif self.sbrPresentFlag == 0: - return self.samplingFrequency - else: - # these are all types that support SBR - aot_can_sbr = (1, 2, 3, 4, 6, 17, 19, 20, 22) - if self.audioObjectType not in aot_can_sbr: - return self.samplingFrequency - # there shouldn't be SBR for > 48KHz - if self.samplingFrequency > 24000: - return self.samplingFrequency - # either samplingFrequency or samplingFrequency * 2 - return 0 - - @property - def channels(self): - """channel count or 0 for unknown""" - - # from ProgramConfigElement() - if hasattr(self, "pce_channels"): - return self.pce_channels - - conf = getattr( - self, "extensionChannelConfiguration", self.channelConfiguration) - - if conf == 1: - if self.psPresentFlag == -1: - return 0 - elif self.psPresentFlag == 1: - return 2 - else: - return 1 - elif conf == 7: - return 8 - elif conf > 7: - return 0 - else: - return conf - - def _get_audio_object_type(self, r): - """Raises BitReaderError""" - - audioObjectType = r.bits(5) - if audioObjectType == 31: - audioObjectTypeExt = r.bits(6) - audioObjectType = 32 + audioObjectTypeExt - return audioObjectType - - def _get_sampling_freq(self, r): - """Raises BitReaderError""" - - samplingFrequencyIndex = r.bits(4) - if samplingFrequencyIndex == 0xf: - samplingFrequency = r.bits(24) - else: - try: - samplingFrequency = self._FREQS[samplingFrequencyIndex] - except IndexError: - samplingFrequency = 0 - return samplingFrequency - - def __init__(self, fileobj, length): - """Raises DescriptorError""" - - r = BitReader(fileobj) - try: - self._parse(r, length) - except BitReaderError as e: - raise DescriptorError(e) - - def _parse(self, r, length): - """Raises BitReaderError""" - - def bits_left(): - return length * 8 - r.get_position() - - self.audioObjectType = self._get_audio_object_type(r) - self.samplingFrequency = self._get_sampling_freq(r) - self.channelConfiguration = r.bits(4) - - self.sbrPresentFlag = -1 - self.psPresentFlag = -1 - if self.audioObjectType in (5, 29): - self.extensionAudioObjectType = 5 - self.sbrPresentFlag = 1 - if self.audioObjectType == 29: - self.psPresentFlag = 1 - self.extensionSamplingFrequency = self._get_sampling_freq(r) - self.audioObjectType = self._get_audio_object_type(r) - if self.audioObjectType == 22: - self.extensionChannelConfiguration = r.bits(4) - else: - self.extensionAudioObjectType = 0 - - if self.audioObjectType in (1, 2, 3, 4, 6, 7, 17, 19, 20, 21, 22, 23): - try: - GASpecificConfig(r, self) - except NotImplementedError: - # unsupported, (warn?) 
-
-                return
-        else:
-            # unsupported
-            return
-
-        if self.audioObjectType in (
-                17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 39):
-            epConfig = r.bits(2)
-            if epConfig in (2, 3):
-                # unsupported
-                return
-
-        if self.extensionAudioObjectType != 5 and bits_left() >= 16:
-            syncExtensionType = r.bits(11)
-            if syncExtensionType == 0x2b7:
-                self.extensionAudioObjectType = self._get_audio_object_type(r)
-
-                if self.extensionAudioObjectType == 5:
-                    self.sbrPresentFlag = r.bits(1)
-                    if self.sbrPresentFlag == 1:
-                        self.extensionSamplingFrequency = \
-                            self._get_sampling_freq(r)
-                        if bits_left() >= 12:
-                            syncExtensionType = r.bits(11)
-                            if syncExtensionType == 0x548:
-                                self.psPresentFlag = r.bits(1)
-
-                if self.extensionAudioObjectType == 22:
-                    self.sbrPresentFlag = r.bits(1)
-                    if self.sbrPresentFlag == 1:
-                        self.extensionSamplingFrequency = \
-                            self._get_sampling_freq(r)
-                    self.extensionChannelConfiguration = r.bits(4)
-
-
-def GASpecificConfig(r, info):
-    """Reads a GASpecificConfig, which is needed to locate the data that
-    follows it (no length field is defined for skipping it) and to read
-    the program_config_element, which can contain channel counts.
-
-    May raise BitReaderError on error or
-    NotImplementedError if some reserved data was set.
-    """
-
-    assert isinstance(info, DecoderSpecificInfo)
-
-    r.skip(1)  # frameLengthFlag
-    dependsOnCoreCoder = r.bits(1)
-    if dependsOnCoreCoder:
-        r.skip(14)
-    extensionFlag = r.bits(1)
-    if not info.channelConfiguration:
-        pce = ProgramConfigElement(r)
-        info.pce_channels = pce.channels
-    if info.audioObjectType == 6 or info.audioObjectType == 20:
-        r.skip(3)
-    if extensionFlag:
-        if info.audioObjectType == 22:
-            r.skip(5 + 11)
-        if info.audioObjectType in (17, 19, 20, 23):
-            r.skip(1 + 1 + 1)
-        extensionFlag3 = r.bits(1)
-        if extensionFlag3 != 0:
-            raise NotImplementedError("extensionFlag3 set")
diff --git a/resources/lib/libraries/mutagen/mp4/_atom.py b/resources/lib/libraries/mutagen/mp4/_atom.py
deleted file mode 100644
index f73eb556..00000000
--- a/resources/lib/libraries/mutagen/mp4/_atom.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2006 Joe Wreschnig
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-import struct
-
-from mutagen._compat import PY2
-
-# This is not an exhaustive list of container atoms, but just the
-# ones this module needs to peek inside.
-_CONTAINERS = [b"moov", b"udta", b"trak", b"mdia", b"meta", b"ilst",
-               b"stbl", b"minf", b"moof", b"traf"]
-_SKIP_SIZE = {b"meta": 4}
-
-
-class AtomError(Exception):
-    pass
-
-
-class Atom(object):
-    """An individual atom.
-
-    Attributes:
-    children -- list of child atoms (or None for non-container atoms)
-    length -- length of this atom, including length and name
-    datalength -- length of this atom without length, name
-    name -- four byte name of the atom, as a str
-    offset -- location in the constructor-given fileobj of this atom
-
-    This structure should only be used internally by Mutagen.
- """ - - children = None - - def __init__(self, fileobj, level=0): - """May raise AtomError""" - - self.offset = fileobj.tell() - try: - self.length, self.name = struct.unpack(">I4s", fileobj.read(8)) - except struct.error: - raise AtomError("truncated data") - self._dataoffset = self.offset + 8 - if self.length == 1: - try: - self.length, = struct.unpack(">Q", fileobj.read(8)) - except struct.error: - raise AtomError("truncated data") - self._dataoffset += 8 - if self.length < 16: - raise AtomError( - "64 bit atom length can only be 16 and higher") - elif self.length == 0: - if level != 0: - raise AtomError( - "only a top-level atom can have zero length") - # Only the last atom is supposed to have a zero-length, meaning it - # extends to the end of file. - fileobj.seek(0, 2) - self.length = fileobj.tell() - self.offset - fileobj.seek(self.offset + 8, 0) - elif self.length < 8: - raise AtomError( - "atom length can only be 0, 1 or 8 and higher") - - if self.name in _CONTAINERS: - self.children = [] - fileobj.seek(_SKIP_SIZE.get(self.name, 0), 1) - while fileobj.tell() < self.offset + self.length: - self.children.append(Atom(fileobj, level + 1)) - else: - fileobj.seek(self.offset + self.length, 0) - - @property - def datalength(self): - return self.length - (self._dataoffset - self.offset) - - def read(self, fileobj): - """Return if all data could be read and the atom payload""" - - fileobj.seek(self._dataoffset, 0) - data = fileobj.read(self.datalength) - return len(data) == self.datalength, data - - @staticmethod - def render(name, data): - """Render raw atom data.""" - # this raises OverflowError if Py_ssize_t can't handle the atom data - size = len(data) + 8 - if size <= 0xFFFFFFFF: - return struct.pack(">I4s", size, name) + data - else: - return struct.pack(">I4sQ", 1, name, size + 8) + data - - def findall(self, name, recursive=False): - """Recursively find all child atoms by specified name.""" - if self.children is not None: - for child in self.children: - if child.name == name: - yield child - if recursive: - for atom in child.findall(name, True): - yield atom - - def __getitem__(self, remaining): - """Look up a child atom, potentially recursively. - - e.g. atom['udta', 'meta'] => <Atom name='meta' ...> - """ - if not remaining: - return self - elif self.children is None: - raise KeyError("%r is not a container" % self.name) - for child in self.children: - if child.name == remaining[0]: - return child[remaining[1:]] - else: - raise KeyError("%r not found" % remaining[0]) - - def __repr__(self): - cls = self.__class__.__name__ - if self.children is None: - return "<%s name=%r length=%r offset=%r>" % ( - cls, self.name, self.length, self.offset) - else: - children = "\n".join([" " + line for child in self.children - for line in repr(child).splitlines()]) - return "<%s name=%r length=%r offset=%r\n%s>" % ( - cls, self.name, self.length, self.offset, children) - - -class Atoms(object): - """Root atoms in a given file. - - Attributes: - atoms -- a list of top-level atoms as Atom objects - - This structure should only be used internally by Mutagen. - """ - - def __init__(self, fileobj): - self.atoms = [] - fileobj.seek(0, 2) - end = fileobj.tell() - fileobj.seek(0) - while fileobj.tell() + 8 <= end: - self.atoms.append(Atom(fileobj)) - - def path(self, *names): - """Look up and return the complete path of an atom. - - For example, atoms.path('moov', 'udta', 'meta') will return a - list of three atoms, corresponding to the moov, udta, and meta - atoms. 
-        """
-
-        path = [self]
-        for name in names:
-            path.append(path[-1][name, ])
-        return path[1:]
-
-    def __contains__(self, names):
-        try:
-            self[names]
-        except KeyError:
-            return False
-        return True
-
-    def __getitem__(self, names):
-        """Look up a child atom.
-
-        'names' may be a list of atoms (['moov', 'udta']) or a string
-        specifying the complete path ('moov.udta').
-        """
-
-        if PY2:
-            if isinstance(names, basestring):
-                names = names.split(b".")
-        else:
-            if isinstance(names, bytes):
-                names = names.split(b".")
-
-        for child in self.atoms:
-            if child.name == names[0]:
-                return child[names[1:]]
-        else:
-            raise KeyError("%r not found" % names[0])
-
-    def __repr__(self):
-        return "\n".join([repr(child) for child in self.atoms])
diff --git a/resources/lib/libraries/mutagen/mp4/_util.py b/resources/lib/libraries/mutagen/mp4/_util.py
deleted file mode 100644
index 9583334a..00000000
--- a/resources/lib/libraries/mutagen/mp4/_util.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2014 Christoph Reiter
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-from mutagen._util import cdata
-
-
-def parse_full_atom(data):
-    """Some atoms are versioned. Split them up into (version, flags, payload).
-    Can raise ValueError.
-    """
-
-    if len(data) < 4:
-        raise ValueError("not enough data")
-
-    version = ord(data[0:1])
-    flags = cdata.uint_be(b"\x00" + data[1:4])
-    return version, flags, data[4:]
diff --git a/resources/lib/libraries/mutagen/musepack.py b/resources/lib/libraries/mutagen/musepack.py
deleted file mode 100644
index 7880958b..00000000
--- a/resources/lib/libraries/mutagen/musepack.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2006 Lukas Lalinsky
-# Copyright (C) 2012 Christoph Reiter
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-"""Musepack audio streams with APEv2 tags.
-
-Musepack is an audio format originally based on the MPEG-1 Layer-2
-algorithms. Stream versions 4 through 7 are supported.
-
-For more information, see http://www.musepack.net/.
-"""
-
-__all__ = ["Musepack", "Open", "delete"]
-
-import struct
-
-from ._compat import endswith, xrange
-from mutagen import StreamInfo
-from mutagen.apev2 import APEv2File, error, delete
-from mutagen.id3 import BitPaddedInt
-from mutagen._util import cdata
-
-
-class MusepackHeaderError(error):
-    pass
-
-
-RATES = [44100, 48000, 37800, 32000]
-
-
-def _parse_sv8_int(fileobj, limit=9):
-    """Reads at most `limit` bytes from fileobj until a byte with a
-    cleared MSB is found. The lower 7 bits of each byte are merged into
-    a big-endian uint.
-
-    Raises ValueError if no byte with a cleared MSB occurs within
-    `limit` bytes, or EOFError if the file ends before the value
-    terminates.
-
-    Returns (parsed number, number of bytes read)
-    """
-
-    num = 0
-    for i in xrange(limit):
-        c = fileobj.read(1)
-        if len(c) != 1:
-            raise EOFError
-        c = bytearray(c)
-        num = (num << 7) | (c[0] & 0x7F)
-        if not c[0] & 0x80:
-            return num, i + 1
-    if limit > 0:
-        raise ValueError
-    return 0, 0
-
-
-def _calc_sv8_gain(gain):
-    # 64.82 taken from mpcdec
-    return 64.82 - gain / 256.0
-
-
-def _calc_sv8_peak(peak):
-    return (10 ** (peak / (256.0 * 20.0)) / 65535.0)
-
-
-class MusepackInfo(StreamInfo):
-    """Musepack stream information.
- - Attributes: - - * channels -- number of audio channels - * length -- file length in seconds, as a float - * sample_rate -- audio sampling rate in Hz - * bitrate -- audio bitrate, in bits per second - * version -- Musepack stream version - - Optional Attributes: - - * title_gain, title_peak -- Replay Gain and peak data for this song - * album_gain, album_peak -- Replay Gain and peak data for this album - - These attributes are only available in stream version 7/8. The - gains are a float, +/- some dB. The peaks are a percentage [0..1] of - the maximum amplitude. This means to get a number comparable to - VorbisGain, you must multiply the peak by 2. - """ - - def __init__(self, fileobj): - header = fileobj.read(4) - if len(header) != 4: - raise MusepackHeaderError("not a Musepack file") - - # Skip ID3v2 tags - if header[:3] == b"ID3": - header = fileobj.read(6) - if len(header) != 6: - raise MusepackHeaderError("not a Musepack file") - size = 10 + BitPaddedInt(header[2:6]) - fileobj.seek(size) - header = fileobj.read(4) - if len(header) != 4: - raise MusepackHeaderError("not a Musepack file") - - if header.startswith(b"MPCK"): - self.__parse_sv8(fileobj) - else: - self.__parse_sv467(fileobj) - - if not self.bitrate and self.length != 0: - fileobj.seek(0, 2) - self.bitrate = int(round(fileobj.tell() * 8 / self.length)) - - def __parse_sv8(self, fileobj): - # SV8 http://trac.musepack.net/trac/wiki/SV8Specification - - key_size = 2 - mandatory_packets = [b"SH", b"RG"] - - def check_frame_key(key): - if ((len(frame_type) != key_size) or - (not b'AA' <= frame_type <= b'ZZ')): - raise MusepackHeaderError("Invalid frame key.") - - frame_type = fileobj.read(key_size) - check_frame_key(frame_type) - - while frame_type not in (b"AP", b"SE") and mandatory_packets: - try: - frame_size, slen = _parse_sv8_int(fileobj) - except (EOFError, ValueError): - raise MusepackHeaderError("Invalid packet size.") - data_size = frame_size - key_size - slen - # packets can be at maximum data_size big and are padded with zeros - - if frame_type == b"SH": - mandatory_packets.remove(frame_type) - self.__parse_stream_header(fileobj, data_size) - elif frame_type == b"RG": - mandatory_packets.remove(frame_type) - self.__parse_replaygain_packet(fileobj, data_size) - else: - fileobj.seek(data_size, 1) - - frame_type = fileobj.read(key_size) - check_frame_key(frame_type) - - if mandatory_packets: - raise MusepackHeaderError("Missing mandatory packets: %s." 
% - ", ".join(map(repr, mandatory_packets))) - - self.length = float(self.samples) / self.sample_rate - self.bitrate = 0 - - def __parse_stream_header(self, fileobj, data_size): - # skip CRC - fileobj.seek(4, 1) - remaining_size = data_size - 4 - - try: - self.version = bytearray(fileobj.read(1))[0] - except TypeError: - raise MusepackHeaderError("SH packet ended unexpectedly.") - - remaining_size -= 1 - - try: - samples, l1 = _parse_sv8_int(fileobj) - samples_skip, l2 = _parse_sv8_int(fileobj) - except (EOFError, ValueError): - raise MusepackHeaderError( - "SH packet: Invalid sample counts.") - - self.samples = samples - samples_skip - remaining_size -= l1 + l2 - - data = fileobj.read(remaining_size) - if len(data) != remaining_size: - raise MusepackHeaderError("SH packet ended unexpectedly.") - self.sample_rate = RATES[bytearray(data)[0] >> 5] - self.channels = (bytearray(data)[1] >> 4) + 1 - - def __parse_replaygain_packet(self, fileobj, data_size): - data = fileobj.read(data_size) - if data_size < 9: - raise MusepackHeaderError("Invalid RG packet size.") - if len(data) != data_size: - raise MusepackHeaderError("RG packet ended unexpectedly.") - title_gain = cdata.short_be(data[1:3]) - title_peak = cdata.short_be(data[3:5]) - album_gain = cdata.short_be(data[5:7]) - album_peak = cdata.short_be(data[7:9]) - if title_gain: - self.title_gain = _calc_sv8_gain(title_gain) - if title_peak: - self.title_peak = _calc_sv8_peak(title_peak) - if album_gain: - self.album_gain = _calc_sv8_gain(album_gain) - if album_peak: - self.album_peak = _calc_sv8_peak(album_peak) - - def __parse_sv467(self, fileobj): - fileobj.seek(-4, 1) - header = fileobj.read(32) - if len(header) != 32: - raise MusepackHeaderError("not a Musepack file") - - # SV7 - if header.startswith(b"MP+"): - self.version = bytearray(header)[3] & 0xF - if self.version < 7: - raise MusepackHeaderError("not a Musepack file") - frames = cdata.uint_le(header[4:8]) - flags = cdata.uint_le(header[8:12]) - - self.title_peak, self.title_gain = struct.unpack( - "<Hh", header[12:16]) - self.album_peak, self.album_gain = struct.unpack( - "<Hh", header[16:20]) - self.title_gain /= 100.0 - self.album_gain /= 100.0 - self.title_peak /= 65535.0 - self.album_peak /= 65535.0 - - self.sample_rate = RATES[(flags >> 16) & 0x0003] - self.bitrate = 0 - # SV4-SV6 - else: - header_dword = cdata.uint_le(header[0:4]) - self.version = (header_dword >> 11) & 0x03FF - if self.version < 4 or self.version > 6: - raise MusepackHeaderError("not a Musepack file") - self.bitrate = (header_dword >> 23) & 0x01FF - self.sample_rate = 44100 - if self.version >= 5: - frames = cdata.uint_le(header[4:8]) - else: - frames = cdata.ushort_le(header[6:8]) - if self.version < 6: - frames -= 1 - self.channels = 2 - self.length = float(frames * 1152 - 576) / self.sample_rate - - def pprint(self): - rg_data = [] - if hasattr(self, "title_gain"): - rg_data.append(u"%+0.2f (title)" % self.title_gain) - if hasattr(self, "album_gain"): - rg_data.append(u"%+0.2f (album)" % self.album_gain) - rg_data = (rg_data and ", Gain: " + ", ".join(rg_data)) or "" - - return u"Musepack SV%d, %.2f seconds, %d Hz, %d bps%s" % ( - self.version, self.length, self.sample_rate, self.bitrate, rg_data) - - -class Musepack(APEv2File): - _Info = MusepackInfo - _mimes = ["audio/x-musepack", "audio/x-mpc"] - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - - return (header.startswith(b"MP+") + header.startswith(b"MPCK") + - endswith(filename, b".mpc")) - - -Open = Musepack 
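The SV8 size fields parsed above use a simple 7-bits-per-byte varint. A standalone sketch of the same scheme over a bytes object (illustrative names, not part of mutagen's API):

def sketch_decode_sv8_int(data, limit=9):
    """Return (value, bytes_consumed); raise ValueError if unterminated."""
    value = 0
    for i, byte in enumerate(bytearray(data[:limit])):
        value = (value << 7) | (byte & 0x7F)  # 7 payload bits per byte
        if not byte & 0x80:  # a cleared MSB terminates the value
            return value, i + 1
    raise ValueError("no terminating byte within limit")

assert sketch_decode_sv8_int(b"\x81\x00") == (128, 2)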
diff --git a/resources/lib/libraries/mutagen/ogg.py b/resources/lib/libraries/mutagen/ogg.py
deleted file mode 100644
index 9961a966..00000000
--- a/resources/lib/libraries/mutagen/ogg.py
+++ /dev/null
@@ -1,548 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2006 Joe Wreschnig
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-
-"""Read and write Ogg bitstreams and pages.
-
-This module reads and writes a subset of the Ogg bitstream format
-version 0. It does *not* read or write Ogg Vorbis files! For that,
-you should use mutagen.oggvorbis.
-
-This implementation is based on the RFC 3533 standard found at
-http://www.xiph.org/ogg/doc/rfc3533.txt.
-"""
-
-import struct
-import sys
-import zlib
-
-from mutagen import FileType
-from mutagen._util import cdata, resize_bytes, MutagenError
-from ._compat import cBytesIO, reraise, chr_, izip, xrange
-
-
-class error(IOError, MutagenError):
-    """Ogg stream parsing errors."""
-
-    pass
-
-
-class OggPage(object):
-    """A single Ogg page (not necessarily a single encoded packet).
-
-    A page is a header of 26 bytes, followed by the length of the
-    data, followed by the data.
-
-    The constructor is given a file-like object pointing to the start
-    of an Ogg page. After the constructor is finished it is pointing
-    to the start of the next page.
-
-    Attributes:
-
-    * version -- stream structure version (currently always 0)
-    * position -- absolute stream position (default -1)
-    * serial -- logical stream serial number (default 0)
-    * sequence -- page sequence number within logical stream (default 0)
-    * offset -- offset this page was read from (default None)
-    * complete -- if the last packet on this page is complete (default True)
-    * packets -- list of raw packet data (default [])
-
-    Note that if 'complete' is false, the next page's 'continued'
-    property must be true (so set both when constructing pages).
-
-    If a file-like object is supplied to the constructor, the above
-    attributes will be filled in based on it.
- """ - - version = 0 - __type_flags = 0 - position = 0 - serial = 0 - sequence = 0 - offset = None - complete = True - - def __init__(self, fileobj=None): - self.packets = [] - - if fileobj is None: - return - - self.offset = fileobj.tell() - - header = fileobj.read(27) - if len(header) == 0: - raise EOFError - - try: - (oggs, self.version, self.__type_flags, - self.position, self.serial, self.sequence, - crc, segments) = struct.unpack("<4sBBqIIiB", header) - except struct.error: - raise error("unable to read full header; got %r" % header) - - if oggs != b"OggS": - raise error("read %r, expected %r, at 0x%x" % ( - oggs, b"OggS", fileobj.tell() - 27)) - - if self.version != 0: - raise error("version %r unsupported" % self.version) - - total = 0 - lacings = [] - lacing_bytes = fileobj.read(segments) - if len(lacing_bytes) != segments: - raise error("unable to read %r lacing bytes" % segments) - for c in bytearray(lacing_bytes): - total += c - if c < 255: - lacings.append(total) - total = 0 - if total: - lacings.append(total) - self.complete = False - - self.packets = [fileobj.read(l) for l in lacings] - if [len(p) for p in self.packets] != lacings: - raise error("unable to read full data") - - def __eq__(self, other): - """Two Ogg pages are the same if they write the same data.""" - try: - return (self.write() == other.write()) - except AttributeError: - return False - - __hash__ = object.__hash__ - - def __repr__(self): - attrs = ['version', 'position', 'serial', 'sequence', 'offset', - 'complete', 'continued', 'first', 'last'] - values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs] - return "<%s %s, %d bytes in %d packets>" % ( - type(self).__name__, " ".join(values), sum(map(len, self.packets)), - len(self.packets)) - - def write(self): - """Return a string encoding of the page header and data. - - A ValueError is raised if the data is too big to fit in a - single page. - """ - - data = [ - struct.pack("<4sBBqIIi", b"OggS", self.version, self.__type_flags, - self.position, self.serial, self.sequence, 0) - ] - - lacing_data = [] - for datum in self.packets: - quot, rem = divmod(len(datum), 255) - lacing_data.append(b"\xff" * quot + chr_(rem)) - lacing_data = b"".join(lacing_data) - if not self.complete and lacing_data.endswith(b"\x00"): - lacing_data = lacing_data[:-1] - data.append(chr_(len(lacing_data))) - data.append(lacing_data) - data.extend(self.packets) - data = b"".join(data) - - # Python's CRC is swapped relative to Ogg's needs. - # crc32 returns uint prior to py2.6 on some platforms, so force uint - crc = (~zlib.crc32(data.translate(cdata.bitswap), -1)) & 0xffffffff - # Although we're using to_uint_be, this actually makes the CRC - # a proper le integer, since Python's CRC is byteswapped. - crc = cdata.to_uint_be(crc).translate(cdata.bitswap) - data = data[:22] + crc + data[26:] - return data - - @property - def size(self): - """Total frame size.""" - - size = 27 # Initial header size - for datum in self.packets: - quot, rem = divmod(len(datum), 255) - size += quot + 1 - if not self.complete and rem == 0: - # Packet contains a multiple of 255 bytes and is not - # terminated, so we don't have a \x00 at the end. 
-                size -= 1
-        size += sum(map(len, self.packets))
-        return size
-
-    def __set_flag(self, bit, val):
-        mask = 1 << bit
-        if val:
-            self.__type_flags |= mask
-        else:
-            self.__type_flags &= ~mask
-
-    continued = property(
-        lambda self: cdata.test_bit(self.__type_flags, 0),
-        lambda self, v: self.__set_flag(0, v),
-        doc="The first packet is continued from the previous page.")
-
-    first = property(
-        lambda self: cdata.test_bit(self.__type_flags, 1),
-        lambda self, v: self.__set_flag(1, v),
-        doc="This is the first page of a logical bitstream.")
-
-    last = property(
-        lambda self: cdata.test_bit(self.__type_flags, 2),
-        lambda self, v: self.__set_flag(2, v),
-        doc="This is the last page of a logical bitstream.")
-
-    @staticmethod
-    def renumber(fileobj, serial, start):
-        """Renumber pages belonging to a specified logical stream.
-
-        fileobj must be opened with mode r+b or w+b.
-
-        Starting at page number 'start', renumber all pages belonging
-        to logical stream 'serial'. Other pages will be ignored.
-
-        fileobj must point to the start of a valid Ogg page; any pages
-        occurring after it that are part of the specified logical
-        stream will be renumbered. No adjustment will be made to the
-        data in the pages or to the granule position; only the page
-        number (and therefore the CRC) changes.
-
-        If an error occurs (e.g. non-Ogg data is found), fileobj will
-        be left pointing to the place in the stream the error occurred,
-        but the invalid data will be left intact (since this function
-        does not change the total file size).
-        """
-
-        number = start
-        while True:
-            try:
-                page = OggPage(fileobj)
-            except EOFError:
-                break
-            else:
-                if page.serial != serial:
-                    # Wrong stream, skip this page.
-                    continue
-                # Changing the number can't change the page size,
-                # so seeking back based on the current size is safe.
-                fileobj.seek(-page.size, 1)
-            page.sequence = number
-            fileobj.write(page.write())
-            fileobj.seek(page.offset + page.size, 0)
-            number += 1
-
-    @staticmethod
-    def to_packets(pages, strict=False):
-        """Construct a list of packet data from a list of Ogg pages.
-
-        If strict is true, the first page must start a new packet,
-        and the last page must end the last packet.
-        """
-
-        serial = pages[0].serial
-        sequence = pages[0].sequence
-        packets = []
-
-        if strict:
-            if pages[0].continued:
-                raise ValueError("first packet is continued")
-            if not pages[-1].complete:
-                raise ValueError("last packet does not complete")
-        elif pages and pages[0].continued:
-            packets.append([b""])
-
-        for page in pages:
-            if serial != page.serial:
-                raise ValueError("invalid serial number in %r" % page)
-            elif sequence != page.sequence:
-                raise ValueError("bad sequence number in %r" % page)
-            else:
-                sequence += 1
-
-            if page.continued:
-                packets[-1].append(page.packets[0])
-            else:
-                packets.append([page.packets[0]])
-            packets.extend([p] for p in page.packets[1:])
-
-        return [b"".join(p) for p in packets]
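The lacing rules that write(), size and to_packets() share can be condensed into a small helper. A standalone sketch under the RFC 3533 rules as implemented above (illustrative name, not part of the class):

def sketch_lacing_values(packet_len, complete=True):
    """Lacing bytes for one packet: 255-valued bytes plus a remainder."""
    quot, rem = divmod(packet_len, 255)
    values = [255] * quot
    if complete or rem:
        # a packet left incomplete at a page boundary omits the
        # terminating byte when its length is a multiple of 255
        values.append(rem)
    return values

assert sketch_lacing_values(510) == [255, 255, 0]
assert sketch_lacing_values(510, complete=False) == [255, 255]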
-    @classmethod
-    def _from_packets_try_preserve(cls, packets, old_pages):
-        """Like from_packets but in case the size and number of the packets
-        is the same as in the given pages the layout of the pages will
-        be copied (the page size and number will match).
-
-        If the packets don't match this behaves like::
-
-            OggPage.from_packets(packets, sequence=old_pages[0].sequence)
-        """
-
-        old_packets = cls.to_packets(old_pages)
-
-        if [len(p) for p in packets] != [len(p) for p in old_packets]:
-            # doesn't match, fall back
-            return cls.from_packets(packets, old_pages[0].sequence)
-
-        new_data = b"".join(packets)
-        new_pages = []
-        for old in old_pages:
-            new = OggPage()
-            new.sequence = old.sequence
-            new.complete = old.complete
-            new.continued = old.continued
-            new.position = old.position
-            for p in old.packets:
-                data, new_data = new_data[:len(p)], new_data[len(p):]
-                new.packets.append(data)
-            new_pages.append(new)
-        assert not new_data
-
-        return new_pages
-
-    @staticmethod
-    def from_packets(packets, sequence=0, default_size=4096,
-                     wiggle_room=2048):
-        """Construct a list of Ogg pages from a list of packet data.
-
-        The algorithm will generate pages of approximately
-        default_size in size (rounded down to the nearest multiple of
-        255). However, it will also allow pages to increase to
-        approximately default_size + wiggle_room if allowing the
-        wiggle room would finish a packet (only one packet will be
-        finished in this way per page; if the next packet would fit
-        into the wiggle room, it still starts on a new page).
-
-        This method reduces packet fragmentation when packet sizes are
-        slightly larger than the default page size, while still
-        ensuring most pages are of the average size.
-
-        Pages are numbered starting at 'sequence'; other information is
-        uninitialized.
-        """
-
-        chunk_size = (default_size // 255) * 255
-
-        pages = []
-
-        page = OggPage()
-        page.sequence = sequence
-
-        for packet in packets:
-            page.packets.append(b"")
-            while packet:
-                data, packet = packet[:chunk_size], packet[chunk_size:]
-                if page.size < default_size and len(page.packets) < 255:
-                    page.packets[-1] += data
-                else:
-                    # If we've put any packet data into this page yet,
-                    # we need to mark it incomplete. However, we can
-                    # also have just started this packet on an already
-                    # full page, in which case, just start the new
-                    # page with this packet.
-                    if page.packets[-1]:
-                        page.complete = False
-                        if len(page.packets) == 1:
-                            page.position = -1
-                    else:
-                        page.packets.pop(-1)
-                    pages.append(page)
-                    page = OggPage()
-                    page.continued = not pages[-1].complete
-                    page.sequence = pages[-1].sequence + 1
-                    page.packets.append(data)
-
-                if len(packet) < wiggle_room:
-                    page.packets[-1] += packet
-                    packet = b""
-
-        if page.packets:
-            pages.append(page)
-
-        return pages
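A hedged usage sketch tying to_packets(), from_packets() and replace() (defined next) together; the file name, the single-page read, and the edit are illustrative:

from mutagen.ogg import OggPage

with open("example.ogg", "rb+") as fileobj:
    old_pages = [OggPage(fileobj)]  # assume: all pages of the packet to edit
    packets = OggPage.to_packets(old_pages)
    packets[0] = packets[0] + b""  # modify the packet data here
    new_pages = OggPage.from_packets(packets, old_pages[0].sequence)
    OggPage.replace(fileobj, old_pages, new_pages)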
-    @classmethod
-    def replace(cls, fileobj, old_pages, new_pages):
-        """Replace old_pages with new_pages within fileobj.
-
-        old_pages must have come from reading fileobj originally.
-        new_pages are assumed to have the 'same' data as old_pages,
-        and so the serial and sequence numbers will be copied, as will
-        the flags for the first and last pages.
-
-        fileobj will be resized and pages renumbered as necessary. As
-        such, it must be opened r+b or w+b.
-        """
-
-        if not len(old_pages) or not len(new_pages):
-            raise ValueError("empty pages list not allowed")
-
-        # Number the new pages starting from the first old page.
-        first = old_pages[0].sequence
-        for page, seq in izip(new_pages,
-                              xrange(first, first + len(new_pages))):
-            page.sequence = seq
-            page.serial = old_pages[0].serial
-
-        new_pages[0].first = old_pages[0].first
-        new_pages[0].last = old_pages[0].last
-        new_pages[0].continued = old_pages[0].continued
-
-        new_pages[-1].first = old_pages[-1].first
-        new_pages[-1].last = old_pages[-1].last
-        new_pages[-1].complete = old_pages[-1].complete
-        if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
-            new_pages[-1].position = -1
-
-        new_data = [cls.write(p) for p in new_pages]
-
-        # Add dummy data or merge the remaining data together so multiple
-        # new pages replace an old one
-        pages_diff = len(old_pages) - len(new_data)
-        if pages_diff > 0:
-            new_data.extend([b""] * pages_diff)
-        elif pages_diff < 0:
-            new_data[pages_diff - 1:] = [b"".join(new_data[pages_diff - 1:])]
-
-        # Replace pages one by one. If the sizes match no resize happens.
-        offset_adjust = 0
-        new_data_end = None
-        assert len(old_pages) == len(new_data)
-        for old_page, data in izip(old_pages, new_data):
-            offset = old_page.offset + offset_adjust
-            data_size = len(data)
-            resize_bytes(fileobj, old_page.size, data_size, offset)
-            fileobj.seek(offset, 0)
-            fileobj.write(data)
-            new_data_end = offset + data_size
-            offset_adjust += (data_size - old_page.size)
-
-        # Finally, if there's any discrepancy in length, we need to
-        # renumber the pages for the logical stream.
-        if len(old_pages) != len(new_pages):
-            fileobj.seek(new_data_end, 0)
-            serial = new_pages[-1].serial
-            sequence = new_pages[-1].sequence + 1
-            cls.renumber(fileobj, serial, sequence)
-
-    @staticmethod
-    def find_last(fileobj, serial):
-        """Find the last page of the stream 'serial'.
-
-        If the file is not multiplexed this function is fast. If it is,
-        it must read the whole stream.
-
-        This finds the last page in the actual file object, or the last
-        page in the stream (with eos set), whichever comes first.
-        """
-
-        # For non-muxed streams, look at the last page.
-        try:
-            fileobj.seek(-256 * 256, 2)
-        except IOError:
-            # The file is less than 64k in length.
-            fileobj.seek(0)
-        data = fileobj.read()
-        try:
-            index = data.rindex(b"OggS")
-        except ValueError:
-            raise error("unable to find final Ogg header")
-        bytesobj = cBytesIO(data[index:])
-        best_page = None
-        try:
-            page = OggPage(bytesobj)
-        except error:
-            pass
-        else:
-            if page.serial == serial:
-                if page.last:
-                    return page
-                else:
-                    best_page = page
-            else:
-                best_page = None
-
-        # The stream is muxed, so use the slow way.
-        fileobj.seek(0)
-        try:
-            page = OggPage(fileobj)
-            while not page.last:
-                page = OggPage(fileobj)
-                while page.serial != serial:
-                    page = OggPage(fileobj)
-                best_page = page
-            return page
-        except error:
-            return best_page
-        except EOFError:
-            return best_page
-
-
-class OggFileType(FileType):
-    """A generic Ogg file."""
-
-    _Info = None
-    _Tags = None
-    _Error = None
-    _mimes = ["application/ogg", "application/x-ogg"]
-
-    def load(self, filename):
-        """Load file information from a filename."""
-
-        self.filename = filename
-        with open(filename, "rb") as fileobj:
-            try:
-                self.info = self._Info(fileobj)
-                self.tags = self._Tags(fileobj, self.info)
-                self.info._post_tags(fileobj)
-            except error as e:
-                reraise(self._Error, e, sys.exc_info()[2])
-            except EOFError:
-                raise self._Error("no appropriate stream found")
-
-    def delete(self, filename=None):
-        """Remove tags from a file.
-
-        If no filename is given, the one most recently loaded is used.
- """ - - if filename is None: - filename = self.filename - - self.tags.clear() - # TODO: we should delegate the deletion to the subclass and not through - # _inject. - with open(filename, "rb+") as fileobj: - try: - self.tags._inject(fileobj, lambda x: 0) - except error as e: - reraise(self._Error, e, sys.exc_info()[2]) - except EOFError: - raise self._Error("no appropriate stream found") - - def add_tags(self): - raise self._Error - - def save(self, filename=None, padding=None): - """Save a tag to a file. - - If no filename is given, the one most recently loaded is used. - """ - - if filename is None: - filename = self.filename - fileobj = open(filename, "rb+") - try: - try: - self.tags._inject(fileobj, padding) - except error as e: - reraise(self._Error, e, sys.exc_info()[2]) - except EOFError: - raise self._Error("no appropriate stream found") - finally: - fileobj.close() diff --git a/resources/lib/libraries/mutagen/oggflac.py b/resources/lib/libraries/mutagen/oggflac.py deleted file mode 100644 index b86226ca..00000000 --- a/resources/lib/libraries/mutagen/oggflac.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg FLAC comments. - -This module handles FLAC files wrapped in an Ogg bitstream. The first -FLAC stream found is used. For 'naked' FLACs, see mutagen.flac. - -This module is based off the specification at -http://flac.sourceforge.net/ogg_mapping.html. -""" - -__all__ = ["OggFLAC", "Open", "delete"] - -import struct - -from ._compat import cBytesIO - -from mutagen import StreamInfo -from mutagen.flac import StreamInfo as FLACStreamInfo, error as FLACError -from mutagen._vorbis import VCommentDict -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggFLACHeaderError(error): - pass - - -class OggFLACStreamInfo(StreamInfo): - """Ogg FLAC stream info.""" - - length = 0 - """File length in seconds, as a float""" - - channels = 0 - """Number of channels""" - - sample_rate = 0 - """Sample rate in Hz""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x7FFLAC"): - page = OggPage(fileobj) - major, minor, self.packets, flac = struct.unpack( - ">BBH4s", page.packets[0][5:13]) - if flac != b"fLaC": - raise OggFLACHeaderError("invalid FLAC marker (%r)" % flac) - elif (major, minor) != (1, 0): - raise OggFLACHeaderError( - "unknown mapping version: %d.%d" % (major, minor)) - self.serial = page.serial - - # Skip over the block header. - stringobj = cBytesIO(page.packets[0][17:]) - - try: - flac_info = FLACStreamInfo(stringobj) - except FLACError as e: - raise OggFLACHeaderError(e) - - for attr in ["min_blocksize", "max_blocksize", "sample_rate", - "channels", "bits_per_sample", "total_samples", "length"]: - setattr(self, attr, getattr(flac_info, attr)) - - def _post_tags(self, fileobj): - if self.length: - return - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg FLAC, %.2f seconds, %d Hz" % ( - self.length, self.sample_rate) - - -class OggFLACVComment(VCommentDict): - - def __init__(self, fileobj, info): - # data should be pointing at the start of an Ogg page, after - # the first FLAC page. 
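The loop that follows is the page-gathering pattern shared by every Ogg comment reader in this module family: collect pages of one logical stream until the first packet completes. A standalone sketch of that pattern (illustrative name, assuming a mutagen.ogg.OggPage-compatible reader):

from mutagen.ogg import OggPage

def sketch_read_comment_packet(fileobj, serial):
    """Collect pages of one stream until the first packet completes."""
    pages = []
    while not (pages and (pages[-1].complete or len(pages[-1].packets) > 1)):
        page = OggPage(fileobj)
        if page.serial == serial:
            pages.append(page)
    return OggPage.to_packets(pages)[0]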
- pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - comment = cBytesIO(OggPage.to_packets(pages)[0][4:]) - super(OggFLACVComment, self).__init__(comment, framing=False) - - def _inject(self, fileobj, padding_func): - """Write tag data into the FLAC Vorbis comment packet/page.""" - - # Ogg FLAC has no convenient data marker like Vorbis, but the - # second packet - and second page - must be the comment data. - fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x7FFLAC"): - page = OggPage(fileobj) - - first_page = page - while not (page.sequence == 1 and page.serial == first_page.serial): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == first_page.serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - # Set the new comment block. - data = self.write(framing=False) - data = packets[0][:1] + struct.pack(">I", len(data))[-3:] + data - packets[0] = data - - new_pages = OggPage.from_packets(packets, old_pages[0].sequence) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggFLAC(OggFileType): - """An Ogg FLAC file.""" - - _Info = OggFLACStreamInfo - _Tags = OggFLACVComment - _Error = OggFLACHeaderError - _mimes = ["audio/x-oggflac"] - - info = None - """A `OggFLACStreamInfo`""" - - tags = None - """A `VCommentDict`""" - - def save(self, filename=None): - return super(OggFLAC, self).save(filename) - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * ( - (b"FLAC" in header) + (b"fLaC" in header))) - - -Open = OggFLAC - - -def delete(filename): - """Remove tags from a file.""" - - OggFLAC(filename).delete() diff --git a/resources/lib/libraries/mutagen/oggopus.py b/resources/lib/libraries/mutagen/oggopus.py deleted file mode 100644 index 7154e479..00000000 --- a/resources/lib/libraries/mutagen/oggopus.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2012, 2013 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Opus comments. - -This module handles Opus files wrapped in an Ogg bitstream. The -first Opus stream found is used. 
-
-Based on http://tools.ietf.org/html/draft-terriberry-oggopus-01
-"""
-
-__all__ = ["OggOpus", "Open", "delete"]
-
-import struct
-
-from mutagen import StreamInfo
-from mutagen._compat import BytesIO
-from mutagen._util import get_size
-from mutagen._tags import PaddingInfo
-from mutagen._vorbis import VCommentDict
-from mutagen.ogg import OggPage, OggFileType, error as OggError
-
-
-class error(OggError):
-    pass
-
-
-class OggOpusHeaderError(error):
-    pass
-
-
-class OggOpusInfo(StreamInfo):
-    """Ogg Opus stream information."""
-
-    length = 0
-    """File length in seconds, as a float"""
-
-    channels = 0
-    """Number of channels"""
-
-    def __init__(self, fileobj):
-        page = OggPage(fileobj)
-        while not page.packets[0].startswith(b"OpusHead"):
-            page = OggPage(fileobj)
-
-        self.serial = page.serial
-
-        if not page.first:
-            raise OggOpusHeaderError(
-                "page has ID header, but doesn't start a stream")
-
-        (version, self.channels, pre_skip, orig_sample_rate, output_gain,
-         channel_map) = struct.unpack("<BBHIhB", page.packets[0][8:19])
-
-        self.__pre_skip = pre_skip
-
-        # only the upper 4 bits change on incompatible changes
-        major = version >> 4
-        if major != 0:
-            raise OggOpusHeaderError("version %r unsupported" % major)
-
-    def _post_tags(self, fileobj):
-        page = OggPage.find_last(fileobj, self.serial)
-        self.length = (page.position - self.__pre_skip) / float(48000)
-
-    def pprint(self):
-        return u"Ogg Opus, %.2f seconds" % (self.length)
-
-
-class OggOpusVComment(VCommentDict):
-    """Opus comments embedded in an Ogg bitstream."""
-
-    def __get_comment_pages(self, fileobj, info):
-        # find the first tags page with the right serial
-        page = OggPage(fileobj)
-        while ((info.serial != page.serial) or
-               not page.packets[0].startswith(b"OpusTags")):
-            page = OggPage(fileobj)
-
-        # get all comment pages
-        pages = [page]
-        while not (pages[-1].complete or len(pages[-1].packets) > 1):
-            page = OggPage(fileobj)
-            if page.serial == pages[0].serial:
-                pages.append(page)
-
-        return pages
-
-    def __init__(self, fileobj, info):
-        pages = self.__get_comment_pages(fileobj, info)
-        data = OggPage.to_packets(pages)[0][8:]  # Strip OpusTags
-        fileobj = BytesIO(data)
-        super(OggOpusVComment, self).__init__(fileobj, framing=False)
-        self._padding = len(data) - self._size
-
-        # in case the LSB of the first byte after v-comment is 1, preserve the
-        # following data
-        padding_flag = fileobj.read(1)
-        if padding_flag and ord(padding_flag) & 0x1:
-            self._pad_data = padding_flag + fileobj.read()
-            self._padding = 0  # we have to preserve, so no padding
-        else:
-            self._pad_data = b""
-
-    def _inject(self, fileobj, padding_func):
-        fileobj.seek(0)
-        info = OggOpusInfo(fileobj)
-        old_pages = self.__get_comment_pages(fileobj, info)
-
-        packets = OggPage.to_packets(old_pages)
-        vcomment_data = b"OpusTags" + self.write(framing=False)
-
-        if self._pad_data:
-            # if we have padding data to preserve we can't add more padding
-            # as long as we don't know the structure of what follows
-            packets[0] = vcomment_data + self._pad_data
-        else:
-            content_size = get_size(fileobj) - len(packets[0])  # approx
-            padding_left = len(packets[0]) - len(vcomment_data)
-            info = PaddingInfo(padding_left, content_size)
-            new_padding = info._get_padding(padding_func)
-            packets[0] = vcomment_data + b"\x00" * new_padding
-
-        new_pages = OggPage._from_packets_try_preserve(packets, old_pages)
-        OggPage.replace(fileobj, old_pages, new_pages)
-
-
-class OggOpus(OggFileType):
-    """An Ogg Opus file."""
-
-    _Info = OggOpusInfo
-    _Tags = OggOpusVComment
-    _Error =
OggOpusHeaderError - _mimes = ["audio/ogg", "audio/ogg; codecs=opus"] - - info = None - """A `OggOpusInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"OpusHead" in header)) - - -Open = OggOpus - - -def delete(filename): - """Remove tags from a file.""" - - OggOpus(filename).delete() diff --git a/resources/lib/libraries/mutagen/oggspeex.py b/resources/lib/libraries/mutagen/oggspeex.py deleted file mode 100644 index 9b16930b..00000000 --- a/resources/lib/libraries/mutagen/oggspeex.py +++ /dev/null @@ -1,154 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Speex comments. - -This module handles Speex files wrapped in an Ogg bitstream. The -first Speex stream found is used. - -Read more about Ogg Speex at http://www.speex.org/. This module is -based on the specification at http://www.speex.org/manual2/node7.html -and clarifications after personal communication with Jean-Marc, -http://lists.xiph.org/pipermail/speex-dev/2006-July/004676.html. -""" - -__all__ = ["OggSpeex", "Open", "delete"] - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen.ogg import OggPage, OggFileType, error as OggError -from mutagen._util import cdata, get_size -from mutagen._tags import PaddingInfo - - -class error(OggError): - pass - - -class OggSpeexHeaderError(error): - pass - - -class OggSpeexInfo(StreamInfo): - """Ogg Speex stream information.""" - - length = 0 - """file length in seconds, as a float""" - - channels = 0 - """number of channels""" - - bitrate = 0 - """nominal bitrate in bits per second. - - The reference encoder does not set the bitrate; in this case, - the bitrate will be 0. - """ - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"Speex "): - page = OggPage(fileobj) - if not page.first: - raise OggSpeexHeaderError( - "page has ID header, but doesn't start a stream") - self.sample_rate = cdata.uint_le(page.packets[0][36:40]) - self.channels = cdata.uint_le(page.packets[0][48:52]) - self.bitrate = max(0, cdata.int_le(page.packets[0][52:56])) - self.serial = page.serial - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg Speex, %.2f seconds" % self.length - - -class OggSpeexVComment(VCommentDict): - """Speex comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0] - super(OggSpeexVComment, self).__init__(data, framing=False) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Speex comment packet/page.""" - - fileobj.seek(0) - - # Find the first header page, with the stream info. - # Use it to get the serial number. - page = OggPage(fileobj) - while not page.packets[0].startswith(b"Speex "): - page = OggPage(fileobj) - - # Look for the next page with that serial number, it'll start - # the comment packet. 
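For context, typical use of this module goes through the high-level object rather than these internals. A hedged usage sketch; the path and the tag value are illustrative:

from mutagen.oggspeex import OggSpeex

audio = OggSpeex("example.spx")
print(audio.info.channels, audio.info.sample_rate, audio.info.bitrate)
audio["title"] = ["An example title"]  # Vorbis comments take lists of strings
audio.save()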
- serial = page.serial - page = OggPage(fileobj) - while page.serial != serial: - page = OggPage(fileobj) - - # Then find all the pages with the comment packet. - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = self.write(framing=False) - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - # Set the new comment packet. - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggSpeex(OggFileType): - """An Ogg Speex file.""" - - _Info = OggSpeexInfo - _Tags = OggSpeexVComment - _Error = OggSpeexHeaderError - _mimes = ["audio/x-speex"] - - info = None - """A `OggSpeexInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"Speex " in header)) - - -Open = OggSpeex - - -def delete(filename): - """Remove tags from a file.""" - - OggSpeex(filename).delete() diff --git a/resources/lib/libraries/mutagen/oggtheora.py b/resources/lib/libraries/mutagen/oggtheora.py deleted file mode 100644 index 122e7d4b..00000000 --- a/resources/lib/libraries/mutagen/oggtheora.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Theora comments. - -This module handles Theora files wrapped in an Ogg bitstream. The -first Theora stream found is used. - -Based on the specification at http://theora.org/doc/Theora_I_spec.pdf. 
-""" - -__all__ = ["OggTheora", "Open", "delete"] - -import struct - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen._util import cdata, get_size -from mutagen._tags import PaddingInfo -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggTheoraHeaderError(error): - pass - - -class OggTheoraInfo(StreamInfo): - """Ogg Theora stream information.""" - - length = 0 - """File length in seconds, as a float""" - - fps = 0 - """Video frames per second, as a float""" - - bitrate = 0 - """Bitrate in bps (int)""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x80theora"): - page = OggPage(fileobj) - if not page.first: - raise OggTheoraHeaderError( - "page has ID header, but doesn't start a stream") - data = page.packets[0] - vmaj, vmin = struct.unpack("2B", data[7:9]) - if (vmaj, vmin) != (3, 2): - raise OggTheoraHeaderError( - "found Theora version %d.%d != 3.2" % (vmaj, vmin)) - fps_num, fps_den = struct.unpack(">2I", data[22:30]) - self.fps = fps_num / float(fps_den) - self.bitrate = cdata.uint_be(b"\x00" + data[37:40]) - self.granule_shift = (cdata.ushort_be(data[40:42]) >> 5) & 0x1F - self.serial = page.serial - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - position = page.position - mask = (1 << self.granule_shift) - 1 - frames = (position >> self.granule_shift) + (position & mask) - self.length = frames / float(self.fps) - - def pprint(self): - return u"Ogg Theora, %.2f seconds, %d bps" % (self.length, - self.bitrate) - - -class OggTheoraCommentDict(VCommentDict): - """Theora comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0][7:] - super(OggTheoraCommentDict, self).__init__(data, framing=False) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Theora comment packet/page.""" - - fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x81theora"): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = b"\x81theora" + self.write(framing=False) - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggTheora(OggFileType): - """An Ogg Theora file.""" - - _Info = OggTheoraInfo - _Tags = OggTheoraCommentDict - _Error = OggTheoraHeaderError - _mimes = ["video/x-theora"] - - info = None - """A `OggTheoraInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * - ((b"\x80theora" in header) + (b"\x81theora" in header)) * 2) - - -Open = OggTheora - - -def delete(filename): - """Remove tags from a file.""" - - 
OggTheora(filename).delete() diff --git a/resources/lib/libraries/mutagen/oggvorbis.py b/resources/lib/libraries/mutagen/oggvorbis.py deleted file mode 100644 index b058a0c1..00000000 --- a/resources/lib/libraries/mutagen/oggvorbis.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""Read and write Ogg Vorbis comments. - -This module handles Vorbis files wrapped in an Ogg bitstream. The -first Vorbis stream found is used. - -Read more about Ogg Vorbis at http://vorbis.com/. This module is based -on the specification at http://www.xiph.org/vorbis/doc/Vorbis_I_spec.html. -""" - -__all__ = ["OggVorbis", "Open", "delete"] - -import struct - -from mutagen import StreamInfo -from mutagen._vorbis import VCommentDict -from mutagen._util import get_size -from mutagen._tags import PaddingInfo -from mutagen.ogg import OggPage, OggFileType, error as OggError - - -class error(OggError): - pass - - -class OggVorbisHeaderError(error): - pass - - -class OggVorbisInfo(StreamInfo): - """Ogg Vorbis stream information.""" - - length = 0 - """File length in seconds, as a float""" - - channels = 0 - """Number of channels""" - - bitrate = 0 - """Nominal ('average') bitrate in bits per second, as an int""" - - sample_rate = 0 - """Sample rate in Hz""" - - def __init__(self, fileobj): - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x01vorbis"): - page = OggPage(fileobj) - if not page.first: - raise OggVorbisHeaderError( - "page has ID header, but doesn't start a stream") - (self.channels, self.sample_rate, max_bitrate, nominal_bitrate, - min_bitrate) = struct.unpack("<B4i", page.packets[0][11:28]) - self.serial = page.serial - - max_bitrate = max(0, max_bitrate) - min_bitrate = max(0, min_bitrate) - nominal_bitrate = max(0, nominal_bitrate) - - if nominal_bitrate == 0: - self.bitrate = (max_bitrate + min_bitrate) // 2 - elif max_bitrate and max_bitrate < nominal_bitrate: - # If the max bitrate is less than the nominal, we know - # the nominal is wrong. - self.bitrate = max_bitrate - elif min_bitrate > nominal_bitrate: - self.bitrate = min_bitrate - else: - self.bitrate = nominal_bitrate - - def _post_tags(self, fileobj): - page = OggPage.find_last(fileobj, self.serial) - self.length = page.position / float(self.sample_rate) - - def pprint(self): - return u"Ogg Vorbis, %.2f seconds, %d bps" % ( - self.length, self.bitrate) - - -class OggVCommentDict(VCommentDict): - """Vorbis comments embedded in an Ogg bitstream.""" - - def __init__(self, fileobj, info): - pages = [] - complete = False - while not complete: - page = OggPage(fileobj) - if page.serial == info.serial: - pages.append(page) - complete = page.complete or (len(page.packets) > 1) - data = OggPage.to_packets(pages)[0][7:] # Strip off "\x03vorbis". - super(OggVCommentDict, self).__init__(data) - self._padding = len(data) - self._size - - def _inject(self, fileobj, padding_func): - """Write tag data into the Vorbis comment packet/page.""" - - # Find the old pages in the file; we'll need to remove them, - # plus grab any stray setup packet data out of them. 
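The nominal/max/min sanity check in OggVorbisInfo above reduces to a small pure function. A standalone sketch, not part of mutagen's API:

def sketch_effective_bitrate(nominal, maximum, minimum):
    nominal, maximum, minimum = (max(0, v) for v in (nominal, maximum, minimum))
    if nominal == 0:
        return (maximum + minimum) // 2
    if maximum and maximum < nominal:
        return maximum  # a nominal above the declared max cannot be right
    if minimum > nominal:
        return minimum
    return nominal

assert sketch_effective_bitrate(0, 256000, 64000) == 160000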
- fileobj.seek(0) - page = OggPage(fileobj) - while not page.packets[0].startswith(b"\x03vorbis"): - page = OggPage(fileobj) - - old_pages = [page] - while not (old_pages[-1].complete or len(old_pages[-1].packets) > 1): - page = OggPage(fileobj) - if page.serial == old_pages[0].serial: - old_pages.append(page) - - packets = OggPage.to_packets(old_pages, strict=False) - - content_size = get_size(fileobj) - len(packets[0]) # approx - vcomment_data = b"\x03vorbis" + self.write() - padding_left = len(packets[0]) - len(vcomment_data) - - info = PaddingInfo(padding_left, content_size) - new_padding = info._get_padding(padding_func) - - # Set the new comment packet. - packets[0] = vcomment_data + b"\x00" * new_padding - - new_pages = OggPage._from_packets_try_preserve(packets, old_pages) - OggPage.replace(fileobj, old_pages, new_pages) - - -class OggVorbis(OggFileType): - """An Ogg Vorbis file.""" - - _Info = OggVorbisInfo - _Tags = OggVCommentDict - _Error = OggVorbisHeaderError - _mimes = ["audio/vorbis", "audio/x-vorbis"] - - info = None - """A `OggVorbisInfo`""" - - tags = None - """A `VCommentDict`""" - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"OggS") * (b"\x01vorbis" in header)) - - -Open = OggVorbis - - -def delete(filename): - """Remove tags from a file.""" - - OggVorbis(filename).delete() diff --git a/resources/lib/libraries/mutagen/optimfrog.py b/resources/lib/libraries/mutagen/optimfrog.py deleted file mode 100644 index 0d85a818..00000000 --- a/resources/lib/libraries/mutagen/optimfrog.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Lukas Lalinsky -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""OptimFROG audio streams with APEv2 tags. - -OptimFROG is a lossless audio compression program. Its main goal is to -reduce at maximum the size of audio files, while permitting bit -identical restoration for all input. It is similar with the ZIP -compression, but it is highly specialized to compress audio data. - -Only versions 4.5 and higher are supported. - -For more information, see http://www.losslessaudio.org/ -""" - -__all__ = ["OptimFROG", "Open", "delete"] - -import struct - -from ._compat import endswith -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete - - -class OptimFROGHeaderError(error): - pass - - -class OptimFROGInfo(StreamInfo): - """OptimFROG stream information. 
- - Attributes: - - * channels - number of audio channels - * length - file length in seconds, as a float - * sample_rate - audio sampling rate in Hz - """ - - def __init__(self, fileobj): - header = fileobj.read(76) - if (len(header) != 76 or not header.startswith(b"OFR ") or - struct.unpack("<I", header[4:8])[0] not in [12, 15]): - raise OptimFROGHeaderError("not an OptimFROG file") - (total_samples, total_samples_high, sample_type, self.channels, - self.sample_rate) = struct.unpack("<IHBBI", header[8:20]) - total_samples += total_samples_high << 32 - self.channels += 1 - if self.sample_rate: - self.length = float(total_samples) / (self.channels * - self.sample_rate) - else: - self.length = 0.0 - - def pprint(self): - return u"OptimFROG, %.2f seconds, %d Hz" % (self.length, - self.sample_rate) - - -class OptimFROG(APEv2File): - _Info = OptimFROGInfo - - @staticmethod - def score(filename, fileobj, header): - filename = filename.lower() - - return (header.startswith(b"OFR") + endswith(filename, b".ofr") + - endswith(filename, b".ofs")) - -Open = OptimFROG diff --git a/resources/lib/libraries/mutagen/trueaudio.py b/resources/lib/libraries/mutagen/trueaudio.py deleted file mode 100644 index 1c8d56c4..00000000 --- a/resources/lib/libraries/mutagen/trueaudio.py +++ /dev/null @@ -1,84 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2006 Joe Wreschnig -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of version 2 of the GNU General Public License as -# published by the Free Software Foundation. - -"""True Audio audio stream information and tags. - -True Audio is a lossless format designed for real-time encoding and -decoding. This module is based on the documentation at -http://www.true-audio.com/TTA_Lossless_Audio_Codec\_-_Format_Description - -True Audio files use ID3 tags. -""" - -__all__ = ["TrueAudio", "Open", "delete", "EasyTrueAudio"] - -from ._compat import endswith -from mutagen import StreamInfo -from mutagen.id3 import ID3FileType, delete -from mutagen._util import cdata, MutagenError - - -class error(RuntimeError, MutagenError): - pass - - -class TrueAudioHeaderError(error, IOError): - pass - - -class TrueAudioInfo(StreamInfo): - """True Audio stream information. - - Attributes: - - * length - audio length, in seconds - * sample_rate - audio sample rate, in Hz - """ - - def __init__(self, fileobj, offset): - fileobj.seek(offset or 0) - header = fileobj.read(18) - if len(header) != 18 or not header.startswith(b"TTA"): - raise TrueAudioHeaderError("TTA header not found") - self.sample_rate = cdata.int_le(header[10:14]) - samples = cdata.uint_le(header[14:18]) - self.length = float(samples) / self.sample_rate - - def pprint(self): - return u"True Audio, %.2f seconds, %d Hz." % ( - self.length, self.sample_rate) - - -class TrueAudio(ID3FileType): - """A True Audio file. - - :ivar info: :class:`TrueAudioInfo` - :ivar tags: :class:`ID3 <mutagen.id3.ID3>` - """ - - _Info = TrueAudioInfo - _mimes = ["audio/x-tta"] - - @staticmethod - def score(filename, fileobj, header): - return (header.startswith(b"ID3") + header.startswith(b"TTA") + - endswith(filename.lower(), b".tta") * 2) - - -Open = TrueAudio - - -class EasyTrueAudio(TrueAudio): - """Like MP3, but uses EasyID3 for tags. 
- - :ivar info: :class:`TrueAudioInfo` - :ivar tags: :class:`EasyID3 <mutagen.easyid3.EasyID3>` - """ - - from mutagen.easyid3 import EasyID3 as ID3 - ID3 = ID3 diff --git a/resources/lib/libraries/mutagen/wavpack.py b/resources/lib/libraries/mutagen/wavpack.py deleted file mode 100644 index 80710f6d..00000000 --- a/resources/lib/libraries/mutagen/wavpack.py +++ /dev/null @@ -1,125 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2006 Joe Wreschnig -# 2014 Christoph Reiter -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 as -# published by the Free Software Foundation. - -"""WavPack reading and writing. - -WavPack is a lossless format that uses APEv2 tags. Read - -* http://www.wavpack.com/ -* http://www.wavpack.com/file_format.txt - -for more information. -""" - -__all__ = ["WavPack", "Open", "delete"] - -from mutagen import StreamInfo -from mutagen.apev2 import APEv2File, error, delete -from mutagen._util import cdata - - -class WavPackHeaderError(error): - pass - -RATES = [6000, 8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, - 48000, 64000, 88200, 96000, 192000] - - -class _WavPackHeader(object): - - def __init__(self, block_size, version, track_no, index_no, total_samples, - block_index, block_samples, flags, crc): - - self.block_size = block_size - self.version = version - self.track_no = track_no - self.index_no = index_no - self.total_samples = total_samples - self.block_index = block_index - self.block_samples = block_samples - self.flags = flags - self.crc = crc - - @classmethod - def from_fileobj(cls, fileobj): - """A new _WavPackHeader or raises WavPackHeaderError""" - - header = fileobj.read(32) - if len(header) != 32 or not header.startswith(b"wvpk"): - raise WavPackHeaderError("not a WavPack header: %r" % header) - - block_size = cdata.uint_le(header[4:8]) - version = cdata.ushort_le(header[8:10]) - track_no = ord(header[10:11]) - index_no = ord(header[11:12]) - samples = cdata.uint_le(header[12:16]) - if samples == 2 ** 32 - 1: - samples = -1 - block_index = cdata.uint_le(header[16:20]) - block_samples = cdata.uint_le(header[20:24]) - flags = cdata.uint_le(header[24:28]) - crc = cdata.uint_le(header[28:32]) - - return _WavPackHeader(block_size, version, track_no, index_no, - samples, block_index, block_samples, flags, crc) - - -class WavPackInfo(StreamInfo): - """WavPack stream information. 
- - Attributes: - - * channels - number of audio channels (1 or 2) - * length - file length in seconds, as a float - * sample_rate - audio sampling rate in Hz - * version - WavPack stream version - """ - - def __init__(self, fileobj): - try: - header = _WavPackHeader.from_fileobj(fileobj) - except WavPackHeaderError: - raise WavPackHeaderError("not a WavPack file") - - self.version = header.version - self.channels = bool(header.flags & 4) or 2 - self.sample_rate = RATES[(header.flags >> 23) & 0xF] - - if header.total_samples == -1 or header.block_index != 0: - # TODO: we could make this faster by using the tag size - # and search backwards for the last block, then do - # last.block_index + last.block_samples - initial.block_index - samples = header.block_samples - while 1: - fileobj.seek(header.block_size - 32 + 8, 1) - try: - header = _WavPackHeader.from_fileobj(fileobj) - except WavPackHeaderError: - break - samples += header.block_samples - else: - samples = header.total_samples - - self.length = float(samples) / self.sample_rate - - def pprint(self): - return u"WavPack, %.2f seconds, %d Hz" % (self.length, - self.sample_rate) - - -class WavPack(APEv2File): - _Info = WavPackInfo - _mimes = ["audio/x-wavpack"] - - @staticmethod - def score(filename, fileobj, header): - return header.startswith(b"wvpk") * 2 - - -Open = WavPack diff --git a/resources/lib/library.py b/resources/lib/library.py index 7330e357..a69098e4 100644 --- a/resources/lib/library.py +++ b/resources/lib/library.py @@ -55,11 +55,14 @@ class Library(threading.Thread): def __init__(self, monitor): + self.media = {'Movies': Movies, 'TVShows': TVShows, 'MusicVideos': MusicVideos, 'Music': Music} + self.MEDIA = MEDIA + self.direct_path = settings('useDirectPaths') == "1" self.progress_display = int(settings('syncProgress') or 50) self.monitor = monitor self.player = monitor.monitor.player - self.server = Emby() + self.server = Emby().get_client() self.updated_queue = Queue.Queue() self.userdata_queue = Queue.Queue() self.removed_queue = Queue.Queue() @@ -116,6 +119,14 @@ class Library(threading.Thread): LOG.warn("---<[ library ]") + def test_databases(self): + + ''' Open the databases to test if the file exists. + ''' + with Database('video') as kodidb: + with Database('music') as musicdb: + pass + @stop() def service(self): @@ -140,6 +151,7 @@ class Library(threading.Thread): self.worker_notify() if self.pending_refresh: + window('emby_sync.bool', True) if self.total_updates > self.progress_display: queue_size = self.worker_queue_size() @@ -165,6 +177,7 @@ class Library(threading.Thread): self.pending_refresh = False self.save_last_sync() self.total_updates = 0 + window('emby_sync', clear=True) if self.progress_updates: @@ -177,14 +190,24 @@ class Library(threading.Thread): set_screensaver(value=self.screensaver) self.screensaver = None - if xbmc.getCondVisibility('Container.Content(musicvideos)') or xbmc.getCondVisibility('Window.IsMedia'): # Prevent cursor from moving + if xbmc.getCondVisibility('Container.Content(musicvideos)'): # Prevent cursor from moving xbmc.executebuiltin('Container.Refresh') else: # Update widgets xbmc.executebuiltin('UpdateLibrary(video)') + if xbmc.getCondVisibility('Window.IsMedia'): + xbmc.executebuiltin('Container.Refresh') + def stop_client(self): self.stop_thread = True + def enable_pending_refresh(self): + + ''' When there's an active thread. Let the main thread know. 
+ ''' + self.pending_refresh = True + window('emby_sync.bool', True) + def worker_queue_size(self): ''' Get how many items are queued up for worker threads. @@ -240,7 +263,7 @@ class Library(threading.Thread): new_thread.start() LOG.info("-->[ q:updated/%s/%s ]", queues, id(new_thread)) self.writer_threads['updated'].append(new_thread) - self.pending_refresh = True + self.enable_pending_refresh() def worker_userdata(self): @@ -259,7 +282,7 @@ class Library(threading.Thread): new_thread.start() LOG.info("-->[ q:userdata/%s/%s ]", queues, id(new_thread)) self.writer_threads['userdata'].append(new_thread) - self.pending_refresh = True + self.enable_pending_refresh() def worker_remove(self): @@ -278,7 +301,7 @@ class Library(threading.Thread): new_thread.start() LOG.info("-->[ q:removed/%s/%s ]", queues, id(new_thread)) self.writer_threads['removed'].append(new_thread) - self.pending_refresh = True + self.enable_pending_refresh() def worker_notify(self): @@ -294,8 +317,12 @@ class Library(threading.Thread): def startup(self): - ''' Run at startup. Will check for the server plugin. + ''' Run at startup. + Check databases. + Check for the server plugin. ''' + self.test_databases() + Views().get_views() Views().get_nodes() @@ -303,14 +330,18 @@ class Library(threading.Thread): if get_sync()['Libraries']: try: - FullSync(self) + with FullSync(self, self.server) as sync: + sync.libraries() + Views().get_nodes() except Exception as error: LOG.error(error) elif not settings('SyncInstallRunDone.bool'): - FullSync(self) + with FullSync(self, self.server) as sync: + sync.libraries() + Views().get_nodes() return True @@ -490,7 +521,8 @@ class Library(threading.Thread): def add_library(self, library_id, update=False): try: - FullSync(self, library_id, update=update) + with FullSync(self, server=self.server) as sync: + sync.libraries(library_id, update) except Exception as error: LOG.exception(error) @@ -500,64 +532,15 @@ class Library(threading.Thread): return True - @progress(_(33144)) - def remove_library(self, library_id, dialog): - + def remove_library(self, library_id): + try: - with Database('emby') as embydb: + with FullSync(self, self.server) as sync: + sync.remove_library(library_id) - db = emby_db.EmbyDatabase(embydb.cursor) - library = db.get_view(library_id.replace('Mixed:', "")) - items = db.get_item_by_media_folder(library_id.replace('Mixed:', "")) - media = 'music' if library[1] == 'music' else 'video' - - if media == 'music': - settings('MusicRescan.bool', False) - - if items: - count = 0 - - with self.music_database_lock if media == 'music' else self.database_lock: - with Database(media) as kodidb: - - if library[1] == 'mixed': - movies = [x for x in items if x[1] == 'Movie'] - tvshows = [x for x in items if x[1] == 'Series'] - - obj = MEDIA['Movie'](self.server, embydb, kodidb, self.direct_path)['Remove'] - - for item in movies: - obj(item[0]) - dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) - count += 1 - - obj = MEDIA['Series'](self.server, embydb, kodidb, self.direct_path)['Remove'] - - for item in tvshows: - obj(item[0]) - dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) - count += 1 - else: - obj = MEDIA[items[0][1]](self.server, embydb, kodidb, self.direct_path)['Remove'] - - for item in items: - obj(item[0]) - dialog.update(int((float(count) / float(len(items))*100)), heading="%s: %s" % (_('addon_name'), library[0])) - count += 1 - - sync = get_sync() - - if 
library_id in sync['Whitelist']: - sync['Whitelist'].remove(library_id) - elif 'Mixed:%s' % library_id in sync['Whitelist']: - sync['Whitelist'].remove('Mixed:%s' % library_id) - - save_sync(sync) Views().remove_library(library_id) except Exception as error: - LOG.exception(error) - dialog.close() return False diff --git a/resources/lib/monitor.py b/resources/lib/monitor.py index ba9f15ad..d488092b 100644 --- a/resources/lib/monitor.py +++ b/resources/lib/monitor.py @@ -18,6 +18,7 @@ from client import get_device_id from objects import Actions, PlaylistWorker, on_play, on_update, special_listener from helper import _, settings, window, dialog, event, api, JSONRPC from emby import Emby +from webservice import WebService ################################################################################################# @@ -37,6 +38,8 @@ class Monitor(xbmc.Monitor): self.device_id = get_device_id() self.listener = Listener(self) self.listener.start() + self.webservice = WebService() + self.webservice.start() xbmc.Monitor.__init__(self) def onScanStarted(self, library): @@ -76,6 +79,18 @@ class Monitor(xbmc.Monitor): data = json.loads(binascii.unhexlify(data[0])) else: if method not in ('Player.OnPlay', 'VideoLibrary.OnUpdate', 'Player.OnAVChange'): + + ''' We have to clear the playlist if it was stopped before it has been played completely. + Otherwise the next played item will be added the previous queue. + ''' + if method == "Player.OnStop": + xbmc.sleep(3000) # let's wait for the player so we don't clear the canceled playlist by mistake. + + if xbmc.getCondVisibility("!Player.HasMedia + !Window.IsVisible(busydialog)"): + + xbmc.executebuiltin("Playlist.Clear") + LOG.info("[ playlist ] cleared") + return data = json.loads(data) @@ -179,7 +194,7 @@ class Monitor(xbmc.Monitor): elif method == 'Browse': result = downloader.get_filtered_section(data.get('Id'), data.get('Media'), data.get('Limit'), - data.get('Recursive'), data.get('Sort'), data.get('SortOrder'), + data.get('Recursive'), data.get('Sort'), data.get('SortOrder'), data.get('Filters'), data.get('Params'), data.get('ServerId')) self.void_responder(data, result) @@ -227,10 +242,9 @@ class Monitor(xbmc.Monitor): elif method == 'Play': - item = server['api'].get_item(data['ItemIds'].pop(0)) - data['ItemIds'].insert(0, item) + items = server['api'].get_items(data['ItemIds']) - PlaylistWorker(data.get('ServerId'), data['ItemIds'], data['PlayCommand'] == 'PlayNow', + PlaylistWorker(data.get('ServerId'), items, data['PlayCommand'] == 'PlayNow', data.get('StartPositionTicks', 0), data.get('AudioStreamIndex'), data.get('SubtitleStreamIndex')).start() diff --git a/resources/lib/objects/__init__.py b/resources/lib/objects/__init__.py index 6cf757ac..77d65e37 100644 --- a/resources/lib/objects/__init__.py +++ b/resources/lib/objects/__init__.py @@ -1,4 +1,4 @@ -version = "171076022" +version = "171076031" from movies import Movies from musicvideos import MusicVideos diff --git a/resources/lib/objects/actions.py b/resources/lib/objects/actions.py index e9ba96a0..e2f550cf 100644 --- a/resources/lib/objects/actions.py +++ b/resources/lib/objects/actions.py @@ -101,7 +101,7 @@ class Actions(object): if transcode and not seektime: choice = self.resume_dialog(api.API(item, self.server).adjust_resume((resume or 0) / 10000000.0)) - + if choice is None: raise Exception("User backed out of resume dialog.") @@ -118,7 +118,7 @@ class Actions(object): self._set_additional_parts(item['Id']) def _set_intros(self, item): - + ''' if we have any play them when the 
movie/show is not being resumed. ''' intros = TheVoid('GetIntros', {'ServerId': self.server_id, 'Id': item['Id']}).get() @@ -174,11 +174,17 @@ class Actions(object): ''' Play a list of items. Creates a new playlist. Add additional items as plugin listing. ''' - item = items[0] + item = items['Items'][0] playlist = self.get_playlist(item) + player = xbmc.Player() + + #xbmc.executebuiltin("Playlist.Clear") # Clear playlist to remove the previous item from playlist position no.2 if clear: - playlist.clear() + if player.isPlaying(): + player.stop() + + xbmc.executebuiltin('ActivateWindow(busydialognocancel)') index = 0 else: index = max(playlist.getposition(), 0) + 1 # Can return -1 @@ -201,14 +207,17 @@ class Actions(object): index += 1 if clear: - xbmc.Player().play(playlist) + xbmc.executebuiltin('Dialog.Close(busydialognocancel)') + player.play(playlist) - for item in items[1:]: + for item in items['Items'][1:]: listitem = xbmcgui.ListItem() - LOG.info("[ playlist/%s ]", item) - path = "plugin://plugin.video.emby/?mode=play&id=%s&playlist=true" % item + LOG.info("[ playlist/%s ] %s", item['Id'], item['Name']) + self.set_listitem(item, listitem, None, False) + path = "plugin://plugin.video.emby/?mode=play&id=%s&playlist=true" % item['Id'] listitem.setPath(path) + playlist.add(path, listitem, index) index += 1 @@ -244,7 +253,7 @@ class Actions(object): if intro: obj['Artwork']['Primary'] = "&KodiCinemaMode=true" - self.listitem_video(obj, listitem, item, seektime) + self.listitem_video(obj, listitem, item, seektime, intro) if 'PlaybackInfo' in item: @@ -269,7 +278,7 @@ class Actions(object): listitem.setContentLookup(False) - def listitem_video(self, obj, listitem, item, seektime=None): + def listitem_video(self, obj, listitem, item, seektime=None, intro=False): ''' Set listitem for video content. That also include streams. 
''' @@ -296,12 +305,26 @@ class Actions(object): obj['Video'] = API.video_streams(obj['Video'] or [], obj['Container']) obj['Audio'] = API.audio_streams(obj['Audio'] or []) obj['Streams'] = API.media_streams(obj['Video'], obj['Audio'], obj['Subtitles']) - obj['Artwork']['Primary'] = obj['Artwork']['Primary'] or "special://home/addons/plugin.video.emby/icon.png" - obj['Artwork']['Thumb'] = obj['Artwork']['Thumb'] or "special://home/addons/plugin.video.emby/fanart.jpg" - obj['Artwork']['Backdrop'] = obj['Artwork']['Backdrop'] or ["special://home/addons/plugin.video.emby/fanart.jpg"] obj['ChildCount'] = obj['ChildCount'] or 0 obj['RecursiveCount'] = obj['RecursiveCount'] or 0 obj['Unwatched'] = obj['Unwatched'] or 0 + obj['Artwork']['Backdrop'] = obj['Artwork']['Backdrop'] or [] + obj['Artwork']['Thumb'] = obj['Artwork']['Thumb'] or "" + + if not intro and not obj['Type'] == 'Trailer': + obj['Artwork']['Primary'] = obj['Artwork']['Primary'] or "special://home/addons/plugin.video.emby/icon.png" + else: + obj['Artwork']['Primary'] = obj['Artwork']['Primary'] or obj['Artwork']['Thumb'] or (obj['Artwork']['Backdrop'][0] if len(obj['Artwork']['Backdrop']) else "special://home/addons/plugin.video.emby/fanart.jpg") + obj['Artwork']['Primary'] += "&KodiTrailer=true" if obj['Type'] == 'Trailer' else "&KodiCinemaMode=true" + obj['Artwork']['Backdrop'] = [obj['Artwork']['Primary']] + + self.set_artwork(obj['Artwork'], listitem, obj['Type']) + + if intro or obj['Type'] == 'Trailer': + listitem.setArt({'poster': ""}) # Clear the poster value for intros / trailers to prevent issues in skins + + listitem.setIconImage('DefaultVideo.png') + listitem.setThumbnailImage(obj['Artwork']['Primary']) if obj['Premiere']: obj['Premiere'] = obj['Premiere'].split('T')[0] @@ -327,28 +350,17 @@ class Actions(object): 'tagline': obj['Tagline'], 'writer': obj['Writers'], 'premiered': obj['Premiere'], - 'aired': obj['Premiere'], 'votes': obj['Votes'], 'dateadded': obj['DateAdded'], + 'aired': obj['Year'], 'date': obj['FileDate'], 'dbid': obj['DbId'] } listitem.setCast(API.get_actors()) - listitem.setIconImage(obj['Artwork']['Thumb']) - listitem.setThumbnailImage(obj['Artwork']['Primary']) - self.set_artwork(obj['Artwork'], listitem, obj['Type']) - - if obj['Artwork']['Primary']: - listitem.setThumbnailImage(obj['Artwork']['Primary']) - - if not obj['Artwork']['Backdrop']: - listitem.setArt({'fanart': obj['Artwork']['Primary']}) if obj['Premiere']: - metadata['premieredate'] = obj['Premiere'] metadata['date'] = obj['Premiere'] - if obj['Type'] == 'Episode': metadata.update({ 'mediatype': "episode", @@ -358,7 +370,8 @@ class Actions(object): 'episode': obj['Index'] or 0, 'sortepisode': obj['Index'] or 0, 'lastplayed': obj['DatePlayed'], - 'duration': obj['Runtime'] + 'duration': obj['Runtime'], + 'aired': obj['Premiere'], }) elif obj['Type'] == 'Season': @@ -374,9 +387,14 @@ class Actions(object): listitem.setProperty('IsFolder', 'true') elif obj['Type'] == 'Series': + + if obj['Status'] != 'Ended': + obj['Status'] = None + metadata.update({ 'mediatype': "tvshow", - 'tvshowtitle': obj['Title'] + 'tvshowtitle': obj['Title'], + 'status': obj['Status'] }) listitem.setProperty('TotalSeasons', str(obj['ChildCount'])) listitem.setProperty('TotalEpisodes', str(obj['RecursiveCount'])) @@ -389,7 +407,8 @@ class Actions(object): 'mediatype': "movie", 'imdbnumber': obj['UniqueId'], 'lastplayed': obj['DatePlayed'], - 'duration': obj['Runtime'] + 'duration': obj['Runtime'], + 'userrating': obj['CriticRating'] }) elif obj['Type'] == 
'MusicVideo': @@ -400,7 +419,7 @@ class Actions(object): 'lastplayed': obj['DatePlayed'], 'duration': obj['Runtime'] }) - + elif obj['Type'] == 'BoxSet': metadata['mediatype'] = "set" listitem.setProperty('IsFolder', 'true') @@ -408,10 +427,10 @@ class Actions(object): metadata.update({ 'mediatype': "video", 'lastplayed': obj['DatePlayed'], + 'year': obj['Year'], 'duration': obj['Runtime'] }) - if is_video: listitem.setProperty('totaltime', str(obj['Runtime'])) @@ -550,8 +569,6 @@ class Actions(object): } listitem.setProperty('path', obj['Artwork']['Primary']) listitem.setThumbnailImage(obj['Artwork']['Primary']) - listitem.setIconImage(obj['Artwork']['Primary'] or "special://home/addons/plugin.video.emby/icon.png") - listitem.setArt({'fanart': obj['Artwork']['Primary'] or "special://home/addons/plugin.video.emby/fanart.jpg"}) if obj['Type'] == 'Photo': metadata.update({ @@ -567,11 +584,10 @@ class Actions(object): }) listitem.setProperty('plot', obj['Overview']) listitem.setProperty('IsFolder', 'false') + listitem.setIconImage('DefaultPicture.png') else: - if obj['Artwork']['Backdrop']: - listitem.setArt({'fanart': obj['Artwork']['Backdrop'][0]}) - listitem.setProperty('IsFolder', 'true') + listitem.setIconImage('DefaultFolder.png') listitem.setProperty('IsPlayable', 'false') listitem.setLabel(obj['Title']) @@ -631,7 +647,7 @@ class Actions(object): 'medium_landscape', 'medium_poster', 'small_fanartimage', 'medium_fanartimage', 'fanart_noindicators', 'discart', 'tvshow.poster'): - + listitem.setProperty(art, path) else: listitem.setArt({art: path}) @@ -688,14 +704,14 @@ class PlaylistWorker(threading.Thread): def on_update(data, server): - + ''' Only for manually marking as watched/unwatched ''' try: kodi_id = data['item']['id'] media = data['item']['type'] playcount = int(data['playcount']) - LOG.info(" [ update/%s ] kodi_id: %s media: %s", playcount, kodi_id, media) + LOG.info(" [ update/%s ] kodi_id: %s media: %s", playcount, kodi_id, media) except (KeyError, TypeError): LOG.debug("Invalid playstate update") @@ -735,7 +751,7 @@ def on_play(data, server): kodi_id = item['id'] media = item['type'] - LOG.info(" [ play ] kodi_id: %s media: %s", kodi_id, media) + LOG.info(" [ play ] kodi_id: %s media: %s", kodi_id, media) except (KeyError, TypeError): LOG.debug("Invalid playstate update") diff --git a/resources/lib/objects/kodi/artwork.py b/resources/lib/objects/kodi/artwork.py index 4a2593fc..15144e99 100644 --- a/resources/lib/objects/kodi/artwork.py +++ b/resources/lib/objects/kodi/artwork.py @@ -13,7 +13,7 @@ import xbmcvfs import queries as QU import queries_texture as QUTEX from helper import window, settings -from libraries import requests +import requests ################################################################################################## @@ -151,7 +151,7 @@ class Artwork(object): if thread.is_done: self.threads.remove(thread) - if self.queue.qsize() and len(self.threads) < 3: + if self.queue.qsize() and len(self.threads) < 2: new_thread = GetArtworkWorker(self.kodi, self.queue) new_thread.start() diff --git a/resources/lib/objects/kodi/queries.py b/resources/lib/objects/kodi/queries.py index cd040f91..d2c37110 100644 --- a/resources/lib/objects/kodi/queries.py +++ b/resources/lib/objects/kodi/queries.py @@ -279,13 +279,13 @@ add_art = """ INSERT INTO art(media_id, media_type, type, url) VALUES (?, ?, ?, ?) 
""" add_movie = """ INSERT INTO movie(idMovie, idFile, c00, c01, c02, c03, c04, c05, c06, c07, - c09, c10, c11, c12, c14, c15, c16, c18, c19, c21, premiered) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + c09, c10, c11, c12, c14, c15, c16, c18, c19, c21, userrating, premiered) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """ add_movie_obj = [ "{MovieId}","{FileId}","{Title}","{Plot}","{ShortPlot}","{Tagline}", "{Votes}","{RatingId}","{Writers}","{Year}","{Unique}","{SortTitle}", "{Runtime}","{Mpaa}","{Genre}","{Directors}","{Title}","{Studio}", - "{Trailer}","{Country}","{Year}" + "{Trailer}","{Country}","{CriticRating}","{Year}" ] add_rating = """ INSERT INTO rating(rating_id, media_id, media_type, rating_type, rating, votes) VALUES (?, ?, ?, ?, ?, ?) @@ -320,11 +320,11 @@ add_musicvideo = """ INSERT INTO musicvideo(idMVideo,idFile, c00, c04, c05, add_musicvideo_obj = [ "{MvideoId}","{FileId}","{Title}","{Runtime}","{Directors}","{Studio}","{Year}", "{Plot}","{Album}","{Artists}","{Genre}","{Index}","{Premiere}" ] -add_tvshow = """ INSERT INTO tvshow(idShow, c00, c01, c04, c05, c08, c09, c12, c13, c14, c15) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) +add_tvshow = """ INSERT INTO tvshow(idShow, c00, c01, c02, c04, c05, c08, c09, c10, c12, c13, c14, c15) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """ -add_tvshow_obj = [ "{ShowId}","{Title}","{Plot}","{RatingId}","{Premiere}","{Genre}","{Title}", - "{Unique}","{Mpaa}","{Studio}","{SortTitle}" +add_tvshow_obj = [ "{ShowId}","{Title}","{Plot}","{Status}","{RatingId}","{Premiere}","{Genre}","{Title}", + "disintegrate browse bug", "{Unique}","{Mpaa}","{Studio}","{SortTitle}" ] add_season = """ INSERT INTO seasons(idSeason, idShow, season) VALUES (?, ?, ?) @@ -355,7 +355,7 @@ update_path_tvshow_obj = [ "{Path}",None,None,1,"{PathId}" ] update_path_episode_obj = [ "{Path}",None,None,1,"{PathId}" ] -update_path_mvideo_obj = [ "{Path}","musicvideos","metadata.local",1,"{PathId}" +update_path_mvideo_obj = [ "{Path}","musicvideos",None,1,"{PathId}" ] update_file = """ UPDATE files SET idPath = ?, strFilename = ?, dateAdded = ? @@ -392,13 +392,13 @@ update_link = """ INSERT OR REPLACE INTO {LinkType}(actor_id, media_id, medi update_movie = """ UPDATE movie SET c00 = ?, c01 = ?, c02 = ?, c03 = ?, c04 = ?, c05 = ?, c06 = ?, c07 = ?, c09 = ?, c10 = ?, c11 = ?, c12 = ?, c14 = ?, c15 = ?, - c16 = ?, c18 = ?, c19 = ?, c21 = ?, premiered = ? + c16 = ?, c18 = ?, c19 = ?, c21 = ?, userrating = ?, premiered = ? WHERE idMovie = ? """ update_movie_obj = [ "{Title}","{Plot}","{ShortPlot}","{Tagline}","{Votes}","{RatingId}", "{Writers}","{Year}","{Unique}","{SortTitle}","{Runtime}", "{Mpaa}","{Genre}","{Directors}","{Title}","{Studio}","{Trailer}", - "{Country}","{Year}","{MovieId}" + "{Country}","{CriticRating}","{Year}","{MovieId}" ] update_rating = """ UPDATE rating SET media_id = ?, media_type = ?, rating_type = ?, rating = ?, votes = ? @@ -446,12 +446,12 @@ update_musicvideo_obj = [ "{Title}","{Runtime}","{Directors}","{Studio}"," "{Artists}","{Genre}","{Index}","{Premiere}","{MvideoId}" ] update_tvshow = """ UPDATE tvshow - SET c00 = ?, c01 = ?, c04 = ?, c05 = ?, c08 = ?, c09 = ?, + SET c00 = ?, c01 = ?, c02 = ?, c04 = ?, c05 = ?, c08 = ?, c09 = ?, c10 = ?, c12 = ?, c13 = ?, c14 = ?, c15 = ? WHERE idShow = ? 
""" -update_tvshow_obj = [ "{Title}","{Plot}","{RatingId}","{Premiere}","{Genre}","{Title}", - "{Unique}","{Mpaa}","{Studio}","{SortTitle}","{ShowId}" +update_tvshow_obj = [ "{Title}","{Plot}","{Status}","{RatingId}","{Premiere}","{Genre}","{Title}", + "disintegrate browse bug","{Unique}","{Mpaa}","{Studio}","{SortTitle}","{ShowId}" ] update_tvshow_link = """ INSERT OR REPLACE INTO tvshowlinkpath(idShow, idPath) VALUES (?, ?) diff --git a/resources/lib/objects/movies.py b/resources/lib/objects/movies.py index 359b25ce..cf32699c 100644 --- a/resources/lib/objects/movies.py +++ b/resources/lib/objects/movies.py @@ -10,7 +10,7 @@ import downloader as server from obj import Objects from kodi import Movies as KodiDb, queries as QU from database import emby_db, queries as QUEM -from helper import api, catch, stop, validate, emby_item, library_check, values +from helper import api, catch, stop, validate, emby_item, library_check, values, settings, Local ################################################################################################## @@ -72,6 +72,8 @@ class Movies(KodiDb): update = False LOG.info("MovieId %s missing from kodi. repairing the entry.", obj['MovieId']) + if not settings('syncRottenTomatoes.bool'): + obj['CriticRating'] = None obj['Path'] = API.get_file_path(obj['Path']) obj['LibraryId'] = library['Id'] @@ -87,8 +89,8 @@ class Movies(KodiDb): obj['Resume'] = API.adjust_resume((obj['Resume'] or 0) / 10000000.0) obj['Runtime'] = round(float((obj['Runtime'] or 0) / 10000000.0), 6) obj['People'] = API.get_people_artwork(obj['People']) - obj['DateAdded'] = obj['DateAdded'].split('.')[0].replace('T', " ") - obj['DatePlayed'] = None if not obj['DatePlayed'] else obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DateAdded'] = Local(obj['DateAdded']).split('.')[0].replace('T', " ") + obj['DatePlayed'] = None if not obj['DatePlayed'] else Local(obj['DatePlayed']).split('.')[0].replace('T', " ") obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) obj['Artwork'] = API.get_all_artwork(self.objects.map(item, 'Artwork')) obj['Video'] = API.video_streams(obj['Video'] or [], obj['Container']) @@ -302,7 +304,7 @@ class Movies(KodiDb): obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) if obj['DatePlayed']: - obj['DatePlayed'] = obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DatePlayed'] = Local(obj['DatePlayed']).split('.')[0].replace('T', " ") if obj['Favorite']: self.get_tag(*values(obj, QU.get_tag_movie_obj)) diff --git a/resources/lib/objects/music.py b/resources/lib/objects/music.py index fbc43480..a80b0d33 100644 --- a/resources/lib/objects/music.py +++ b/resources/lib/objects/music.py @@ -10,7 +10,7 @@ import urllib from obj import Objects from kodi import Music as KodiDb, queries_music as QU from database import emby_db, queries as QUEM -from helper import api, catch, stop, validate, emby_item, values, library_check +from helper import api, catch, stop, validate, emby_item, values, library_check, settings, Local ################################################################################################## @@ -268,10 +268,10 @@ class Music(KodiDb): obj['Artwork'] = API.get_all_artwork(self.objects.map(item, 'ArtworkMusic'), True) if obj['DateAdded']: - obj['DateAdded'] = obj['DateAdded'].split('.')[0].replace('T', " ") + obj['DateAdded'] = Local(obj['DateAdded']).split('.')[0].replace('T', " ") if obj['DatePlayed']: - obj['DatePlayed'] = obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DatePlayed'] = 
Local(obj['DatePlayed']).split('.')[0].replace('T', " ") if obj['Disc'] != 1: obj['Index'] = obj['Disc'] * 2 ** 16 + obj['Index'] @@ -442,7 +442,7 @@ class Music(KodiDb): if obj['Media'] == 'song': if obj['DatePlayed']: - obj['DatePlayed'] = obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DatePlayed'] = Local(obj['DatePlayed']).split('.')[0].replace('T', " ") self.rate_song(*values(obj, QU.update_song_rating_obj)) diff --git a/resources/lib/objects/musicvideos.py b/resources/lib/objects/musicvideos.py index 98bae32a..1a528ba4 100644 --- a/resources/lib/objects/musicvideos.py +++ b/resources/lib/objects/musicvideos.py @@ -10,7 +10,7 @@ import urllib from obj import Objects from kodi import MusicVideos as KodiDb, queries as QU from database import emby_db, queries as QUEM -from helper import api, catch, stop, validate, library_check, emby_item, values +from helper import api, catch, stop, validate, library_check, emby_item, values, Local ################################################################################################## @@ -80,12 +80,12 @@ class MusicVideos(KodiDb): obj['ArtistItems'] = obj['ArtistItems'] or [] obj['Studios'] = [API.validate_studio(studio) for studio in (obj['Studios'] or [])] obj['Plot'] = API.get_overview(obj['Plot']) - obj['DateAdded'] = obj['DateAdded'].split('.')[0].replace('T', " ") - obj['DatePlayed'] = None if not obj['DatePlayed'] else obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DateAdded'] = Local(obj['DateAdded']).split('.')[0].replace('T', " ") + obj['DatePlayed'] = None if not obj['DatePlayed'] else Local(obj['DatePlayed']).split('.')[0].replace('T', " ") obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) obj['Resume'] = API.adjust_resume((obj['Resume'] or 0) / 10000000.0) obj['Runtime'] = round(float((obj['Runtime'] or 0) / 10000000.0), 6) - obj['Premiere'] = obj['Premiere'] or datetime.date(obj['Year'] or 2021, 1, 1) + obj['Premiere'] = Local(obj['Premiere']) if obj['Premiere'] else datetime.date(obj['Year'] or 2021, 1, 1) obj['Genre'] = " / ".join(obj['Genres']) obj['Studio'] = " / ".join(obj['Studios']) obj['Artists'] = " / ".join(obj['Artists'] or []) @@ -205,7 +205,7 @@ class MusicVideos(KodiDb): obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) if obj['DatePlayed']: - obj['DatePlayed'] = obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DatePlayed'] = Local(obj['DatePlayed']).split('.')[0].replace('T', " ") if obj['Favorite']: self.get_tag(*values(obj, QU.get_tag_mvideo_obj)) diff --git a/resources/lib/objects/obj.py b/resources/lib/objects/obj.py index 6ff845ac..e5f76964 100644 --- a/resources/lib/objects/obj.py +++ b/resources/lib/objects/obj.py @@ -100,7 +100,7 @@ class Objects(object): if not self.__filters__(obj, obj_filters): obj = None - if not obj and len(params) != params.index(param): + if obj is None and len(params) != params.index(param): continue if obj_key: diff --git a/resources/lib/objects/obj_map.json b/resources/lib/objects/obj_map.json index d9d63e87..420c0120 100644 --- a/resources/lib/objects/obj_map.json +++ b/resources/lib/objects/obj_map.json @@ -40,7 +40,8 @@ "Audio": "MediaSources/0/MediaStreams:?Type=Audio", "Video": "MediaSources/0/MediaStreams:?Type=Video", "Container": "MediaSources/0/Container", - "EmbyParentId": "ParentId" + "EmbyParentId": "ParentId", + "CriticRating": "CriticRating" }, "MovieUserData": { "Id": "Id", @@ -76,7 +77,8 @@ "Tags": "Tags", "Favorite": "UserData/IsFavorite", "RecursiveCount": "RecursiveItemCount", - "EmbyParentId": 
"ParentId" + "EmbyParentId": "ParentId", + "Status": "Status" }, "Season": { "Id": "Id", @@ -299,7 +301,9 @@ "Unwatched": "UserData/UnplayedItemCount", "ChildCount": "ChildCount", "RecursiveCount": "RecursiveItemCount", - "MediaType": "MediaType" + "MediaType": "MediaType", + "CriticRating": "CriticRating", + "Status": "Status" }, "BrowseAudio": { "Id": "Id", diff --git a/resources/lib/objects/tvshows.py b/resources/lib/objects/tvshows.py index d55ef93e..5e8c4ec3 100644 --- a/resources/lib/objects/tvshows.py +++ b/resources/lib/objects/tvshows.py @@ -12,7 +12,7 @@ from obj import Objects from kodi import TVShows as KodiDb, queries as QU import downloader as server from database import emby_db, queries as QUEM -from helper import api, catch, stop, validate, emby_item, library_check, settings, values +from helper import api, catch, stop, validate, emby_item, library_check, settings, values, Local ################################################################################################## @@ -101,10 +101,13 @@ class TVShows(KodiDb): obj['Studio'] = " / ".join(obj['Studios']) obj['Artwork'] = API.get_all_artwork(self.objects.map(item, 'Artwork')) + if obj['Status'] != 'Ended': + obj['Status'] = None + self.get_path_filename(obj) if obj['Premiere']: - obj['Premiere'] = str(obj['Premiere']).split('.')[0].replace('T', " ") + obj['Premiere'] = str(Local(obj['Premiere'])).split('.')[0].replace('T', " ") tags = [] tags.extend(obj['Tags'] or []) @@ -299,8 +302,8 @@ class TVShows(KodiDb): obj['Resume'] = API.adjust_resume((obj['Resume'] or 0) / 10000000.0) obj['Runtime'] = round(float((obj['Runtime'] or 0) / 10000000.0), 6) obj['People'] = API.get_people_artwork(obj['People'] or []) - obj['DateAdded'] = obj['DateAdded'].split('.')[0].replace('T', " ") - obj['DatePlayed'] = None if not obj['DatePlayed'] else obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DateAdded'] = Local(obj['DateAdded']).split('.')[0].replace('T', " ") + obj['DatePlayed'] = None if not obj['DatePlayed'] else Local(obj['DatePlayed']).split('.')[0].replace('T', " ") obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) obj['Artwork'] = API.get_all_artwork(self.objects.map(item, 'Artwork')) obj['Video'] = API.video_streams(obj['Video'] or [], obj['Container']) @@ -310,7 +313,7 @@ class TVShows(KodiDb): self.get_episode_path_filename(obj) if obj['Premiere']: - obj['Premiere'] = obj['Premiere'].split('.')[0].replace('T', " ") + obj['Premiere'] = Local(obj['Premiere']).split('.')[0].replace('T', " ") if obj['Season'] is None: if obj['AbsoluteNumber']: @@ -479,10 +482,10 @@ class TVShows(KodiDb): obj['PlayCount'] = API.get_playcount(obj['Played'], obj['PlayCount']) if obj['DatePlayed']: - obj['DatePlayed'] = obj['DatePlayed'].split('.')[0].replace('T', " ") + obj['DatePlayed'] = Local(obj['DatePlayed']).split('.')[0].replace('T', " ") if obj['DateAdded']: - obj['DateAdded'] = obj['DateAdded'].split('.')[0].replace('T', " ") + obj['DateAdded'] = Local(obj['DateAdded']).split('.')[0].replace('T', " ") self.add_playstate(*values(obj, QU.add_bookmark_obj)) diff --git a/resources/lib/player.py b/resources/lib/player.py index a10aaafe..2f522008 100644 --- a/resources/lib/player.py +++ b/resources/lib/player.py @@ -22,14 +22,10 @@ LOG = logging.getLogger("EMBY."+__name__) class Player(xbmc.Player): - # Borg - multiple instances, shared state - _shared_state = {} played = {} up_next = False def __init__(self): - - self.__dict__ = self._shared_state xbmc.Player.__init__(self) @silent_catch() @@ -47,7 +43,9 @@ class 
Player(xbmc.Player): ''' We may need to wait for info to be set in kodi monitor. Accounts for scenario where Kodi starts playback and exits immediately. + First, ensure previous playback terminated correctly in Emby. ''' + self.stop_playback() self.up_next = False count = 0 monitor = xbmc.Monitor() @@ -118,7 +116,8 @@ class Player(xbmc.Player): if monitor.waitForAbort(2): return - self.set_audio_subs(item['AudioStreamIndex'], item['SubtitleStreamIndex']) + if item['PlayOption'] == 'Addon': + self.set_audio_subs(item['AudioStreamIndex'], item['SubtitleStreamIndex']) def set_item(self, file, item): @@ -149,7 +148,7 @@ class Player(xbmc.Player): 'CurrentPosition': item.get('CurrentPosition') or int(seektime), 'Muted': muted, 'Volume': volume, - 'Server': Emby(item['ServerId']), + 'Server': Emby(item['ServerId']).get_client(), 'Paused': False }) @@ -368,14 +367,14 @@ class Player(xbmc.Player): ''' window('emby_play', clear=True) self.stop_playback() - LOG.debug("--<[ playback ]") + LOG.info("--<[ playback ]") def onPlayBackEnded(self): ''' Will be called when kodi stops playing a file. ''' self.stop_playback() - LOG.debug("--<<[ playback ]") + LOG.info("--<<[ playback ]") def stop_playback(self): diff --git a/resources/lib/setup.py b/resources/lib/setup.py index abb847e0..6701ecbc 100644 --- a/resources/lib/setup.py +++ b/resources/lib/setup.py @@ -6,7 +6,7 @@ import logging import xbmc -from helper import _, settings, dialog, JSONRPC +from helper import _, settings, dialog, JSONRPC, compare_version ################################################################################################# @@ -65,17 +65,29 @@ class Setup(object): def setup(self): - minimum = "3.0.23" + minimum = "3.0.24" + cached = settings('MinimumSetup') - if settings('MinimumSetup') == minimum: + if cached == minimum: return - self._is_mode() - LOG.info("Add-on playback: %s", settings('useDirectPaths') == "0") - self._is_artwork_caching() - LOG.info("Artwork caching: %s", settings('enableTextureCache.bool')) - self._is_empty_shows() - LOG.info("Sync empty shows: %s", settings('syncEmptyShows.bool')) + if not cached: + + self._is_mode() + LOG.info("Add-on playback: %s", settings('useDirectPaths') == "0") + self._is_artwork_caching() + LOG.info("Artwork caching: %s", settings('enableTextureCache.bool')) + self._is_empty_shows() + LOG.info("Sync empty shows: %s", settings('syncEmptyShows.bool')) + self._is_rotten_tomatoes() + LOG.info("Sync rotten tomatoes: %s", settings('syncRottenTomatoes.bool')) + + """ + if compare_version(cached or minimum, "3.0.24") <= 0: + + self._is_rotten_tomatoes() + LOG.info("Sync rotten tomatoes: %s", settings('syncRottenTomatoes.bool')) + """ # Setup completed settings('MinimumSetup', minimum) @@ -105,6 +117,11 @@ class Setup(object): value = dialog("yesno", heading="{emby}", line1=_(33100)) settings('syncEmptyShows.bool', value) + def _is_rotten_tomatoes(self): + + value = dialog("yesno", heading="{emby}", line1=_(33188)) + settings('syncRottenTomatoes.bool', value) + def _is_music(self): value = dialog("yesno", heading="{emby}", line1=_(33039)) diff --git a/resources/lib/views.py b/resources/lib/views.py index 850a0832..d75b91d6 100644 --- a/resources/lib/views.py +++ b/resources/lib/views.py @@ -169,24 +169,29 @@ class Views(object): libraries = self.server['api'].get_media_folders()['Items'] views = self.server['api'].get_views()['Items'] except Exception as error: - LOG.error("Unable to process libraries: %s", error) - - return [] + raise IndexError("Unable to retrieve libraries: %s" % 
error) libraries.extend([x for x in views if x['Id'] not in [y['Id'] for y in libraries]]) return libraries def get_views(self): - - ''' Get the media folders. Add or remove them. + + ''' Get the media folders. Add or remove them. Do not proceed if issue getting libraries. ''' media = { 'movies': "Movie", 'tvshows': "Series", 'musicvideos': "MusicVideo" } - libraries = self.get_libraries() + + try: + libraries = self.get_libraries() + except IndexError as error: + LOG.error(error) + + return + self.sync['SortedViews'] = [x['Id'] for x in libraries] for library in libraries: @@ -207,14 +212,14 @@ class Views(object): if view[0] not in self.sync['SortedViews']: removed.append(view[0]) - - if removed: + + if removed: event('RemoveLibrary', {'Id': ','.join(removed)}) save_sync(self.sync) def get_nodes(self): - + ''' Set up playlists, video nodes, window prop. ''' node_path = xbmc.translatePath("special://profile/library/video").decode('utf-8') @@ -253,18 +258,18 @@ class Views(object): for single in [{'Name': _('fav_movies'), 'Tag': "Favorite movies", 'Media': "movies"}, {'Name': _('fav_tvshows'), 'Tag': "Favorite tvshows", 'Media': "tvshows"}, {'Name': _('fav_episodes'), 'Tag': "Favorite episodes", 'Media': "episodes"}]: - + self.add_single_node(node_path, index, "favorites", single) index += 1 self.window_nodes() def add_playlist(self, path, view, mixed=False): - + ''' Create or update the xps file. ''' file = os.path.join(path, "emby%s%s.xsp" % (view['Media'], view['Id'])) - + try: xml = etree.parse(file).getroot() except Exception: @@ -518,7 +523,7 @@ class Views(object): break else: etree.SubElement(root, 'group').text = "genres" - + def node_unwatched(self, root): for rule in root.findall('.//order'): @@ -576,7 +581,7 @@ class Views(object): break else: etree.SubElement(root, 'limit').text = str(self.limit) - + for rule in root.findall('.//rule'): if rule.attrib['field'] == 'playcount': rule.find('value').text = "0" @@ -625,7 +630,7 @@ class Views(object): break else: etree.SubElement(root, 'limit').text = str(self.limit) - + for rule in root.findall('.//rule'): if rule.attrib['field'] == 'inprogress': break @@ -684,6 +689,11 @@ class Views(object): index = 0 windex = 0 + try: + self.media_folders = self.get_libraries() + except IndexError as error: + LOG.error(error) + for library in (libraries or []): view = {'Id': library[0], 'Name': library[1], 'Tag': library[1], 'Media': library[2]} @@ -711,14 +721,14 @@ class Views(object): if view['Media'] in ('movies', 'tvshows'): self.window_wnode(windex, view, *node) - + if view['Media'] in ('movies', 'tvshows'): windex += 1 elif view['Media'] == 'music': self.window_node(index, view, 'music') else: # Dynamic entry - if view['Media'] in ('homevideos', 'books', 'audiobooks'): + if view['Media'] in ('homevideos', 'books', 'playlists'): self.window_wnode(windex, view, 'browse') windex += 1 @@ -729,7 +739,7 @@ class Views(object): for single in [{'Name': _('fav_movies'), 'Tag': "Favorite movies", 'Media': "movies"}, {'Name': _('fav_tvshows'), 'Tag': "Favorite tvshows", 'Media': "tvshows"}, {'Name': _('fav_episodes'), 'Tag': "Favorite episodes", 'Media': "episodes"}]: - + self.window_single_node(index, "favorites", single) index += 1 @@ -796,16 +806,16 @@ class Views(object): window('%s.type' % window_prop, item_type) def window_wnode(self, index, view, node=None, node_label=None): - + ''' Similar to window_node, but does not contain music, musicvideos. Contains books, audiobooks. 
        '''
-        if view['Media'] in ('homevideos', 'photos', 'books', 'audiobooks'):
+        if view['Media'] in ('homevideos', 'photos', 'books', 'playlists'):
             path = self.window_browse(view, None if node in ('all', 'browse') else node)
         else:
             path = self.window_path(view, node)
 
-        if node in ('browse', 'homevideos', 'photos', 'books', 'audiobooks'):
+        if node in ('browse', 'homevideos', 'photos', 'books', 'playlists'):
             window_path = path
         else:
             window_path = "ActivateWindow(Videos,%s,return)" % path
@@ -842,11 +852,7 @@
         if not self.server['connected']:
             window('%s.artwork' % prop, clear=True)
 
-        elif self.server['connected']:
-
-            if self.media_folders is None:
-                self.media_folders = self.get_libraries()
-
+        elif self.server['connected'] and self.media_folders is not None:
             for library in self.media_folders:
 
                 if library['Id'] == view_id and 'Primary' in library.get('ImageTags', {}):
@@ -894,7 +900,7 @@
     '''
         total = int(window((name or 'Emby.nodes') + '.total') or 0)
         props = [
-
+
             "index","id","path","artwork","title","content","type"
             "inprogress.content","inprogress.title",
             "inprogress.content","inprogress.path",
@@ -919,7 +925,7 @@
             LOG.info("DELETE playlist %s", path)
 
     def delete_playlists(self):
-
+
         ''' Remove all emby playlists.
         '''
        path = xbmc.translatePath("special://profile/playlists/video/").decode('utf-8')
diff --git a/resources/lib/webservice.py b/resources/lib/webservice.py
new file mode 100644
index 00000000..9e5f9d33
--- /dev/null
+++ b/resources/lib/webservice.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+#################################################################################################
+
+import BaseHTTPServer
+import logging
+import httplib
+import threading
+import urlparse
+
+import xbmc
+
+#################################################################################################
+
+PORT = 57578
+LOG = logging.getLogger("EMBY."+__name__)
+
+#################################################################################################
+
+class WebService(threading.Thread):
+
+    ''' Run a webservice to trigger playback.
+    '''
+    def __init__(self):
+        threading.Thread.__init__(self)
+
+    def stop(self):
+
+        ''' Called when the thread needs to stop.
+        '''
+        try:
+            conn = httplib.HTTPConnection("127.0.0.1:%d" % PORT)
+            conn.request("QUIT", "/")
+            conn.getresponse()
+        except Exception:
+            pass
+
+    def run(self):
+
+        ''' Called to start the webservice.
+        '''
+        LOG.info("--->[ webservice/%s ]", PORT)
+
+        try:
+            server = HttpServer(('127.0.0.1', PORT), requestHandler)
+            server.serve_forever()
+        except Exception as error:
+
+            if '10053' not in str(error): # ignore host disconnected errors
+                LOG.exception(error)
+
+        LOG.info("---<[ webservice ]")
+
+
+class HttpServer(BaseHTTPServer.HTTPServer):
+
+    ''' Http server that reacts to self.stop flag.
+    '''
+    def serve_forever(self):
+
+        ''' Handle one request at a time until stopped.
+        '''
+        self.stop = False
+
+        while not self.stop:
+            self.handle_request()
+
+
+class requestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+    ''' Http request handler. Do not use LOG here,
+        it will hang requests in Kodi > show information dialog.
+    '''
+
+    def log_message(self, format, *args):
+
+        ''' Mute the webservice requests.
+        '''
+        pass
+
+    def do_QUIT(self):
+
+        ''' Send a 200 OK response and set server.stop to True.
+        '''
+        self.send_response(200)
+        self.end_headers()
+        self.server.stop = True
+
+    def get_params(self):
+
+        ''' Get the params.
+        '''
+        try:
+            path = self.path[1:]
+
+            if '?' in path:
+                path = path.split('?', 1)[1]
+
+            params = dict(urlparse.parse_qsl(path))
+        except Exception:
+            params = {}
+
+        return params
+
+    def do_HEAD(self):
+
+        ''' Called on HEAD requests.
+        '''
+        self.send_response(200)
+        self.end_headers()
+
+        return
+
+    def do_GET(self):
+
+        ''' Return plugin path.
+        '''
+        try:
+            params = self.get_params()
+
+            if not params:
+                raise IndexError("Incomplete URL format")
+
+            if not (params.get('Id') or '').isdigit():
+                raise IndexError("Incorrect Id format %s" % params.get('Id'))
+
+            xbmc.log("[ webservice ] path: %s params: %s" % (str(self.path), str(params)), xbmc.LOGWARNING)
+
+            path = ("plugin://plugin.video.emby?mode=play&id=%s&dbid=%s&filename=%s&transcode=%s"
+                    % (params.get('Id'), params.get('KodiId'), params.get('Name'), params.get('transcode') or False))
+
+            self.send_response(200)
+            self.send_header('Content-type', 'text/html')
+            self.end_headers()
+            self.wfile.write(path)
+
+        except IndexError as error:
+
+            xbmc.log(str(error), xbmc.LOGWARNING)
+            self.send_error(404, "Exception occurred: %s" % error)
+
+        except Exception as error:
+
+            xbmc.log(str(error), xbmc.LOGWARNING)
+            self.send_error(500, "Exception occurred: %s" % error)
+
+        return
+
diff --git a/resources/settings.xml b/resources/settings.xml
index 1008d65a..27853e48 100644
--- a/resources/settings.xml
+++ b/resources/settings.xml
@@ -21,7 +21,7 @@
     <category label="30506"><!-- Sync Options -->
         <setting label="33186" type="lsep" />
         <setting label="33137" id="kodiCompanion" type="bool" default="true" />
-        <setting label="30507" id="syncIndicator" type="number" default="99" visible="eq(-1,true)" subsetting="true"/>
+        <setting label="30507" id="syncIndicator" type="number" default="999" visible="eq(-1,true)" subsetting="true"/>
         <setting label="33185" id="syncDuringPlay" type="bool" default="true" />
         <setting label="30536" id="dbSyncScreensaver" type="bool" default="true" />
         <setting label="33111" type="lsep" />
@@ -92,13 +92,16 @@
     <category label="30022"><!-- Advanced -->
         <setting label="30004" id="logLevel" type="enum" values="Disabled|Info|Debug" default="1" />
         <setting label="33164" id="maskInfo" type="bool" default="true" />
-        <setting label="30529" id="startupDelay" type="number" default="0" option="int" />
         <setting label="30239" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=reset)" option="close" />
         <setting label="30535" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=deviceid)" option="close" />
-        <setting label="33161" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=checkupdate)" option="close" />
+        <setting label="33196" type="lsep" />
+        <setting label="33195" id="enableAddon" type="bool" default="true" />
         <setting label="33180" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=restartservice)" option="close" />
+        <setting label="30529" id="startupDelay" type="number" default="0" option="int" />
+        <setting label="33161" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=checkupdate)" option="close" />
+        <setting label="Developer mode" id="devMode" type="bool" default="false" />
-        <setting type="sep"/>
+        <setting type="sep" />
         <setting label="33104" type="lsep"/>
         <setting label="33093" type="folder" id="backupPath" option="writeable" />
         <setting label="33092" type="action" action="RunPlugin(plugin://plugin.video.emby?mode=backup)" visible="!eq(-1,)" option="close" />
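For reviewers who want to poke at the new endpoint, here is a minimal local client sketch in the add-on's own Python 2 idiom. It is not part of the patch: the port and parameter names are taken from webservice.py above, while the Id, KodiId and Name values are invented for illustration.

# -*- coding: utf-8 -*-
# Minimal sketch: ask the webservice above for a playback path.
# PORT and the parameter names mirror webservice.py in this diff;
# the item values below are made-up examples, not real library entries.
import httplib
import urllib

PORT = 57578

params = urllib.urlencode({
    'Id': '1234',           # Emby item id; do_GET answers 404 unless it is all digits
    'KodiId': '56',         # Kodi database id, passed through as dbid
    'Name': 'example.mkv',  # filename, passed through to the plugin path
})

conn = httplib.HTTPConnection('127.0.0.1', PORT)
conn.request('GET', '/?%s' % params)
response = conn.getresponse()

# On success the body is the plugin:// URL Kodi can play, e.g.
# plugin://plugin.video.emby?mode=play&id=1234&dbid=56&filename=example.mkv&transcode=False
print response.status, response.read()
conn.close()

The same request style is what WebService.stop() relies on, only with the custom QUIT verb that flips the HttpServer.stop flag.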
diff --git a/service.py b/service.py
index 0cc7aacc..3292a122 100644
--- a/service.py
+++ b/service.py
@@ -4,6 +4,7 @@
 import logging
 import os
+import threading
 import sys
 
 import xbmc
@@ -14,9 +15,12 @@
 import xbmcaddon
 
 __addon__ = xbmcaddon.Addon(id='plugin.video.emby')
 __base__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', 'lib')).decode('utf-8')
+__libraries__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'libraries')).decode('utf-8')
 __pcache__ = xbmc.translatePath(os.path.join(__addon__.getAddonInfo('profile'), 'emby')).decode('utf-8')
 __cache__ = xbmc.translatePath('special://temp/emby').decode('utf-8')
 
+sys.path.insert(0, __libraries__)
+
 if not xbmcvfs.exists(__pcache__ + '/'):
     from resources.lib.helper.utils import copytree
 
@@ -35,11 +39,44 @@
 from emby import Emby
 
 #################################################################################################
 
 LOG = logging.getLogger("EMBY.service")
-DELAY = int(settings('startupDelay') or 0)
+DELAY = int(settings('startupDelay') or 0) if settings('SyncInstallRunDone.bool') else 4
 
 #################################################################################################
 
+class ServiceManager(threading.Thread):
+
+    ''' Service thread.
+        Allows the service to be restarted and modules reloaded internally.
+    '''
+    exception = None
+
+    def __init__(self):
+        threading.Thread.__init__(self)
+
+    def run(self):
+        service = None
+
+        try:
+            service = Service()
+
+            if DELAY and xbmc.Monitor().waitForAbort(DELAY):
+                raise Exception("Aborted during startup delay")
+
+            service.service()
+        except Exception as error:
+
+            if service is not None:
+
+                if 'ExitService' not in str(error):
+                    service.shutdown()
+
+                if 'RestartService' in str(error):
+                    service.reload_objects()
+
+            self.exception = error
+
 
 if __name__ == "__main__":
     LOG.warn("-->[ service ]")
@@ -47,29 +84,24 @@
 
     while True:
 
+        if not settings('enableAddon.bool'):
+            LOG.warn("Emby for Kodi is not enabled.")
+
+            break
+
         try:
-            session = Service()
+            session = ServiceManager()
+            session.start()
+            session.join() # Block until the thread exits.
 
-            try:
-                if DELAY and xbmc.Monitor().waitForAbort(DELAY):
-                    raise Exception("Aborted during startup delay")
-
-                session.service()
-            except Exception as error: # TODO, build exceptions
-
-                LOG.exception(error)
-                session.shutdown()
-
-                if 'RestartService' in error:
-                    continue
+            if session.exception and 'RestartService' in str(session.exception):
+                continue
         except Exception as error:
 
             ''' Issue initializing the service.
             '''
             LOG.exception(error)
-
             break
 
-
         break
 
 LOG.warn("--<[ service ]")
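To make the new restart flow easier to follow, here is a self-contained sketch of the contract service.py now implements: code inside the running service signals intent by raising an exception whose message contains 'RestartService' or 'ExitService', ServiceManager.run() records it, and the main loop decides whether to start a fresh thread. The Service stub below is invented for illustration; only the exception convention comes from the diff.

# -*- coding: utf-8 -*-
# Stand-in demonstration of the ServiceManager restart loop above.
import threading

RUNS = {'count': 0}

class Service(object):

    def service(self):
        RUNS['count'] += 1
        # First pass pretends something requested a restart,
        # second pass pretends the user asked the service to exit.
        raise Exception('RestartService' if RUNS['count'] == 1 else 'ExitService')

class ServiceManager(threading.Thread):

    exception = None

    def run(self):
        try:
            Service().service()
        except Exception as error:
            self.exception = error  # hand the outcome to the main loop

while True:
    session = ServiceManager()
    session.start()
    session.join()  # block until the thread exits

    if session.exception and 'RestartService' in str(session.exception):
        continue  # this is where the add-on would reload its modules

    break  # 'ExitService' (or anything else) falls through to shutdown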