commit ac4f79bbf48162a776b799d31efc9d6e34f5480e
Author: sicheng
Date:   Fri Jul 29 23:38:54 2022 +0800

    clone from danieldin95

diff --git a/.gitignore b/.gitignore
new file mode 100755
index 0000000..083bfce
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,26 @@
+# Binaries for programs and plugins
+*.exe~
+*.dll
+*.so
+*.dylib
+
+*.exe
+*.x86_64
+*.rpm
+*.zip
+# Test binary, build with `go test -c`
+*.test
+*.idea
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+/build/
+/py/build/
+/py/dist/
+/core/build/
+/core/cmake-build-debug/
+
+confd-idl.h
+confd-idl.c
+confd-idl.ovsidl
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..56b5850
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,12 @@
+[submodule "3rd/openvpn"]
+	path = 3rd/openvpn
+	url = https://github.com/OpenVPN/openvpn
+	branch = release/2.4
+[submodule "dist/resource/cert"]
+	path = dist/resource/cert
+	url = https://github.com/danieldin95/freecert.git
+	branch = master
+[submodule "3rd/ovs"]
+	path = 3rd/ovs
+	url = https://github.com/openvswitch/ovs.git
+	branch = branch-2.12
diff --git a/3rd/README.md b/3rd/README.md
new file mode 100755
index 0000000..14909a6
--- /dev/null
+++ b/3rd/README.md
@@ -0,0 +1,9 @@
+## Compile
+If you want to build OpenVPN and Open vSwitch, the following SHOULD be installed first.
+```
+[CentOS]
+  yum install -y lzo-devel pam-devel
+
+[Ubuntu]
+  apt-get install -y liblzo2-dev libpam-dev
+```
diff --git a/3rd/auto.sh b/3rd/auto.sh
new file mode 100755
index 0000000..45fdd21
--- /dev/null
+++ b/3rd/auto.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+set -ex
+
+action=$1
+version=$(cat VERSION)
+cd $(dirname $0)
+
+build_openvswitch() {
+    obj_dir=$(pwd)/../build/obj
+    cd ovs && {
+        [ -e './configure' ] || ./boot.sh
+        [ -e './Makefile' ] || ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-libcapng
+        make -j4 && make install DESTDIR=$obj_dir
+        cd -
+    }
+}
+
+build_openvpn() {
+    obj_dir=$(pwd)/../build/obj
+    cd openvpn && {
+        [ -e './configure' ] || autoreconf -i -v -f
+        [ -e './Makefile' ] || ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var
+        make -j4 && make install DESTDIR=$obj_dir
+        cd -
+    }
+}
+
+
+clean_openvswitch() {
+    cd ovs && {
+        if [ -e Makefile ]; then
+            make clean
+            rm ./Makefile
+        fi
+        cd -
+    }
+}
+
+clean_openvpn() {
+    cd openvpn && {
+        if [ -e Makefile ]; then
+            make clean
+            rm ./Makefile
+        fi
+        cd -
+    }
+}
+
+
+if [ "$action"x == "build"x ] || [ "$action"x == ""x ]; then
+    build_openvswitch
+    build_openvpn
+elif [ "$action"x == "clean"x ]; then
+    clean_openvswitch
+    clean_openvpn
+fi
diff --git a/3rd/openvpn b/3rd/openvpn
new file mode 160000
index 0000000..058407a
--- /dev/null
+++ b/3rd/openvpn
@@ -0,0 +1 @@
+Subproject commit 058407a89cb812115b383570b12f4c2fde500d39
diff --git a/3rd/ovs b/3rd/ovs
new file mode 160000
index 0000000..7a7e4db
--- /dev/null
+++ b/3rd/ovs
@@ -0,0 +1 @@
+Subproject commit 7a7e4db590aa6fb60f781f340bd6fbaa81e307c3
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. 
Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+ + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..e2dd0d2 --- /dev/null +++ b/Makefile @@ -0,0 +1,193 @@ +SHELL := /bin/bash + +.ONESHELL: +.PHONY: linux linux-rpm darwin darwin-zip windows windows-zip test vendor + +## version +LSB = $(shell lsb_release -i -s)$(shell lsb_release -r -s) +VER = $(shell cat VERSION) +ARCH = $(shell uname -m) + +## declare directory +SD = $(shell pwd) +BD = "$(SD)/build" +LINUX_DIR ?= "openlan-linux-$(VER).$(ARCH)" +WIN_DIR ?= "openlan-windows-$(VER).$(ARCH)" +MAC_DIR ?= "openlan-darwin-$(VER).$(ARCH)" + +## declare flags +MOD = github.com/luscis/openlan/pkg/libol +LDFLAGS += -X $(MOD).Date=$(shell date +%FT%T%z) +LDFLAGS += -X $(MOD).Version=$(VER) + +build: test pkg + +pkg: clean linux-rpm linux-bin windows-gz darwin-gz ## build all plaftorm packages + +gz: linux-gz windows-gz darwin-gz + +help: ## show make targets + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);\ + printf " \033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +## all platform +bin: linux windows darwin ## build all platform binary + +# +## docker run --network host --privileged \ +## -v /var/run:/var/run -v /etc/openlan/switch:/etc/openlan/switch \ +## openlan-switch:5.8.13 +docker: pkg + docker build -t openlan-switch:$(VER) --build-arg VERSION=$(VER) -f ./dist/openlan-switch.docker . 
+ +## upgrade +upgrade: + ansible-playbook ./misc/playbook/upgrade.yaml -e "version=$(VER)" + +clean: ## clean cache + rm -rvf ./build + rm -rvf ./core/build + rm -rvf ./core/cmake-build-debug + ./core/auto.sh clean + ./3rd/auto.sh clean + +## prepare environment +update: + git submodule init + git submodule update + +vendor: update + go clean -modcache + go mod tidy + go mod vendor -v + +env: + @mkdir -p $(BD) + @go version + @gofmt -w -s ./pkg ./cmd ./misc + +## linux platform +linux: linux-proxy linux-point linux-switch + +core: env + ./3rd/auto.sh build + ./core/auto.sh build + cd $(BD) && cmake $(SD)/core && make + +rpm: env ## build rpm packages + mkdir -p ~/rpmbuild/SPECS + mkdir -p ~/rpmbuild/SOURCES + sed -e "s/Version:.*/Version:\ $(VER)/" $(SD)/dist/openlan.spec.in > ~/rpmbuild/SPECS/openlan.spec + @dist/spec.sh + rpmbuild -ba ~/rpmbuild/SPECS/openlan.spec + +## compile command line +cmd: env + go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan ./cmd/main.go + +linux: linux-point linux-switch linux-proxy ## build all linux binary + +linux-point: env + go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan-point ./cmd/point_linux + +linux-switch: env cmd core + go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan-switch ./cmd/switch + +linux-proxy: env + go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan-proxy ./cmd/proxy + + +linux-gz: install ## build linux packages + @rm -rf $(LINUX_DIR).tar.gz + tar -cf $(LINUX_DIR).tar $(LINUX_DIR) && mv $(LINUX_DIR).tar $(BD) + @rm -rf $(LINUX_DIR) + gzip -f $(BD)/$(LINUX_DIR).tar + +linux-bin: linux-gz ## build linux install binary + cat $(SD)/dist/script/install.sh > $(BD)/$(LINUX_DIR).bin && \ + echo "__ARCHIVE_BELOW__:" >> $(BD)/$(LINUX_DIR).bin && \ + cat $(BD)/$(LINUX_DIR).tar.gz >> $(BD)/$(LINUX_DIR).bin && \ + chmod +x $(BD)/$(LINUX_DIR).bin + +install: env linux-point linux-switch linux-proxy ## install packages + @mkdir -p $(LINUX_DIR)/etc/sysctl.d + @cp -rvf $(SD)/dist/resource/90-openlan.conf $(LINUX_DIR)/etc/sysctl.d + @mkdir -p $(LINUX_DIR)/etc/openlan + @cp -rvf $(SD)/dist/resource/point.json.example $(LINUX_DIR)/etc/openlan + @cp -rvf $(SD)/dist/resource/proxy.json.example $(LINUX_DIR)/etc/openlan + @mkdir -p $(LINUX_DIR)/etc/openlan/switch + @cp -rvf $(SD)/dist/resource/confd.schema.json $(LINUX_DIR)/etc/openlan/switch + @cp -rvf $(SD)/dist/resource/switch.json.example $(LINUX_DIR)/etc/openlan/switch + @mkdir -p $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/acl-1.json.example $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/default.json.example $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/network.json.example $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/ipsec.json.example $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/v1024.json.example $(LINUX_DIR)/etc/openlan/switch/network + @cp -rvf $(SD)/dist/resource/fabric.json.example $(LINUX_DIR)/etc/openlan/switch/network + @mkdir -p $(LINUX_DIR)/usr/bin + @cp -rvf $(BD)/openudp $(LINUX_DIR)/usr/bin + @cp -rvf $(BD)/openlan $(LINUX_DIR)/usr/bin + @cp -rvf $(BD)/openlan-proxy $(LINUX_DIR)/usr/bin + @cp -rvf $(BD)/openlan-point $(LINUX_DIR)/usr/bin + @cp -rvf $(BD)/openlan-switch $(LINUX_DIR)/usr/bin + @mkdir -p $(LINUX_DIR)/var/openlan + @cp -rvf $(SD)/dist/resource/cert/openlan/cert $(LINUX_DIR)/var/openlan + @cp -rvf $(SD)/dist/script $(LINUX_DIR)/var/openlan + @cp -rvf $(SD)/pkg/public $(LINUX_DIR)/var/openlan + @cp -rvf 
$(SD)/dist/resource/cert/openlan/ca/ca.crt $(LINUX_DIR)/var/openlan/cert
+	@mkdir -p $(LINUX_DIR)/var/openlan/point
+	@mkdir -p $(LINUX_DIR)/var/openlan/openvpn
+	@mkdir -p $(LINUX_DIR)/var/openlan/dhcp
+	@mkdir -p $(LINUX_DIR)/etc/sysconfig/openlan
+	@cp -rvf $(SD)/dist/resource/point.cfg $(LINUX_DIR)/etc/sysconfig/openlan
+	@cp -rvf $(SD)/dist/resource/proxy.cfg $(LINUX_DIR)/etc/sysconfig/openlan
+	@cp -rvf $(SD)/dist/resource/switch.cfg $(LINUX_DIR)/etc/sysconfig/openlan
+	@mkdir -p $(LINUX_DIR)//usr/lib/systemd/system
+	@cp -rvf $(SD)/dist/resource/openlan-point@.service $(LINUX_DIR)/usr/lib/systemd/system
+	@cp -rvf $(SD)/dist/resource/openlan-proxy.service $(LINUX_DIR)/usr/lib/systemd/system
+	@cp -rvf $(SD)/dist/resource/openlan-confd.service $(LINUX_DIR)/usr/lib/systemd/system
+	@cp -rvf $(SD)/dist/resource/openlan-switch.service $(LINUX_DIR)/usr/lib/systemd/system
+
+## cross build for windows
+windows: windows-point ## build windows binary
+
+windows-point: env
+	GOOS=windows GOARCH=amd64 go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan-point.exe ./cmd/point_windows
+
+windows-gz: env windows ## build windows packages
+	@rm -rf $(WIN_DIR) && mkdir -p $(WIN_DIR)
+	@rm -rf $(WIN_DIR).tar.gz
+
+	@cp -rvf $(SD)/dist/resource/point.json.example $(WIN_DIR)/point.json
+	@cp -rvf $(BD)/openlan-point.exe $(WIN_DIR)
+
+	tar -cf $(WIN_DIR).tar $(WIN_DIR) && mv $(WIN_DIR).tar $(BD)
+	@rm -rf $(WIN_DIR)
+	gzip -f $(BD)/$(WIN_DIR).tar
+
+windows-syso: ## build windows syso
+	rsrc -manifest ./cmd/point_windows/main.manifest -ico ./cmd/point_windows/main.ico -o ./cmd/point_windows/main.syso
+
+## cross build for osx
+osx: darwin
+
+darwin: env ## build darwin binary
+	GOOS=darwin GOARCH=amd64 go build -mod=vendor -ldflags "$(LDFLAGS)" -o $(BD)/openlan-point.darwin ./cmd/point_darwin
+
+darwin-gz: env darwin ## build darwin packages
+	@rm -rf $(MAC_DIR) && mkdir -p $(MAC_DIR)
+	@rm -rf $(MAC_DIR).tar.gz
+
+	@cp -rvf $(SD)/dist/resource/point.json.example $(MAC_DIR)/point.json
+	@cp -rvf $(BD)/openlan-point.darwin $(MAC_DIR)
+
+	tar -cf $(MAC_DIR).tar $(MAC_DIR) && mv $(MAC_DIR).tar $(BD)
+	@rm -rf $(MAC_DIR)
+	gzip -f $(BD)/$(MAC_DIR).tar
+
+## unit test
+test: ## execute unit test
+	go test -v -mod=vendor -bench=. github.com/luscis/openlan/pkg/access
+	go test -v -mod=vendor -bench=. github.com/luscis/openlan/pkg/libol
+	go test -v -mod=vendor -bench=. github.com/luscis/openlan/pkg/models
diff --git a/README.en.md b/README.en.md
new file mode 100755
index 0000000..f71425f
--- /dev/null
+++ b/README.en.md
@@ -0,0 +1,48 @@
+# Overview
+[![Build Status](https://travis-ci.org/luscis/openlan.svg?branch=master)](https://travis-ci.org/luscis/openlan)
+[![Go Report Card](https://goreportcard.com/badge/github.com/luscis/openlan)](https://goreportcard.com/report/luscis/openlan)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+The OpenLAN project helps you to build a local area network via the Internet.
+ +## Terminology + +* OLSW: OpenLAN Switch +* OLAP: OpenLAN Access Point +* NAT: Network Address translation + +## Branch Access + + OLSW(Central) - 10.1.2.10/24 + ^ + | + Wifi(DNAT) + | + | + ----------------------Internet------------------------- + ^ ^ ^ + | | | + Branch 1 Branch 2 Branch 3 + | | | + OLAP OLAP OLAP + 10.1.2.11/24 10.1.2.12/24 10.1.2.13/24 + +## Multiple Area + + 192.168.1.20/24 192.168.1.22/24 + | | + OLAP ---- Wifi ---> OLSW(NanJing) <---- Wifi --- OLAP + | + | + Internet + | + | + OLSW(ShangHai) - 192.168.1.10/24 + | + ------------------------------------------------------ + ^ ^ ^ + | | | + Office Wifi Home Wifi Hotel Wifi + | | | + OLAP OLAP OLAP + 192.168.1.11/24 192.168.1.12/24 192.168.1.13/24 diff --git a/README.md b/README.md new file mode 100755 index 0000000..11e54fb --- /dev/null +++ b/README.md @@ -0,0 +1,128 @@ + +简体中文 | [English](./README.en.md) + +# 概述 +[![Build Status](https://app.travis-ci.com/luscis/openlan.svg?token=3tnnPDv6XvR5zsJsD4kC&branch=master)](https://app.travis-ci.com/github/luscis/openlan) +[![Go Report Card](https://goreportcard.com/badge/github.com/luscis/openlan)](https://goreportcard.com/report/luscis/openlan-go) +[![GPL 3.0 License](https://img.shields.io/badge/License-GPL%203.0-blue.svg)](LICENSE) + +OpenLAN提供一种局域网数据报文在广域网的传输实现,并能够建立多个用户空间的虚拟以太网络。 + +## 缩略语 + +* OLSW: OpenLAN Switch,开放局域网交换机 +* OLAP: OpenLAN Access Point,开放局域网接入点 +* NAT: Network Address Translation, 网络地址转换 +* VxLAN: Virtual eXtensible Local Area Network,虚拟扩展局域网 +* STT: Stateless Transport Tunneling,无状态传输隧道 + +## 功能清单 + +* 支持多个网络空间划分,为不同的业务提供逻辑网络隔离; +* 支持OLAP或者OpenVPN接入,提供网桥把局域网共享出去; +* 支持IPSec隧道网络,以及基于VxLAN/STT的租户网络划分; +* 支持基于用户名密码的接入认证,使用预共享密约对数据报文进行加密; +* 支持TCP/TLS,UDP/KCP,WS/WSS等多种传输协议实现,TCP模式具有较高的性能; +* 支持HTTP/HTTPS,以及SOCKS5等HTTP的正向代理技术,灵活配置代理进行网络穿透; +* 支持基于TCP的端口转发,为防火墙下的主机提供TCP端口代理。 + + +## 分支中心接入 + + OLSW(企业中心) - 10.16.1.10/24 + ^ + | + Wifi(DNAT) + | + | + ----------------------Internet------------------------- + ^ ^ ^ + | | | + 分支1 分支2 分支3 + | | | + OLAP OLAP OLAP + 10.16.1.11/24 10.16.1.12/24 10.16.1.13/24 + + +## 多区域互联 + + 192.168.1.20/24 192.168.1.21/24 + | | + OLAP -- 酒店 Wifi --> OLSW(南京) <--- 其他 Wifi --- OLAP + | + | + 互联网 + | + | + OLSW(上海) - 192.168.1.10/24 + | + | + ------------------------------------------------------ + ^ ^ ^ + | | | + 办公 Wifi 家庭 Wifi 酒店 Wifi + | | | + OLAP OLAP OLAP + 192.168.1.11/24 192.168.1.12/24 192.168.1.13/24 + +## 数据中心全互联网络 + +* Underlay for VxLAN over Internet by IPSec. + + 47.example.com + | + | + | + +-------+ + | vps-47| -- 100.65.0.117 + +-------+ + / \ + / \ + SPI-117118 / \ SPI-117119 + / \ + / \ + +-------+ +-------+ + | vps-92| -------------- | vps-12| + +-------+ +-------+ + / | | \ + / | SPI-118119 | \ + 100.65.0.118 | | 100.65.0.119 + | | + 92.example.com 12.example.com + + + +* DCI Subnet: 192.168.x.x over IPSec Network: 100.65.0.x. 
+ + 100.65.0.117 + | + eth1.200 --- | --- eth1.100 + \ | / + +--------+ + | vps-47 | + +--------+ + / \ + / \ + / \ + / \ + enp2s4.100 --- / \ --- eth4.30 + \ / \ / + +--------+ +--------+ + | vps-92 | --------------- | vps-12 | + +--------+ +--------+ + / | | \ + enp2s4.101 --- | | --- eth4.200 + | | + 100.65.0.118 100.65.0.119 + + + VNI-1023 192.168.30.0/24 [vps-47_eth1.100, vps-92_enp2s4.100, vps-12_eth4.30] + VNI-1024 192.168.40.0/24 [vps-47_eth1.200, vps-92_enp2s4.101, vps-12_eth4.200] + + +## 文档 +- [软件安装](docs/install.md) +- [分支接入](docs/central.md) +- [多区域互联](docs/multiarea.md) +- [全互连网络](docs/fabric.md) +- [IPSec网络](docs/ipsec.md) diff --git a/VERSION b/VERSION new file mode 100755 index 0000000..403c1cf --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +5.10.4 diff --git a/cmd/api/app.go b/cmd/api/app.go new file mode 100755 index 0000000..fd9f9b7 --- /dev/null +++ b/cmd/api/app.go @@ -0,0 +1,123 @@ +package api + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/urfave/cli/v2" +) + +const ( + ConfSockFile = "unix:/var/openlan/confd.sock" + ConfDatabase = "OpenLAN_Switch" + AdminTokenFile = "/etc/openlan/switch/token" +) + +var ( + Version = "v5" + Url = "https://localhost:10000" + Token = "" + Server = ConfSockFile + Database = ConfDatabase + Verbose = false +) + +type App struct { + cli *cli.App + Before func(c *cli.Context) error + After func(c *cli.Context) error +} + +func (a *App) Flags() []cli.Flag { + var flags []cli.Flag + + switch Version { + case "v6": + flags = append(flags, + &cli.StringFlag{ + Name: "format", + Aliases: []string{"f"}, + Usage: "output format: json|yaml", + Value: "yaml", + }) + flags = append(flags, + &cli.StringFlag{ + Name: "conf", + Aliases: []string{"c"}, + Usage: "confd server connection", + Value: Server, + }) + flags = append(flags, + &cli.StringFlag{ + Name: "database", + Aliases: []string{"d"}, + Usage: "confd database", + Value: Database, + }) + default: + flags = append(flags, + &cli.StringFlag{ + Name: "format", + Aliases: []string{"f"}, + Usage: "output format: json|yaml", + Value: "table", + }) + flags = append(flags, + &cli.StringFlag{ + Name: "token", + Aliases: []string{"t"}, + Usage: "admin token", + Value: Token, + }) + flags = append(flags, + &cli.StringFlag{ + Name: "url", + Aliases: []string{"l"}, + Usage: "server url", + Value: Url, + }) + } + flags = append(flags, + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + Usage: "enable verbose", + Value: false, + }) + return flags +} + +func (a *App) New() *cli.App { + app := &cli.App{ + Usage: "OpenLAN switch utility", + Flags: a.Flags(), + Commands: []*cli.Command{}, + Before: func(c *cli.Context) error { + if c.Bool("verbose") { + Verbose = true + libol.SetLogger("", libol.DEBUG) + } else { + Verbose = false + libol.SetLogger("", libol.INFO) + } + if a.Before == nil { + return nil + } + return a.Before(c) + }, + After: func(c *cli.Context) error { + if a.After == nil { + return nil + } + return a.After(c) + }, + } + a.cli = app + return a.cli +} + +func (a *App) Command(cmd *cli.Command) { + a.cli.Commands = append(a.cli.Commands, cmd) +} + +func (a *App) Run(args []string) error { + return a.cli.Run(args) +} diff --git a/cmd/api/out.go b/cmd/api/out.go new file mode 100755 index 0000000..a25783b --- /dev/null +++ b/cmd/api/out.go @@ -0,0 +1,84 @@ +package api + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "gopkg.in/yaml.v2" + "os" + "strconv" + "text/template" +) + +func OutJson(data interface{}) error { + if out, err := libol.Marshal(data, true); 
err == nil { + fmt.Println(string(out)) + } else { + return err + } + return nil +} + +func OutYaml(data interface{}) error { + if out, err := yaml.Marshal(data); err == nil { + fmt.Println(string(out)) + } else { + return err + } + return nil +} + +func OutTable(data interface{}, tmpl string) error { + funcMap := template.FuncMap{ + "ps": func(space int, args ...interface{}) string { + format := "%" + strconv.Itoa(space) + "s" + if space < 0 { + format = "%-" + strconv.Itoa(space) + "s" + } + return fmt.Sprintf(format, args...) + }, + "pi": func(space int, args ...interface{}) string { + format := "%" + strconv.Itoa(space) + "d" + if space < 0 { + format = "%-" + strconv.Itoa(space) + "d" + } + return fmt.Sprintf(format, args...) + }, + "pu": func(space int, args ...interface{}) string { + format := "%" + strconv.Itoa(space) + "u" + if space < 0 { + format = "%-" + strconv.Itoa(space) + "u" + } + return fmt.Sprintf(format, args...) + }, + "pt": func(value int64) string { + return libol.PrettyTime(value) + }, + "p2": func(space int, format, key1, key2 string) string { + value := fmt.Sprintf(format, key1, key2) + format = "%" + strconv.Itoa(space) + "s" + if space < 0 { + format = "%-" + strconv.Itoa(space) + "s" + } + return fmt.Sprintf(format, value) + }, + } + if tmpl, err := template.New("main").Funcs(funcMap).Parse(tmpl); err != nil { + return err + } else { + if err := tmpl.Execute(os.Stdout, data); err != nil { + return err + } + } + return nil +} + +func Out(data interface{}, format string, tmpl string) error { + switch format { + case "json": + return OutJson(data) + case "yaml": + return OutYaml(data) + default: + return OutTable(data, tmpl) + } +} diff --git a/cmd/api/utils.go b/cmd/api/utils.go new file mode 100755 index 0000000..e7debce --- /dev/null +++ b/cmd/api/utils.go @@ -0,0 +1,11 @@ +package api + +import "os" + +func GetEnv(key, value string) string { + val := os.Getenv(key) + if val == "" { + return value + } + return val +} diff --git a/cmd/api/v5/acl.go b/cmd/api/v5/acl.go new file mode 100755 index 0000000..09b65f3 --- /dev/null +++ b/cmd/api/v5/acl.go @@ -0,0 +1,137 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/urfave/cli/v2" +) + +type ACL struct { + Cmd +} + +func (u ACL) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/acl" + } else { + return prefix + "/api/acl/" + name + } +} + +func (u ACL) Add(c *cli.Context) error { + return nil +} + +func (u ACL) Remove(c *cli.Context) error { + return nil +} + +func (u ACL) List(c *cli.Context) error { + return nil +} + +func (u ACL) Apply(c *cli.Context) error { + return nil +} + +func (u ACL) Commands(app *api.App) { + rule := ACLRule{} + app.Command(&cli.Command{ + Name: "acl", + Usage: "Access control list", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name", Aliases: []string{"n"}}, + }, + Subcommands: []*cli.Command{ + { + Name: "add", + Usage: "Add a new acl", + Action: u.Add, + }, + { + Name: "remove", + Usage: "Remove an existing acl", + Aliases: []string{"ls"}, + Action: u.Remove, + }, + { + Name: "list", + Usage: "Display all acl", + Aliases: []string{"ls"}, + Action: u.List, + }, + rule.Commands(), + { + Name: "apply", + Usage: "Apply a new acl", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "network", Aliases: []string{"net"}}, + }, + Action: u.Apply, + }, + }, + }) +} + +type ACLRule struct { + Cmd +} + +func (u ACLRule) Url(prefix, acl, name string) string { + if name == "" { + return prefix + "/api/acl/" + acl + } else { + return prefix + 
"/api/acl/" + acl + "/" + name + } +} + +func (u ACLRule) Add(c *cli.Context) error { + return nil +} + +func (u ACLRule) Remove(c *cli.Context) error { + return nil +} + +func (u ACLRule) List(c *cli.Context) error { + return nil +} + +func (u ACLRule) Commands() *cli.Command { + return &cli.Command{ + Name: "rule", + Usage: "Access control list rule", + Subcommands: []*cli.Command{ + { + Name: "add", + Usage: "Add a new acl rule", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "src", Aliases: []string{"s"}}, + &cli.StringFlag{Name: "dst", Aliases: []string{"d"}}, + &cli.StringFlag{Name: "proto", Aliases: []string{"p"}}, + &cli.StringFlag{Name: "sport", Aliases: []string{"dp"}}, + &cli.StringFlag{Name: "dport", Aliases: []string{"sp"}}, + }, + Action: u.Add, + }, + { + Name: "remove", + Usage: "remove a new acl rule", + Aliases: []string{"rm"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "src", Aliases: []string{"s"}}, + &cli.StringFlag{Name: "dst", Aliases: []string{"d"}}, + &cli.StringFlag{Name: "proto", Aliases: []string{"p"}}, + &cli.StringFlag{Name: "sport", Aliases: []string{"dp"}}, + &cli.StringFlag{Name: "dport", Aliases: []string{"sp"}}, + }, + Action: u.Remove, + }, + { + Name: "list", + Usage: "Display all acl rules", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + } +} diff --git a/cmd/api/v5/client.go b/cmd/api/v5/client.go new file mode 100755 index 0000000..a8c1937 --- /dev/null +++ b/cmd/api/v5/client.go @@ -0,0 +1,129 @@ +package v5 + +import ( + "bytes" + "encoding/json" + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/libol" + "io/ioutil" + "net/http" +) + +type Client struct { + Auth libol.Auth + Host string +} + +func (cl Client) NewRequest(url string) *libol.HttpClient { + client := &libol.HttpClient{ + Auth: libol.Auth{ + Type: "basic", + Username: cl.Auth.Username, + Password: cl.Auth.Password, + }, + Url: url, + } + return client +} + +func (cl Client) GetBody(url string) ([]byte, error) { + client := cl.NewRequest(url) + r, err := client.Do() + if err != nil { + return nil, err + } + if r.StatusCode != http.StatusOK { + return nil, libol.NewErr(r.Status) + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + return body, nil +} + +func (cl Client) JSON(client *libol.HttpClient, i, o interface{}) error { + out := cl.Log() + data, err := json.Marshal(i) + if err != nil { + return err + } + out.Debug("Client.JSON -> %s %s", client.Method, client.Url) + out.Debug("Client.JSON -> %s", string(data)) + client.Payload = bytes.NewReader(data) + if r, err := client.Do(); err != nil { + return err + } else { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + out.Debug("client.JSON <- %s", string(body)) + if r.StatusCode != http.StatusOK { + return libol.NewErr("%s %s", r.Status, body) + } else if o != nil { + if err := json.Unmarshal(body, o); err != nil { + return err + } + } + } + return nil +} + +func (cl Client) GetJSON(url string, v interface{}) error { + client := cl.NewRequest(url) + client.Method = "GET" + return cl.JSON(client, nil, v) +} + +func (cl Client) PostJSON(url string, i, o interface{}) error { + client := cl.NewRequest(url) + client.Method = "POST" + return cl.JSON(client, i, o) +} + +func (cl Client) PutJSON(url string, i, o interface{}) error { + client := cl.NewRequest(url) + client.Method = "PUT" + return cl.JSON(client, i, o) +} + +func (cl Client) DeleteJSON(url string, i, o interface{}) error { + client := 
cl.NewRequest(url) + client.Method = "DELETE" + return cl.JSON(client, i, o) +} + +func (cl Client) Log() *libol.SubLogger { + return libol.NewSubLogger("cli") +} + +type Cmd struct { +} + +func (c Cmd) NewHttp(token string) Client { + client := Client{ + Auth: libol.Auth{ + Username: token, + }, + } + return client +} + +func (c Cmd) Url(prefix, name string) string { + return "" +} + +func (c Cmd) Tmpl() string { + return "" +} + +func (c Cmd) Out(data interface{}, format string, tmpl string) error { + return api.Out(data, format, tmpl) +} + +func (c Cmd) Log() *libol.SubLogger { + return libol.NewSubLogger("cli") +} diff --git a/cmd/api/v5/cmd.go b/cmd/api/v5/cmd.go new file mode 100755 index 0000000..9627912 --- /dev/null +++ b/cmd/api/v5/cmd.go @@ -0,0 +1,45 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/urfave/cli/v2" + "io/ioutil" + "strings" +) + +func Before(c *cli.Context) error { + token := c.String("token") + if token == "" { + tokenFile := api.AdminTokenFile + if data, err := ioutil.ReadFile(tokenFile); err == nil { + token = strings.TrimSpace(string(data)) + } + _ = c.Set("token", token) + } + return nil +} + +func After(c *cli.Context) error { + return nil +} + +func Commands(app *api.App) { + app.After = After + app.Before = Before + User{}.Commands(app) + ACL{}.Commands(app) + Device{}.Commands(app) + Lease{}.Commands(app) + Config{}.Commands(app) + Point{}.Commands(app) + VPNClient{}.Commands(app) + Link{}.Commands(app) + Server{}.Commands(app) + Network{}.Commands(app) + PProf{}.Commands(app) + Esp{}.Commands(app) + VxLAN{}.Commands(app) + State{}.Commands(app) + Policy{}.Commands(app) + Version{}.Commands(app) +} diff --git a/cmd/api/v5/config.go b/cmd/api/v5/config.go new file mode 100755 index 0000000..01385c1 --- /dev/null +++ b/cmd/api/v5/config.go @@ -0,0 +1,202 @@ +package v5 + +import ( + "fmt" + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" + "gopkg.in/yaml.v2" + "path/filepath" +) + +type Config struct { + Cmd +} + +func (u Config) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/config" + } + return prefix + "/api/config/" + name +} + +func (u Config) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + cfg := &config.Switch{} + if err := clt.GetJSON(url, cfg); err == nil { + var data []byte + if c.String("format") == "yaml" { + data, _ = yaml.Marshal(cfg) + } else { + data, _ = libol.Marshal(cfg, true) + } + fmt.Println(string(data)) + return nil + } else { + return err + } +} + +func (u Config) Check(c *cli.Context) error { + out := u.Log() + dir := c.String("dir") + // Check proxy configurations. + out.Info("%15s: %s", "check", "proxy") + file := filepath.Join(dir, "proxy.json") + if err := libol.FileExist(file); err == nil { + obj := &config.Proxy{} + if err := libol.UnmarshalLoad(obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + // Check OLAP configurations. 
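+	// Every block in this function repeats the same steps: check that the file
+	// exists, try to unmarshal it, then report success or the error. A small
+	// closure such as the sketch below (name illustrative) could express the
+	// pattern once:
+	//
+	//	checkOne := func(file string, obj interface{}) {
+	//		if libol.FileExist(file) != nil {
+	//			return // missing files are skipped, as in the code below
+	//		}
+	//		if err := libol.UnmarshalLoad(obj, file); err != nil {
+	//			out.Warn("%15s: %s", filepath.Base(file), err)
+	//		} else {
+	//			out.Info("%15s: %s", filepath.Base(file), "success")
+	//		}
+	//	}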
+ out.Info("%15s: %s", "check", "point") + file = filepath.Join(dir, "point.json") + if err := libol.FileExist(file); err == nil { + obj := &config.Point{} + if err := libol.UnmarshalLoad(obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + // Check OLSW configurations. + out.Info("%15s: %s", "check", "switch") + file = filepath.Join(dir, "switch", "switch.json") + if err := libol.FileExist(file); err == nil { + obj := &config.Switch{} + if err := libol.UnmarshalLoad(obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + // Check network configurations. + out.Info("%15s: %s", "check", "network") + pattern := filepath.Join(dir, "switch", "network", "*.json") + if files, err := filepath.Glob(pattern); err == nil { + for _, file := range files { + obj := &config.Network{} + if err := libol.UnmarshalLoad(obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + } + // Check ACL configurations. + out.Info("%15s: %s", "check", "acl") + pattern = filepath.Join(dir, "switch", "acl", "*.json") + if files, err := filepath.Glob(pattern); err == nil { + for _, file := range files { + obj := &config.ACL{} + if err := libol.UnmarshalLoad(obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + } + // Check links configurations. + out.Info("%15s: %s", "check", "link") + pattern = filepath.Join(dir, "switch", "link", "*.json") + if files, err := filepath.Glob(pattern); err == nil { + for _, file := range files { + var obj []config.Point + if err := libol.UnmarshalLoad(&obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + } + // Check routes configurations. 
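+	// Example invocation, assuming the CLI binary is installed as "openlan":
+	//
+	//	openlan config check --dir /etc/openlan
+	//
+	// which prints one "success" or error line per configuration file it finds.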
+ out.Info("%15s: %s", "check", "route") + pattern = filepath.Join(dir, "switch", "route", "*.json") + if files, err := filepath.Glob(pattern); err == nil { + for _, file := range files { + var obj []config.PrefixRoute + if err := libol.UnmarshalLoad(&obj, file); err != nil { + out.Warn("%15s: %s", filepath.Base(file), err) + } else { + out.Info("%15s: %s", filepath.Base(file), "success") + } + } + } + return nil +} + +func (u Config) Reload(c *cli.Context) error { + url := u.Url(c.String("url"), "reload") + clt := u.NewHttp(c.String("token")) + data := &schema.Message{} + if err := clt.PutJSON(url, nil, data); err == nil { + fmt.Println(data.Message) + return nil + } else { + return err + } +} + +func (u Config) Save(c *cli.Context) error { + url := u.Url(c.String("url"), "save") + clt := u.NewHttp(c.String("token")) + data := &schema.Message{} + if err := clt.PutJSON(url, nil, data); err == nil { + fmt.Println(data.Message) + return nil + } else { + return err + } +} + +func (u Config) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "config", + Aliases: []string{"cfg"}, + Usage: "Switch configuration", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all configuration", + Aliases: []string{"ls"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "format", Value: "json"}, + }, + Action: u.List, + }, + { + Name: "check", + Usage: "Check all configuration", + Aliases: []string{"co"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "dir", Value: "/etc/openlan"}, + }, + Action: u.Check, + }, + { + Name: "reload", + Usage: "Reload configuration", + Aliases: []string{"re"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "dir", Value: "/etc/openlan"}, + }, + Action: u.Reload, + }, + { + Name: "save", + Usage: "Save configuration", + Aliases: []string{"sa"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "dir", Value: "/etc/openlan"}, + }, + Action: u.Save, + }, + }, + }) +} diff --git a/cmd/api/v5/device.go b/cmd/api/v5/device.go new file mode 100755 index 0000000..c2d5296 --- /dev/null +++ b/cmd/api/v5/device.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Device struct { + Cmd +} + +func (u Device) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/device" + } else { + return prefix + "/api/device/" + name + } +} + +func (u Device) Tmpl() string { + return `# total {{ len . }} +{{ps -13 "name"}} {{ps -13 "mtu"}} {{ps -16 "mac"}} {{ps -6 "provider"}} +{{- range . 
}} +{{ps -13 .Name}} {{pi -13 .Mtu}} {{ps -16 .Mac}} {{ps -6 .Provider}} +{{- end }} +` +} + +func (u Device) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.Device + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Device) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "device", + Aliases: []string{"dev"}, + Usage: "linux network device", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all devices", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/esp.go b/cmd/api/v5/esp.go new file mode 100755 index 0000000..65bf27f --- /dev/null +++ b/cmd/api/v5/esp.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Esp struct { + Cmd +} + +func (u Esp) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/esp" + } else { + return prefix + "/api/esp/" + name + } +} + +func (u Esp) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "name"}} {{ps -16 "address"}} +{{- range . }} +{{ps -16 .Name}} {{ps -16 .Address}} +{{- end }} +` +} + +func (u Esp) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.Esp + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Esp) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "esp", + Aliases: []string{"esp"}, + Usage: "IPSec ESP configuration", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all esp", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/lease.go b/cmd/api/v5/lease.go new file mode 100755 index 0000000..8decc7a --- /dev/null +++ b/cmd/api/v5/lease.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Lease struct { + Cmd +} + +func (u Lease) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/lease" + } else { + return prefix + "/api/lease/" + name + } +} + +func (u Lease) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "uuid"}} {{ps -16 "alias"}} {{ ps -16 "address" }} {{ps -22 "client"}} {{ps -8 "network"}} {{ ps -6 "type"}} +{{- range . 
}} +{{ps -16 .UUID}} {{ps -16 .Alias}} {{ ps -16 .Address}} {{ps -22 .Client}} {{ps -8 .Network}} {{ ps -6 .Type}} +{{- end }} +` +} + +func (u Lease) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.Lease + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Lease) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "lease", + Aliases: []string{"le"}, + Usage: "DHCP address lease", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all lease", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/link.go b/cmd/api/v5/link.go new file mode 100755 index 0000000..d8cf92d --- /dev/null +++ b/cmd/api/v5/link.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Link struct { + Cmd +} + +func (u Link) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/link" + } else { + return prefix + "/api/link/" + name + } +} + +func (u Link) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "uuid"}} {{ps -8 "alive"}} {{ ps -8 "device" }} {{ps -8 "user"}} {{ps -22 "server"}} {{ps -8 "network"}} {{ ps -6 "state"}} +{{- range . }} +{{ps -16 .UUID}} {{pt .AliveTime | ps -8}} {{ ps -8 .Device}} {{ps -8 .User}} {{ps -22 .Server}} {{ps -8 .Network}} {{ ps -6 .State}} +{{- end }} +` +} + +func (u Link) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.Link + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Link) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "link", + Aliases: []string{"ln"}, + Usage: "Link connect to others", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all links", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/network.go b/cmd/api/v5/network.go new file mode 100755 index 0000000..0ba7039 --- /dev/null +++ b/cmd/api/v5/network.go @@ -0,0 +1,43 @@ +package v5 + +import ( + "fmt" + "github.com/luscis/openlan/cmd/api" + "github.com/urfave/cli/v2" +) + +type Network struct { + Cmd +} + +func (u Network) Url(prefix, name string) string { + return prefix + "/api/network" +} + +func (u Network) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + url += "?format=" + c.String("format") + clt := u.NewHttp(c.String("token")) + if data, err := clt.GetBody(url); err == nil { + fmt.Println(string(data)) + return nil + } else { + return err + } +} + +func (u Network) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "network", + Aliases: []string{"net"}, + Usage: "Logical network", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all network", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/openvpn.go b/cmd/api/v5/openvpn.go new file mode 100755 index 0000000..288b39f --- /dev/null +++ b/cmd/api/v5/openvpn.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type VPNClient struct { + Cmd +} + +func (u VPNClient) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/vpn/client" + } else { + return prefix + "/api/vpn/client/" + 
name + } +} + +func (u VPNClient) Tmpl() string { + return `# total {{ len . }} +{{ps -8 "alive"}} {{ps -16 "address"}} {{ ps -13 "device" }} {{ps -15 "name"}} {{ps -22 "remote"}} {{ ps -6 "state"}} +{{- range . }} +{{pt .AliveTime | ps -8}} {{ps -16 .Address}} {{ ps -13 .Device }} {{ps -15 .Name}} {{ps -22 .Remote}} {{ ps -6 .State}} +{{- end }} +` +} + +func (u VPNClient) List(c *cli.Context) error { + url := u.Url(c.String("url"), c.String("network")) + clt := u.NewHttp(c.String("token")) + var items []schema.VPNClient + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u VPNClient) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "client", + Aliases: []string{"cl"}, + Usage: "Connected client by OpenVPN", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all clients", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/point.go b/cmd/api/v5/point.go new file mode 100755 index 0000000..5268055 --- /dev/null +++ b/cmd/api/v5/point.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Point struct { + Cmd +} + +func (u Point) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/point" + } else { + return prefix + "/api/point/" + name + } +} + +func (u Point) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "uuid"}} {{ps -8 "alive"}} {{ ps -8 "device" }} {{ps -16 "alias"}} {{ps -8 "user"}} {{ps -22 "remote"}} {{ps -8 "network"}} {{ ps -6 "state"}} +{{- range . }} +{{ps -16 .UUID}} {{pt .AliveTime | ps -8}} {{ ps -8 .Device}} {{ps -16 .Alias}} {{ps -8 .User}} {{ps -22 .Remote}} {{ps -8 .Network}} {{ ps -6 .State}} +{{- end }} +` +} + +func (u Point) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.Point + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Point) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "point", + Aliases: []string{"ap"}, + Usage: "Point connected to this", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all points", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/policy.go b/cmd/api/v5/policy.go new file mode 100755 index 0000000..7e3c11d --- /dev/null +++ b/cmd/api/v5/policy.go @@ -0,0 +1,60 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" + "sort" +) + +type Policy struct { + Cmd +} + +func (u Policy) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/policy" + } else { + return prefix + "/api/policy/" + name + } +} + +func (u Policy) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "name"}} {{ ps -20 "source" }} {{ ps -20 "destination" }} +{{- range . 
}} +{{ps -16 .Name}} {{ ps -20 .Source }} {{ ps -20 .Dest }} +{{- end }} +` +} + +func (u Policy) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.EspPolicy + if err := clt.GetJSON(url, &items); err != nil { + return err + } + sort.SliceStable(items, func(i, j int) bool { + ii := items[i] + jj := items[j] + return ii.Name+ii.Source > jj.Name+jj.Source + }) + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u Policy) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "policy", + Aliases: []string{"po"}, + Usage: "IPSec policy configuration", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all xfrm policy", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/pprof.go b/cmd/api/v5/pprof.go new file mode 100755 index 0000000..cd65c7b --- /dev/null +++ b/cmd/api/v5/pprof.go @@ -0,0 +1,76 @@ +package v5 + +import ( + "fmt" + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type PProf struct { + Cmd +} + +func (u PProf) Url(prefix, name string) string { + return prefix + "/api/pprof" +} + +func (u PProf) Add(c *cli.Context) error { + pp := schema.PProf{ + Listen: c.String("listen"), + } + if pp.Listen == "" { + return libol.NewErr("listen value is empty") + } + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + if err := clt.PostJSON(url, pp, nil); err != nil { + return err + } + return nil +} + +func (u PProf) Del(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + if err := clt.DeleteJSON(url, nil, nil); err != nil { + return err + } + return nil +} + +func (u PProf) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var pp schema.PProf + if err := clt.GetJSON(url, &pp); err != nil { + return err + } + fmt.Println(pp.Listen) + return nil +} + +func (u PProf) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "pprof", + Aliases: []string{"pp"}, + Usage: "Configure pprof tool", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Show configuration", + Aliases: []string{"ls"}, + Action: u.List, + }, + { + Name: "enable", + Usage: "Enable pprof tool", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "listen", Value: "127.0.0.1:6060"}, + }, + Action: u.Add, + }, + }, + }) +} diff --git a/cmd/api/v5/server.go b/cmd/api/v5/server.go new file mode 100755 index 0000000..c62bcf0 --- /dev/null +++ b/cmd/api/v5/server.go @@ -0,0 +1,43 @@ +package v5 + +import ( + "fmt" + "github.com/luscis/openlan/cmd/api" + "github.com/urfave/cli/v2" +) + +type Server struct { + Cmd +} + +func (u Server) Url(prefix, name string) string { + return prefix + "/api/server" +} + +func (u Server) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + url += "?format=" + c.String("format") + clt := u.NewHttp(c.String("token")) + if data, err := clt.GetBody(url); err == nil { + fmt.Println(string(data)) + return nil + } else { + return err + } +} + +func (u Server) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "server", + Aliases: []string{"sr"}, + Usage: "Socket server status", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display server status", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/state.go b/cmd/api/v5/state.go new file mode 100644 index 
0000000..6059429 --- /dev/null +++ b/cmd/api/v5/state.go @@ -0,0 +1,60 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" + "sort" +) + +type State struct { + Cmd +} + +func (u State) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/state" + } else { + return prefix + "/api/state/" + name + } +} + +func (u State) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "name"}} {{ps -8 "spi"}} {{ ps -16 "local" }} {{ ps -16 "remote" }} {{ ps -12 "rx bytes" }} {{ ps -12 "tx bytes" }} {{ ps -12 "rx packages" }} {{ ps -12 "tx packages" }} +{{- range . }} +{{ps -16 .Name}} {{pi -8 .Spi }} {{ ps -16 .Local }} {{ ps -16 .Remote }} {{ pi -12 .RxBytes }} {{ pi -12 .TxBytes }} {{ pi -12 .RxPackages }} {{ pi -12 .TxPackages }} +{{- end }} +` +} + +func (u State) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.EspState + if err := clt.GetJSON(url, &items); err != nil { + return err + } + sort.SliceStable(items, func(i, j int) bool { + ii := items[i] + jj := items[j] + return ii.Spi > jj.Spi + }) + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u State) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "state", + Aliases: []string{"se"}, + Usage: "IPSec state configuration", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all xfrm state", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v5/user.go b/cmd/api/v5/user.go new file mode 100755 index 0000000..a1c59b7 --- /dev/null +++ b/cmd/api/v5/user.go @@ -0,0 +1,198 @@ +package v5 + +import ( + "fmt" + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" + "os" + "strings" + "time" +) + +type User struct { + Cmd +} + +func (u User) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/user" + } else { + return prefix + "/api/user/" + name + } +} + +func (u User) Add(c *cli.Context) error { + username := c.String("name") + user := &schema.User{ + Name: username, + Password: c.String("password"), + Role: c.String("role"), + Lease: c.String("lease"), + } + if user.Name == "" { + return libol.NewErr("name is empty") + } + if !strings.Contains(username, "@") { + return libol.NewErr("name not contains network") + } + values := strings.SplitN(username, "@", 2) + user.Name = values[0] + user.Network = values[1] + url := u.Url(c.String("url"), user.Name) + clt := u.NewHttp(c.String("token")) + if err := clt.PostJSON(url, user, nil); err != nil { + return err + } + return nil +} + +func (u User) Remove(c *cli.Context) error { + username := c.String("name") + url := u.Url(c.String("url"), username) + clt := u.NewHttp(c.String("token")) + if err := clt.DeleteJSON(url, nil, nil); err != nil { + return err + } + return nil +} + +func (u User) Tmpl() string { + return `# total {{ len . }} +{{ps -24 "username"}} {{ps -24 "password"}} {{ps -6 "role"}} {{ps -15 "lease"}} +{{- range . 
}} +{{p2 -24 "%s@%s" .Name .Network}} {{ps -24 .Password}} {{ps -6 .Role}} {{ps -15 .Lease }} +{{- end }} +` +} + +func (u User) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.User + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u User) Get(c *cli.Context) error { + username := c.String("name") + url := u.Url(c.String("url"), username) + client := u.NewHttp(c.String("token")) + items := []schema.User{{}} + if err := client.GetJSON(url, &items[0]); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u User) Check(c *cli.Context) error { + netFromO := c.String("network") + nameFromE := c.String("name") + passFromE := c.String("password") + if nameFromE == "" { + nameFromE = os.Getenv("username") + passFromE = os.Getenv("password") + } + netFromE := "default" + if strings.Contains(nameFromE, "@") { + netFromE = strings.Split(nameFromE, "@")[1] + } + fullName := nameFromE + if !strings.Contains(nameFromE, "@") { + fullName = nameFromE + "@" + netFromE + } + if netFromO != "" && netFromE != netFromO { + return libol.NewErr("wrong: zo=%s, us=%s", netFromO, nameFromE) + } + alias := "" + if ip, ok := os.LookupEnv("untrusted_ip"); ok { + alias = ip + ":" + os.Getenv("untrusted_port") + } + url := u.Url(c.String("url"), fullName) + url += "/check" + client := u.NewHttp(c.String("token")) + data := &schema.User{ + Name: fullName, + Password: passFromE, + Alias: alias, + } + if err := client.PostJSON(url, data, nil); err == nil { + fmt.Printf("success: us=%s\n", nameFromE) + return nil + } else { + return err + } +} + +func (u User) Commands(app *api.App) { + lease := time.Now().AddDate(99, 0, 0) + app.Command(&cli.Command{ + Name: "user", + Aliases: []string{"us"}, + Usage: "User authentication", + Subcommands: []*cli.Command{ + { + Name: "add", + Usage: "Add a new user", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name"}, + &cli.StringFlag{Name: "password", Value: libol.GenRandom(24)}, + &cli.StringFlag{Name: "role", Value: "guest"}, + &cli.StringFlag{Name: "lease", Value: lease.Format(libol.LeaseTime)}, + }, + Action: u.Add, + }, + { + Name: "set", + Usage: "Update a user", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name"}, + &cli.StringFlag{Name: "password"}, + &cli.StringFlag{Name: "role"}, + &cli.StringFlag{Name: "lease"}, + }, + Action: u.Add, + }, + { + Name: "remove", + Usage: "Remove an existing user", + Aliases: []string{"rm"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name"}, + }, + Action: u.Remove, + }, + { + Name: "list", + Usage: "Display all users", + Aliases: []string{"ls"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "network"}, + }, + Action: u.List, + }, + { + Name: "get", + Usage: "Get an user", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name"}, + }, + Action: u.Get, + }, + { + Name: "check", + Usage: "Check an user", + Aliases: []string{"co"}, + Flags: []cli.Flag{ + &cli.StringFlag{Name: "name"}, + &cli.StringFlag{Name: "password"}, + &cli.StringFlag{Name: "network"}, + }, + Action: u.Check, + }, + }, + }) +} diff --git a/cmd/api/v5/version.go b/cmd/api/v5/version.go new file mode 100755 index 0000000..73e9f06 --- /dev/null +++ b/cmd/api/v5/version.go @@ -0,0 +1,40 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type Version struct { + Cmd +} + +func (v Version) Url(prefix, 
name string) string { + return prefix + "/api/version" +} + +func (v Version) Tmpl() string { + return `Version : {{ .Version }} +Build at: {{ .Date}} +` +} + +func (v Version) List(c *cli.Context) error { + url := v.Url(c.String("url"), "") + clt := v.NewHttp(c.String("token")) + var item schema.Version + if err := clt.GetJSON(url, &item); err != nil { + return err + } + return v.Out(item, c.String("format"), v.Tmpl()) +} + +func (v Version) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "version", + Aliases: []string{"v"}, + Usage: "show version information", + Action: v.List, + }) +} diff --git a/cmd/api/v5/vxlan.go b/cmd/api/v5/vxlan.go new file mode 100755 index 0000000..10a42f3 --- /dev/null +++ b/cmd/api/v5/vxlan.go @@ -0,0 +1,54 @@ +package v5 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/schema" + "github.com/urfave/cli/v2" +) + +type VxLAN struct { + Cmd +} + +func (u VxLAN) Url(prefix, name string) string { + if name == "" { + return prefix + "/api/vxlan" + } else { + return prefix + "/api/vxlan/" + name + } +} + +func (u VxLAN) Tmpl() string { + return `# total {{ len . }} +{{ps -16 "name"}} {{ps -15 "bridge"}} {{ ps -16 "address" }} {{ps -16 "vni"}} {{ps -16 "local"}} {{ps -22 "remote"}} +{{- range . }} +{{ps -16 .UUID}} {{pt .AliveTime | ps -8}} {{ ps -8 .Device}} {{ps -16 .Alias}} {{ps -8 .User}} {{ps -22 .Remote}} +{{- end }} +` +} + +func (u VxLAN) List(c *cli.Context) error { + url := u.Url(c.String("url"), "") + clt := u.NewHttp(c.String("token")) + var items []schema.VxLAN + if err := clt.GetJSON(url, &items); err != nil { + return err + } + return u.Out(items, c.String("format"), u.Tmpl()) +} + +func (u VxLAN) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "vxlan", + Aliases: []string{"vx"}, + Usage: "VxLAN configuration", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "Display all vxlan", + Aliases: []string{"ls"}, + Action: u.List, + }, + }, + }) +} diff --git a/cmd/api/v6/cmd.go b/cmd/api/v6/cmd.go new file mode 100755 index 0000000..d4435f2 --- /dev/null +++ b/cmd/api/v6/cmd.go @@ -0,0 +1,29 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/urfave/cli/v2" +) + +func Before(c *cli.Context) error { + if _, err := database.NewDBClient(nil); err == nil { + return nil + } else { + return err + } +} + +func After(c *cli.Context) error { + return nil +} + +func Commands(app *api.App) { + app.After = After + app.Before = Before + Switch{}.Commands(app) + Network{}.Commands(app) + Link{}.Commands(app) + Name{}.Commands(app) + Prefix{}.Commands(app) +} diff --git a/cmd/api/v6/link.go b/cmd/api/v6/link.go new file mode 100755 index 0000000..37b2793 --- /dev/null +++ b/cmd/api/v6/link.go @@ -0,0 +1,262 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/urfave/cli/v2" + "sort" + "strings" +) + +type Link struct { +} + +func (l Link) List(c *cli.Context) error { + var lsLn []database.VirtualLink + network := c.String("network") + if err := database.Client.WhereList( + func(l *database.VirtualLink) bool { + return network == "" || l.Network == network + }, &lsLn); err != nil { + return err + } else { + sort.SliceStable(lsLn, func(i, j int) bool { + ii := lsLn[i] + jj := lsLn[j] + return ii.Network+ii.UUID > jj.Network+jj.UUID + }) + return 
api.Out(lsLn, c.String("format"), "") + } +} + +func GetUserPassword(auth string) (string, string) { + values := strings.SplitN(auth, ":", 2) + if len(values) == 2 { + return values[0], values[1] + } + return auth, auth +} + +func GetDeviceName(conn, device string) string { + if libol.GetPrefix(conn, 4) == "spi:" { + return conn + } else { + return device + } +} + +func (l Link) Add(c *cli.Context) error { + auth := c.String("authentication") + connection := c.String("connection") + device := c.String("device") + lsLn := database.VirtualLink{ + UUID: c.String("uuid"), + Network: c.String("network"), + Connection: connection, + Device: device, + } + remoteAddr := c.String("remote-address") + user, pass := GetUserPassword(auth) + if err := database.Client.Get(&lsLn); err == nil { + lsVn := database.VirtualNetwork{ + Name: lsLn.Network, + } + if lsVn.Name == "" { + return libol.NewErr("network is nil") + } + if err := database.Client.Get(&lsVn); err != nil { + return libol.NewErr("find network %s: %s", lsVn.Name, err) + } + newLn := lsLn + if connection != "" { + newLn.Connection = connection + } + if user != "" { + newLn.Authentication["username"] = user + } + if pass != "" { + newLn.Authentication["password"] = pass + } + if remoteAddr != "" { + newLn.OtherConfig["remote_address"] = remoteAddr + } + if device != "" { + newLn.Device = device + } + ops, err := database.Client.Where(&lsLn).Update(&newLn) + if err != nil { + return err + } + if ret, err := database.Client.Transact(ops...); err != nil { + return err + } else { + database.PrintError(ret) + } + } else { + lsVn := database.VirtualNetwork{ + Name: c.String("network"), + } + if lsVn.Name == "" { + return libol.NewErr("network is nil") + } + if err := database.Client.Get(&lsVn); err != nil { + return libol.NewErr("find network %s: %s", lsVn.Name, err) + } + uuid := c.String("uuid") + if uuid == "" { + uuid = database.GenUUID() + } + newLn := database.VirtualLink{ + Network: lsLn.Network, + Connection: lsLn.Connection, + UUID: uuid, + Device: GetDeviceName(connection, device), + Authentication: map[string]string{ + "username": user, + "password": pass, + }, + OtherConfig: map[string]string{ + "local_address": lsVn.Address, + "remote_address": remoteAddr, + }, + } + ops, err := database.Client.Create(&newLn) + if err != nil { + return err + } + libol.Debug("Link.Add %s %s", ops, lsVn) + database.Client.Execute(ops) + ops, err = database.Client.Where(&lsVn).Mutate(&lsVn, model.Mutation{ + Field: &lsVn.LocalLinks, + Mutator: ovsdb.MutateOperationInsert, + Value: []string{newLn.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Link.Add %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + } + return nil +} + +func (l Link) Remove(c *cli.Context) error { + lsLn := database.VirtualLink{ + Network: c.String("network"), + Connection: c.String("connection"), + UUID: c.String("uuid"), + } + if err := database.Client.Get(&lsLn); err != nil { + return err + } + lsVn := database.VirtualNetwork{ + Name: lsLn.Network, + } + if err := database.Client.Get(&lsVn); err != nil { + return libol.NewErr("find network %s: %s", lsVn.Name, err) + } + if err := database.Client.Get(&lsLn); err != nil { + return err + } + ops, err := database.Client.Where(&lsLn).Delete() + if err != nil { + return err + } + libol.Debug("Link.Remove %s", ops) + database.Client.Execute(ops) + ops, err = database.Client.Where(&lsVn).Mutate(&lsVn, model.Mutation{ + Field: 
&lsVn.LocalLinks, + Mutator: ovsdb.MutateOperationDelete, + Value: []string{lsLn.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Link.Remove %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (l Link) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "link", + Aliases: []string{"li"}, + Usage: "Virtual Link", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List virtual links", + Aliases: []string{"ls"}, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "network", + Usage: "the network name", + }, + }, + Action: l.List, + }, + { + Name: "add", + Usage: "Add a virtual link", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "uuid", + }, + &cli.StringFlag{ + Name: "network", + Usage: "the network name", + }, + &cli.StringFlag{ + Name: "connection", + Value: "any", + Usage: "connection for remote server", + }, + &cli.StringFlag{ + Name: "device", + Usage: "the device name, like spi:10", + }, + &cli.StringFlag{ + Name: "authentication", + Usage: "user and password for authentication", + }, + &cli.StringFlag{ + Name: "remote-address", + Usage: "remote address in this link", + }, + }, + Action: l.Add, + }, + { + Name: "del", + Usage: "Del a virtual link", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "uuid", + }, + &cli.StringFlag{ + Name: "network", + Usage: "the network name", + }, + &cli.StringFlag{ + Name: "connection", + Usage: "connection for remote server", + }, + }, + Action: l.Remove, + }, + }, + }) +} diff --git a/cmd/api/v6/name.go b/cmd/api/v6/name.go new file mode 100755 index 0000000..a42d45a --- /dev/null +++ b/cmd/api/v6/name.go @@ -0,0 +1,146 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/urfave/cli/v2" + "net" + "sort" + "time" +) + +type Name struct { +} + +func (u Name) List(c *cli.Context) error { + var listNa []database.NameCache + if err := database.Client.List(&listNa); err != nil { + return err + } else { + sort.SliceStable(listNa, func(i, j int) bool { + ii := listNa[i] + jj := listNa[j] + return ii.UUID > jj.UUID + }) + return api.Out(listNa, c.String("format"), "") + } +} + +func (u Name) Add(c *cli.Context) error { + name := c.String("name") + lsNa := database.NameCache{ + Name: name, + UUID: c.String("uuid"), + } + if lsNa.Name == "" && lsNa.UUID == "" { + return libol.NewErr("Name is nil") + } + address := c.String("address") + if address == "" { + addrIps, _ := net.LookupIP(lsNa.Name) + if len(addrIps) > 0 { + address = addrIps[0].String() + } + } + newNa := lsNa + if name != "" { + newNa.Name = name + } + if address != "" { + newNa.Address = address + } + newNa.UpdateAt = time.Now().Format("2006-01-02T15:04") + if err := database.Client.Get(&lsNa); err == nil { + if lsNa.Address != address { + ops, err := database.Client.Where(&lsNa).Update(&newNa) + if err != nil { + return err + } + if ret, err := database.Client.Transact(ops...); err != nil { + return err + } else { + database.PrintError(ret) + } + } + } else { + ops, err := database.Client.Create(&newNa) + if err != nil { + return err + } + libol.Debug("Name.Add %s", ops) + if ret, err := database.Client.Transact(ops...); err != nil { + return err + } else { + database.PrintError(ret) + } + } + return nil +} + +func (u Name) Remove(c *cli.Context) error { + lsNa := database.NameCache{ + Name: c.String("name"), + UUID: c.String("uuid"), + } 
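+	// When no cached entry matches the given name/uuid, Remove returns nil and
+	// treats the record as already gone; otherwise the row is deleted and the
+	// change is committed below. Example usage (binary and host names are
+	// illustrative):
+	//
+	//	openlan name del --name node1.example.org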
+ if err := database.Client.Get(&lsNa); err != nil { + return nil + } + ops, err := database.Client.Where(&lsNa).Delete() + if err != nil { + return err + } + libol.Debug("Name.Remove %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (u Name) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "name", + Aliases: []string{"na"}, + Usage: "Name cache", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List name cache", + Aliases: []string{"ls"}, + Action: u.List, + }, + { + Name: "add", + Usage: "Add or update name cache", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "uuid", + }, + &cli.StringFlag{ + Name: "name", + }, + &cli.StringFlag{ + Name: "address", + }, + }, + Action: u.Add, + }, + { + Name: "del", + Usage: "Delete a name cache", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "uuid", + }, + &cli.StringFlag{ + Name: "name", + }, + }, + Action: u.Remove, + }, + }, + }) +} diff --git a/cmd/api/v6/network.go b/cmd/api/v6/network.go new file mode 100755 index 0000000..6d5db48 --- /dev/null +++ b/cmd/api/v6/network.go @@ -0,0 +1,154 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/urfave/cli/v2" + "sort" +) + +type Network struct { +} + +func (u Network) List(c *cli.Context) error { + var listVn []database.VirtualNetwork + err := database.Client.List(&listVn) + if err != nil { + return err + } + sort.SliceStable(listVn, func(i, j int) bool { + ii := listVn[i] + jj := listVn[j] + return ii.UUID > jj.UUID + }) + return api.Out(listVn, c.String("format"), "") +} + +func (u Network) Add(c *cli.Context) error { + name := c.String("name") + if name == "" { + return libol.NewErr("name is nil") + } + oldVn := database.VirtualNetwork{Name: name} + if err := database.Client.Get(&oldVn); err == nil { + return libol.NewErr("network %s already existed.", oldVn.Name) + } + address := c.String("address") + provider := c.String("provider") + newVn := database.VirtualNetwork{ + Name: name, + Address: address, + Bridge: "br-" + name, + UUID: database.GenUUID(), + Provider: provider, + } + ops, err := database.Client.Create(&newVn) + if err != nil { + return err + } + libol.Debug("Network.Add %s", ops) + database.Client.Execute(ops) + sw, err := database.Client.Switch() + if err != nil { + return err + } + ops, err = database.Client.Where(sw).Mutate(sw, model.Mutation{ + Field: &sw.VirtualNetworks, + Mutator: ovsdb.MutateOperationInsert, + Value: []string{newVn.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Network.Add %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (u Network) Remove(c *cli.Context) error { + name := c.String("name") + oldVn := database.VirtualNetwork{ + Name: name, + } + if err := database.Client.Get(&oldVn); err != nil { + return err + } + ops, err := database.Client.Where(&oldVn).Delete() + if err != nil { + return err + } + libol.Debug("Switch.Remove %s", ops) + database.Client.Execute(ops) + sw, err := database.Client.Switch() + if err != nil { + return err + } + ops, err = database.Client.Where(sw).Mutate(sw, model.Mutation{ + Field: &sw.VirtualNetworks, + Mutator: ovsdb.MutateOperationDelete, + Value: 
[]string{oldVn.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Network.Remove %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (u Network) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "network", + Aliases: []string{"ne"}, + Usage: "Virtual network", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List virtual networks", + Aliases: []string{"ls"}, + Action: u.List, + }, + { + Name: "add", + Usage: "Add a virtual network", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "name", + Usage: "unique name with short long"}, + &cli.StringFlag{ + Name: "provider", + Value: "openlan", + Usage: "provider name"}, + &cli.StringFlag{ + Name: "address", + Value: "169.255.169.0/24", + Usage: "ip address"}, + }, + Action: u.Add, + }, + { + Name: "del", + Usage: "Del a virtual network", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "name", + Usage: "unique name with short long"}, + }, + Action: u.Remove, + }, + }, + }) +} diff --git a/cmd/api/v6/prefix.go b/cmd/api/v6/prefix.go new file mode 100755 index 0000000..fce00c9 --- /dev/null +++ b/cmd/api/v6/prefix.go @@ -0,0 +1,171 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "github.com/urfave/cli/v2" + "sort" +) + +type Prefix struct { +} + +func (u Prefix) List(c *cli.Context) error { + var list []database.PrefixRoute + if err := database.Client.List(&list); err != nil { + return err + } else { + sort.SliceStable(list, func(i, j int) bool { + ii := list[i] + jj := list[j] + return ii.UUID > jj.UUID + }) + return api.Out(list, c.String("format"), "") + } +} + +func (u Prefix) Add(c *cli.Context) error { + lsVn := database.VirtualNetwork{ + Name: c.String("network"), + } + if lsVn.Name == "" { + return libol.NewErr("network is nil") + } + if err := database.Client.Get(&lsVn); err != nil { + return libol.NewErr("find network %s: %s", lsVn.Name, err) + } + newPf := database.PrefixRoute{ + UUID: database.GenUUID(), + Network: lsVn.Name, + Source: c.String("source"), + Prefix: c.String("prefix"), + Gateway: c.String("gateway"), + Mode: c.String("mode"), + } + ops, err := database.Client.Create(&newPf) + if err != nil { + return err + } + libol.Debug("Prefix.Add %s %s", ops, lsVn) + database.Client.Execute(ops) + ops, err = database.Client.Where(&lsVn).Mutate(&lsVn, model.Mutation{ + Field: &lsVn.PrefixRoutes, + Mutator: ovsdb.MutateOperationInsert, + Value: []string{newPf.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Prefix.Add %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (u Prefix) Remove(c *cli.Context) error { + lsPf := database.PrefixRoute{ + Network: c.String("network"), + Prefix: c.String("prefix"), + UUID: c.String("uuid"), + } + if err := database.Client.Get(&lsPf); err != nil { + return err + } + lsVn := database.VirtualNetwork{ + Name: lsPf.Network, + } + if err := database.Client.Get(&lsVn); err != nil { + return libol.NewErr("find network %s: %s", lsVn.Name, err) + } + if err := database.Client.Get(&lsPf); err != nil { + return err + } + ops, err := database.Client.Where(&lsPf).Delete() + if err != nil { + return err + } + libol.Debug("Prefix.Remove %s", ops) + 
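+	// Removing a prefix route is a two-step change: the PrefixRoute row is
+	// deleted, then the owning VirtualNetwork's PrefixRoutes set is mutated to
+	// drop the route UUID. Both operation batches go through Execute and are
+	// applied by the final Commit.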
database.Client.Execute(ops) + ops, err = database.Client.Where(&lsVn).Mutate(&lsVn, model.Mutation{ + Field: &lsVn.PrefixRoutes, + Mutator: ovsdb.MutateOperationDelete, + Value: []string{lsPf.UUID}, + }) + if err != nil { + return err + } + libol.Debug("Prefix.Remove %s", ops) + database.Client.Execute(ops) + if ret, err := database.Client.Commit(); err != nil { + return err + } else { + database.PrintError(ret) + } + return nil +} + +func (u Prefix) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "route", + Aliases: []string{"ro"}, + Usage: "Prefix route", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List prefix routes", + Aliases: []string{"ls"}, + Action: u.List, + }, + { + Name: "add", + Usage: "Add a prefix route", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "network", + Usage: "the network name", + }, + &cli.StringFlag{ + Name: "prefix", + }, + &cli.StringFlag{ + Name: "source", + Value: "0.0.0.0/0", + }, + &cli.StringFlag{ + Name: "gateway", + Value: "local", + }, + &cli.StringFlag{ + Name: "mode", + Value: "direct", + }, + }, + Action: u.Add, + }, + { + Name: "del", + Usage: "delete a prefix route", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "uuid", + }, + &cli.StringFlag{ + Name: "network", + Usage: "the network name", + }, + &cli.StringFlag{ + Name: "prefix", + }, + }, + Action: u.Remove, + }, + }, + }) +} diff --git a/cmd/api/v6/switch.go b/cmd/api/v6/switch.go new file mode 100755 index 0000000..b6d349f --- /dev/null +++ b/cmd/api/v6/switch.go @@ -0,0 +1,85 @@ +package v6 + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/urfave/cli/v2" +) + +type Switch struct { +} + +func (u Switch) List(c *cli.Context) error { + var listSw []database.Switch + if err := database.Client.List(&listSw); err == nil { + return api.Out(listSw, c.String("format"), "") + } + return nil +} + +func (u Switch) Add(c *cli.Context) error { + protocol := c.String("protocol") + listen := c.Int("listen") + newSw := database.Switch{ + Protocol: protocol, + Listen: listen, + } + sw, _ := database.Client.Switch() + if sw == nil { + ops, err := database.Client.Create(&newSw) + if err != nil { + return err + } + libol.Debug("Switch.Add %s", ops) + if ret, err := database.Client.Transact(ops...); err != nil { + return err + } else { + database.PrintError(ret) + } + } else { + ops, err := database.Client.Where(sw).Update(&newSw) + if err != nil { + return err + } + libol.Debug("Switch.Add %s", ops) + if ret, err := database.Client.Transact(ops...); err != nil { + return err + } else { + database.PrintError(ret) + } + } + return nil +} + +func (u Switch) Commands(app *api.App) { + app.Command(&cli.Command{ + Name: "switch", + Aliases: []string{"sw"}, + Usage: "Global switch", + Subcommands: []*cli.Command{ + { + Name: "list", + Usage: "List global switch", + Aliases: []string{"ls"}, + Action: u.List, + }, + { + Name: "add", + Usage: "Add or update switch", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "protocol", + Value: "tcp", + Usage: "used protocol: tcp|udp|http|tls"}, + &cli.IntFlag{ + Name: "listen", + Value: 10002, + Usage: "listen on port: 1024-65535", + }, + }, + Action: u.Add, + }, + }, + }) +} diff --git a/cmd/main.go b/cmd/main.go new file mode 100755 index 0000000..9edd494 --- /dev/null +++ b/cmd/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/cmd/api/v5" + "github.com/luscis/openlan/cmd/api/v6" + 
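+	// v5 commands talk to the switch HTTP API using the URL/TOKEN settings,
+	// while v6 commands operate on the OVSDB configuration database selected
+	// by CONFSERVER/DATABASE below.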
"log" + "os" +) + +func main() { + api.Version = api.GetEnv("VERSION", api.Version) + api.Url = api.GetEnv("URL", api.Url) + api.Token = api.GetEnv("TOKEN", api.Token) + api.Server = api.GetEnv("CONFSERVER", api.Server) + api.Database = api.GetEnv("DATABASE", api.Database) + app := &api.App{} + app.New() + + switch api.Version { + case "v6": + v6.Commands(app) + default: + v5.Commands(app) + } + if err := app.Run(os.Args); err != nil { + log.Fatal(err) + } +} diff --git a/cmd/point_darwin/main.go b/cmd/point_darwin/main.go new file mode 100755 index 0000000..d98b82b --- /dev/null +++ b/cmd/point_darwin/main.go @@ -0,0 +1,23 @@ +// +build darwin + +package main + +import ( + "github.com/luscis/openlan/pkg/access" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" +) + +func main() { + c := config.NewPoint() + p := access.NewPoint(c) + p.Initialize() + libol.Go(p.Start) + if c.Terminal == "on" { + t := access.NewTerminal(p) + t.Start() + } else { + libol.Wait() + } + p.Stop() +} diff --git a/cmd/point_linux/main.go b/cmd/point_linux/main.go new file mode 100755 index 0000000..776889a --- /dev/null +++ b/cmd/point_linux/main.go @@ -0,0 +1,31 @@ +// +build linux + +package main + +import ( + "github.com/luscis/openlan/pkg/access" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" +) + +func main() { + c := config.NewPoint() + p := access.NewPoint(c) + // terminal off for linux service, on for open a terminal + // and others just wait. + if c.Terminal == "off" { + libol.PreNotify() + } + p.Initialize() + libol.Go(p.Start) + if c.Terminal == "on" { + t := access.NewTerminal(p) + t.Start() + } else if c.Terminal == "off" { + libol.SdNotify() + libol.Wait() + } else { + libol.Wait() + } + p.Stop() +} diff --git a/cmd/point_windows/main.go b/cmd/point_windows/main.go new file mode 100755 index 0000000..1f69574 --- /dev/null +++ b/cmd/point_windows/main.go @@ -0,0 +1,23 @@ +// +build windows + +package main + +import ( + "github.com/luscis/openlan/pkg/access" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" +) + +func main() { + c := config.NewPoint() + p := access.NewPoint(c) + p.Initialize() + libol.Go(p.Start) + if c.Terminal == "on" { + t := access.NewTerminal(p) + t.Start() + } else { + libol.Wait() + } + p.Stop() +} diff --git a/cmd/point_windows/main.ico b/cmd/point_windows/main.ico new file mode 100644 index 0000000..7f147a2 Binary files /dev/null and b/cmd/point_windows/main.ico differ diff --git a/cmd/point_windows/main.manifest b/cmd/point_windows/main.manifest new file mode 100644 index 0000000..afd5c43 --- /dev/null +++ b/cmd/point_windows/main.manifest @@ -0,0 +1,17 @@ + + + + Uradiam OpenLAN Point Software + + + + + + + + diff --git a/cmd/point_windows/main.syso b/cmd/point_windows/main.syso new file mode 100644 index 0000000..46ca025 Binary files /dev/null and b/cmd/point_windows/main.syso differ diff --git a/cmd/proxy/main.go b/cmd/proxy/main.go new file mode 100755 index 0000000..6756190 --- /dev/null +++ b/cmd/proxy/main.go @@ -0,0 +1,20 @@ +package main + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/proxy" +) + +func main() { + c := config.NewProxy() + libol.SetLogger(c.Log.File, c.Log.Verbose) + + p := proxy.NewProxy(c) + libol.PreNotify() + p.Initialize() + libol.Go(p.Start) + libol.SdNotify() + libol.Wait() + p.Stop() +} diff --git a/cmd/switch/main.go b/cmd/switch/main.go new file mode 100755 index 
0000000..e22fa4c --- /dev/null +++ b/cmd/switch/main.go @@ -0,0 +1,25 @@ +package main + +import ( + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/switch" +) + +func main() { + udp := api.GetEnv("ESPUDP", "4500") + config.SetLocalUdp(udp) + c := config.NewSwitch() + libol.SetLogger(c.Log.File, c.Log.Verbose) + libol.Debug("main %s", c) + cache.Init(&c.Perf) + s := _switch.NewSwitch(c) + libol.PreNotify() + s.Initialize() + s.Start() + libol.SdNotify() + libol.Wait() + s.Stop() +} diff --git a/core/CMakeLists.txt b/core/CMakeLists.txt new file mode 100755 index 0000000..fa40633 --- /dev/null +++ b/core/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 2.8) +project(openlan C) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_FLAGS "-std=gnu99 -g -DHAVE_CONFIG_H") +set(CMAKE_VERBOSE_MAKEFILE ON) + +include_directories(../3rd/ovs) +include_directories(../3rd/ovs/lib) +include_directories(../3rd/ovs/include) +include_directories(.) +include_directories(idlc) +include_directories(tcp) +include_directories(udp) + +link_directories(${CMAKE_SOURCE_DIR}/../build/obj/usr/lib) +link_directories(${CMAKE_SOURCE_DIR}/../build/obj/usr/lib64) + +file(GLOB IDL_SOURCES "idlc/*.c") +file(GLOB UDP_SOURCES "udp/*.c") +file(GLOB TCP_SOURCES "tcp/*.c") + +add_executable(openudp ${UDP_SOURCES} ${IDL_SOURCES}) +target_link_libraries(openudp libopenvswitch.a) +target_link_libraries(openudp pthread ssl crypto rt m unbound) + +add_executable(opentcp ${TCP_SOURCES}) +target_link_libraries(opentcp pthread) diff --git a/core/auto.sh b/core/auto.sh new file mode 100755 index 0000000..f63bfc0 --- /dev/null +++ b/core/auto.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -ex + +action=$1 +version=$(cat VERSION) +cd $(dirname $0) + +check_and_update() { + file0=$1 + file1=$2 + md5f0=$(md5sum $file0 | awk '{print $1}') + md5f1=$(md5sum $file1 | awk '{print $1}') + if [ "$md5f0"x != "$md5f1"x ]; then + mv $file0 $file1 + fi +} + +python_bin=python +type $python_bin || python_bin="python3" +ovs_dir="../3rd/ovs" + +build_idlc() { + idlc_bin="$ovs_dir/ovsdb/ovsdb-idlc.in" + [ -e "idlc/confd.ovsschema" ] || ln -s -f ../../dist/resource/confd.schema.json idlc/confd.ovsschema + PYTHONPATH="$ovs_dir/python:"$PYTHONPATH PYTHONDONTWRITEBYTECODE=yes $python_bin $idlc_bin annotate idlc/confd.ovsschema idlc/confd-idl.ann > /tmp/confd-idl.ovsidl + check_and_update /tmp/confd-idl.ovsidl idlc/confd-idl.ovsidl + PYTHONPATH="$ovs_dir/python:"$PYTHONPATH PYTHONDONTWRITEBYTECODE=yes $python_bin $idlc_bin c-idl-source idlc/confd-idl.ovsidl > /tmp/confd-idl.c + check_and_update /tmp/confd-idl.c idlc/confd-idl.c + PYTHONPATH="$ovs_dir/python:"$PYTHONPATH PYTHONDONTWRITEBYTECODE=yes $python_bin $idlc_bin c-idl-header idlc/confd-idl.ovsidl > /tmp/confd-idl.h + check_and_update /tmp/confd-idl.h idlc/confd-idl.h +} + +update_version() { + cp version.h /tmp/version.h + sed -i "s/#define CORE_PACKAGE_STRING .*/#define CORE_PACKAGE_STRING \"opencore $version\"/g" /tmp/version.h + sed -i "s/#define CORE_PACKAGE_VERSION .*/#define CORE_PACKAGE_VERSION \"$version\"/g" /tmp/version.h + check_and_update /tmp/version.h version.h +} + +if [ "$action"x == "build"x ] || [ "$action"x == ""x ]; then + update_version + build_idlc +elif [ "$action"x == "clean"x ]; then + echo "TODO" +fi diff --git a/core/idlc/confd-idl.ann b/core/idlc/confd-idl.ann new file mode 100644 index 0000000..2c1a19b --- /dev/null +++ 
b/core/idlc/confd-idl.ann @@ -0,0 +1,9 @@ +# -*- python -*- + +# This code, when invoked by "ovsdb-idlc annotate" (by the build +# process), annotates vswitch.ovsschema with additional data that give +# the ovsdb-idl engine information about the types involved, so that +# it can generate more programmer-friendly data structures. + +s["idlPrefix"] = "openrec_" +s["idlHeader"] = "\"confd-idl.h\"" diff --git a/core/idlc/confd.ovsschema b/core/idlc/confd.ovsschema new file mode 120000 index 0000000..62786d7 --- /dev/null +++ b/core/idlc/confd.ovsschema @@ -0,0 +1 @@ +../../dist/resource/confd.schema.json \ No newline at end of file diff --git a/core/tcp/README.md b/core/tcp/README.md new file mode 100644 index 0000000..56df0b2 --- /dev/null +++ b/core/tcp/README.md @@ -0,0 +1,9 @@ +# build + +mkdir -p build && cd ./build +cmake .. +make + +# valgrind + +valgrind --leak-check=yes ./build/core diff --git a/core/tcp/control.c b/core/tcp/control.c new file mode 100644 index 0000000..f39e6c2 --- /dev/null +++ b/core/tcp/control.c @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include "control.h" diff --git a/core/tcp/control.h b/core/tcp/control.h new file mode 100644 index 0000000..fba6262 --- /dev/null +++ b/core/tcp/control.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#ifndef CORE_CONTROL_H +#define CORE_CONTROL_H + +#endif //CORE_CONTROL_H diff --git a/core/tcp/main.c b/core/tcp/main.c new file mode 100644 index 0000000..2a3021f --- /dev/null +++ b/core/tcp/main.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include +#include "socket.h" + +int main(int argc, char *argv[]) { + char *addr = NULL; + int port = 9090; + + if (argc > 2) { + addr = argv[1]; + sscanf(argv[2], "%d", &port); + } else if (argc > 1) { + sscanf(argv[1], "%d", &port); + } + + if (addr == NULL) { + printf("Listen on %d!\n", port); + start_tcp_server(port); + } else { + printf("Connect to %s:%d\n", addr, port); + start_tcp_client(addr, port); + } + return 0; +} diff --git a/core/tcp/message.c b/core/tcp/message.c new file mode 100644 index 0000000..502cf35 --- /dev/null +++ b/core/tcp/message.c @@ -0,0 +1,10 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include "message.h" diff --git a/core/tcp/message.h b/core/tcp/message.h new file mode 100644 index 0000000..09f96a5 --- /dev/null +++ b/core/tcp/message.h @@ -0,0 +1,13 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. 
+ * + */ + +#ifndef CORE_MESSAGE_H +#define CORE_MESSAGE_H + +#endif //CORE_MESSAGE_H diff --git a/core/tcp/socket.c b/core/tcp/socket.c new file mode 100644 index 0000000..345938a --- /dev/null +++ b/core/tcp/socket.c @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tuntap.h" +#include "socket.h" + +int non_blocking(int fd) { + int flags = fcntl(fd, F_GETFL, 0); + return fcntl(fd, F_SETFL, flags | O_NONBLOCK); +} + +int recv_full(int fd, char *buf, ssize_t size) { + ssize_t read_size = 0; + + for (; size > 0;) { + read_size = recv(fd, buf, size, 0); + if (read_size <= 0) return read_size; + buf += read_size; + size -= read_size; + } + return 0; +} + +int send_full(int fd, char *buf, ssize_t size) { + ssize_t write_size = 0; + + for (;size > 0;) { + write_size = send(fd, buf, size, 0); + if (write_size <= 0) return write_size; + buf += write_size; + size -= write_size; + } + return 0; +} + +void *read_client(void *argv) { + uint16_t buf_size = 0; + uint16_t read_size = 0; + uint8_t buf[4096]; + peer_t *conn = NULL; + + assert(NULL != argv); + conn = (peer_t *) argv; + + for(;;) { + buf_size = recv_full(conn->socket_fd, buf, 4); + if (buf_size != 0) { + break; + } + read_size = ntohs(*(uint16_t *)(buf + 2)); + memset(buf, 0, sizeof buf); + buf_size = recv_full(conn->socket_fd, buf, read_size); + if (buf_size != 0) { + printf("ERROR: on read %d != %d\n", read_size, buf_size); + break; + } + write(conn->device_fd, buf, read_size); + } +} + +void *read_device(void *argv) { + uint16_t write_size = 0; + uint16_t read_size = 0; + uint8_t buf[4096]; + peer_t *conn = NULL; + + assert(NULL != argv); + conn = (peer_t *) argv; + + for(;;) { + read_size = read(conn->device_fd, buf + 4, sizeof (buf)); + if (read_size <= 0) { + continue; + } + *(uint16_t *)(buf + 2) = htons(read_size); + read_size += 4; + write_size = send_full(conn->socket_fd, buf, read_size); + if (write_size != 0) { + printf("ERROR: write to conn %d:%d", write_size, read_size); + break; + } + } +} + +int start_peer(peer_t *peer) { + pthread_t client; + pthread_t device; + + if(pthread_create(&client, NULL, read_client, &peer)) { + fprintf(stderr, "Error creating thread\n"); + return 1; + } + if(pthread_create(&device, NULL, read_device, &peer)) { + fprintf(stderr, "Error creating thread\n"); + return 1; + } + if(pthread_join(client, NULL)) { + fprintf(stderr, "Error joining thread\n"); + return 2; + } + if(pthread_join(device, NULL)) { + fprintf(stderr, "Error joining thread\n"); + return 2; + } +} + +int start_tcp_server(uint16_t port) { + struct sockaddr_in server_addr; + + bzero(&server_addr, sizeof(struct sockaddr_in)); + server_addr.sin_family = AF_INET; + server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + server_addr.sin_port = htons(port); + + int server_fd = 0; + server_fd = socket(AF_INET, SOCK_STREAM, 0); + if(bind(server_fd, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) { + printf("bind error\n"); + return -1; + } + if(listen(server_fd, 2) < 0) { + printf("listen error\n"); + return -1; + } + + struct sockaddr_in conn_addr; + socklen_t conn_addr_len = sizeof(conn_addr); + + int conn_fd = 0; + char dev_name[1024] = {0}; + int tap_fd = 0; + + conn_fd = accept(server_fd, (struct sockaddr 
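
The read_client()/read_device() loops in core/tcp/socket.c above implement a small length-prefixed framing over the TCP stream: each frame carries a 4-byte header whose last two bytes hold the payload length in network byte order, followed by the payload, which is copied to or from the TAP device. A minimal sketch of matching header helpers follows; the helper names are hypothetical and not part of the repository.

```
/* Sketch of the 4-byte frame header used by read_client()/read_device()
 * above: bytes 0..1 are padding, bytes 2..3 carry the payload length in
 * network byte order. Helper names are hypothetical. */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define FRAME_HDR_LEN 4

static inline void frame_put_len(uint8_t *hdr, uint16_t payload_len)
{
    memset(hdr, 0, FRAME_HDR_LEN);
    uint16_t be = htons(payload_len);
    memcpy(hdr + 2, &be, sizeof be);   /* length lives in bytes 2..3 */
}

static inline uint16_t frame_get_len(const uint8_t *hdr)
{
    uint16_t be;
    memcpy(&be, hdr + 2, sizeof be);
    return ntohs(be);                  /* back to host byte order */
}
```

recv_full() and send_full() above exist because TCP may deliver a frame in several pieces; both loop until the requested byte count has been transferred or the peer closes the connection.
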
*)&conn_addr, &conn_addr_len); + printf("accept connection on %d\n", conn_fd); + + tap_fd = create_tap(dev_name); + printf("open device on %s with %d\n", dev_name, tap_fd); + + peer_t peer = { + .socket_fd = conn_fd, + .device_fd = tap_fd, + }; + start_peer(&peer); + +finish: + close(conn_fd); + close(server_fd); + close(tap_fd); + printf("exit from %d\n", server_fd); + return 0; +} + +int start_tcp_client(const char *addr, uint16_t port) { + int ret = 0; + int socket_fd = 0; + struct sockaddr_in server_addr; + + socket_fd = socket(PF_INET, SOCK_STREAM, 0); + if (socket_fd < 0) { + printf("ERROR: open socket %d", socket_fd); + return socket_fd; + } + + bzero(&server_addr, sizeof (server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(port); + server_addr.sin_addr.s_addr = inet_addr(addr); + + ret = connect(socket_fd, (struct sockaddr *)&server_addr, sizeof(server_addr)); + if(ret ==-1) { + printf("connect() error\n"); + return ret; + } + char dev_name[1024] = {0}; + int tap_fd = 0; + + tap_fd = create_tap(dev_name); + printf("open device on %s with %d\n", dev_name, tap_fd); + + peer_t peer = { + .socket_fd = socket_fd, + .device_fd = tap_fd, + }; + start_peer(&peer); + +finish: + close(socket_fd); + close(tap_fd); + return 0; +} diff --git a/core/tcp/socket.h b/core/tcp/socket.h new file mode 100644 index 0000000..b315876 --- /dev/null +++ b/core/tcp/socket.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#ifndef CORE_SOCKET_H +#define CORE_SOCKET_H + +#include "types.h" + +typedef struct { + int socket_fd; + int device_fd; +} peer_t; + +int start_tcp_server(uint16_t port); +int start_tcp_client(const char *addr, uint16_t port); + +#endif //CORE_SOCKET_H diff --git a/core/tcp/tuntap.c b/core/tcp/tuntap.c new file mode 100644 index 0000000..3326d45 --- /dev/null +++ b/core/tcp/tuntap.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tuntap.h" + +int create_tap(char *name) { + struct ifreq ifr; + int fd = -1; + int err = -1; + + assert(NULL != name); + if((fd = open(DEV_NET_TUN, O_RDWR)) < 0 ) { + return -1; + } + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* IFF_TUN or IFF_TAP, plus maybe IFF_NO_PI */ + if (*name) { + strncpy(ifr.ifr_name, name, IFNAMSIZ); + } + if((err = ioctl(fd, TUNSETIFF, (void *) &ifr)) < 0) { + close(fd); + return err; + } + strcpy(name, ifr.ifr_name); + return fd; +} diff --git a/core/tcp/tuntap.h b/core/tcp/tuntap.h new file mode 100644 index 0000000..884f312 --- /dev/null +++ b/core/tcp/tuntap.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. 
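
create_tap() in core/tcp/tuntap.c above opens /dev/net/tun and attaches a TAP interface (IFF_TAP | IFF_NO_PI), returning a file descriptor that reads and writes raw Ethernet frames. Below is a minimal usage sketch, assuming only the create_tap() declaration from tuntap.h; bringing the link up and assigning an address is left to ip(8) or netlink.

```
/* Illustrative only: open a kernel-named TAP device via create_tap() and
 * read a single Ethernet frame. Not part of the repository. */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/types.h>

#include "tuntap.h"

int tap_demo(void)
{
    char name[1024] = {0};    /* empty name: let the kernel pick, e.g. tap0 */
    uint8_t frame[4096];

    int fd = create_tap(name);
    if (fd < 0) {
        perror("create_tap");
        return -1;
    }
    printf("created %s\n", name);

    /* With IFF_NO_PI set, each read() returns exactly one frame. */
    ssize_t n = read(fd, frame, sizeof frame);
    printf("read %zd bytes\n", n);

    close(fd);
    return 0;
}
```
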
+ * + */ + +#ifndef CORE_TUNTAP_H +#define CORE_TUNTAP_H + +#include + +#define DEV_NET_TUN "/dev/net/tun" + +int create_tap(char *name); + +#endif //CORE_TUNTAP_H diff --git a/core/tcp/types.h b/core/tcp/types.h new file mode 100644 index 0000000..cc08253 --- /dev/null +++ b/core/tcp/types.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#ifndef CORE_TYPES_H +#define CORE_TYPES_H + +#ifndef int8_t +#define int8_t char +#endif + +#ifndef uint8_t +#define uint8_t unsigned char +#endif + +#ifndef uint16_t +#define uint16_t unsigned short +#endif + +#ifndef int16_t +#define int16_t short +#endif + +#ifndef uint32_t +#define uint32_t unsigned int +#endif + +#ifndef int32_t +#define int32_t int +#endif + +#endif //CORE_TYPES_H diff --git a/core/udp/main.c b/core/udp/main.c new file mode 100755 index 0000000..43dbed6 --- /dev/null +++ b/core/udp/main.c @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "openvswitch/dynamic-string.h" +#include "openvswitch/poll-loop.h" +#include "openvswitch/vconn.h" +#include "openvswitch/vlog.h" + +#include "ovsdb-data.h" +#include "ovsdb-idl-provider.h" + +#include "command-line.h" +#include "confd-idl.h" +#include "daemon.h" +#include "udp.h" +#include "unixctl.h" +#include "ovs-thread.h" +#include "timeval.h" +#include "version.h" + +#define RUN_DIR "/var/openlan" +#define UDP_PORT 4500 + +VLOG_DEFINE_THIS_MODULE(main); +/* Rate limit for error messages. 
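
core/tcp/types.h above provides #define fallbacks for the fixed-width integer names. Note that #ifndef only detects macros, so a uint16_t that arrives as a typedef from <stdint.h> would not suppress these fallbacks; on any C99 toolchain (the build uses -std=gnu99) <stdint.h> supplies the same names directly. A small comparison snippet, purely illustrative:

```
/* The standard C99 header provides the fixed-width names that types.h
 * falls back to defining. The typedefs below are a poor man's compile-time
 * width check (compilation fails if the sizes are wrong); they are
 * illustrative and not part of the repository. */
#include <stdint.h>

typedef char assert_uint16_is_2_bytes[sizeof(uint16_t) == 2 ? 1 : -1];
typedef char assert_uint32_is_4_bytes[sizeof(uint32_t) == 4 ? 1 : -1];
```
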
*/ +static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); + +static char *default_db_ = NULL; +static char *db_remote = NULL; +static int32_t udp_port = 0; + +struct udp_context { + struct udp_server *srv; + struct ovsdb_idl *idl; + struct ovsdb_idl_txn *idl_txn; + struct shash names; + struct shash networks; + struct shash links; + +}; + +static inline char * +unixctl_dir() +{ + return xasprintf("%s/%s.ctl", RUN_DIR, program_name); +} + +static inline char * +default_db(void) +{ + if (!default_db_) { + default_db_ = xasprintf("unix:%s/confd.sock", RUN_DIR); + } + return default_db_; +} + +static void +usage(void) +{ + printf("\ +%s: OpenLAN UDP Connection\n\ +usage %s [OPTIONS]\n\ +\n\ +Options:\n\ + --port=PORT listen on local udp PORT\n\ + (default: %d)\n\ + --db=DATABASE connect to database at DATABASE\n\ + (default: %s)\n\ + -h, --help display this help message\n\ + -o, --options list available options\n\ + -V, --version display version information\n\ +", program_name, program_name, UDP_PORT, default_db()); + vlog_usage(); + exit(EXIT_SUCCESS); +} + +static void +parse_options(int argc, char *argv[]) +{ + enum { + VLOG_OPTION_ENUMS, + }; + + static struct option long_options[] = { + {"port", required_argument, NULL, 'p'}, + {"db", required_argument, NULL, 'd'}, + {"help", no_argument, NULL, 'h'}, + {"version", no_argument, NULL, 'V'}, + VLOG_LONG_OPTIONS, + {NULL, 0, NULL, 0} + }; + char *short_options = ovs_cmdl_long_options_to_short_options(long_options); + + for (;;) { + int c; + + c = getopt_long(argc, argv, short_options, long_options, NULL); + if (c == -1) { + break; + } + + switch (c) { + case 'd': + db_remote = xstrdup(optarg); + break; + + case 'p': + udp_port = atoi(optarg); + break; + + case 'h': + usage(); + + case 'V': + ovs_print_version(OFP13_VERSION, OFP13_VERSION); + exit(EXIT_SUCCESS); + + VLOG_OPTION_HANDLERS + + case '?': + exit(EXIT_FAILURE); + + default: + abort(); + } + } + free(short_options); + + if (!db_remote) { + db_remote = xstrdup(default_db()); + } + if (!udp_port) { + udp_port = UDP_PORT; + } +} + +static void +udp_exit(struct unixctl_conn *conn, int argc OVS_UNUSED, + const char *argv[] OVS_UNUSED, void *exiting_) +{ + bool *exiting = exiting_; + *exiting = true; + + unixctl_command_reply(conn, NULL); +} + +static void +cache_run(struct udp_context *ctx) +{ + const struct openrec_name_cache *nc; + const struct openrec_virtual_network *vn; + const struct openrec_virtual_link *vl; + + shash_empty(&ctx->names); + shash_empty(&ctx->networks); + shash_empty(&ctx->links); + + OPENREC_NAME_CACHE_FOR_EACH (nc, ctx->idl) { + VLOG_DBG("name_cache: %s %s", nc->name, nc->address); + shash_add(&ctx->names, nc->name, nc); + } + + OPENREC_VIRTUAL_NETWORK_FOR_EACH (vn, ctx->idl) { + VLOG_DBG("virtual_network: %s %s", vn->name, vn->address); + shash_add(&ctx->networks, vn->name, vn); + } + + OPENREC_VIRTUAL_LINK_FOR_EACH (vl, ctx->idl) { + VLOG_DBG("virtual_link: %s %s", vl->network, vl->connection); + if (!strncmp(vl->connection, "any", 3) || !strlen(vl->connection)) { + shash_add(&ctx->links, vl->device, vl); + } else { + shash_add(&ctx->links, vl->connection, vl); + } + } +} + +static void +ping_run(struct udp_context *ctx) +{ + char address[128] = {0}; + struct udp_server *srv = ctx->srv; + + if (time_msec() - srv->send_t < 5 *1000) { + return; + } + + struct udp_connect conn = { + .socket = srv->socket, + .remote_port = UDP_PORT, + .remote_address = address, + }; + struct shash_node *node; + + SHASH_FOR_EACH(node, &ctx->links) { + const struct 
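
cache_run() in core/udp/main.c above rebuilds three in-memory indexes from the OVSDB contents whenever a transaction runs. The virtual_link index uses two different keys: rows without a concrete connection (empty or "any") are keyed by their device name ("spi:N"), which is what pong_run() later looks up when a keepalive arrives, while rows with a concrete "udp:host:port" connection are keyed by that connection for the active ping side. A sketch of that keying rule, with a hypothetical helper name:

```
/* Keying rule applied by cache_run() above when indexing virtual_link rows.
 * link_key() is a hypothetical helper, shown for illustration only. */
#include <stddef.h>
#include <string.h>

static const char *link_key(const char *device, const char *connection)
{
    if (connection == NULL || connection[0] == '\0'
        || strncmp(connection, "any", 3) == 0) {
        return device;        /* passive side: found later via "spi:N" */
    }
    return connection;        /* active side: keyed by "udp:host:port" */
}
```
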
openrec_virtual_link *vl = node->data; + if (strncmp(vl->device, "spi:", 4) || strncmp(vl->connection, "udp:", 4)) { + continue; + } + VLOG_DBG("send_ping to %s on %s\n", vl->connection, vl->device); + ovs_scan(vl->device, "spi:%d", &conn.spi); + ovs_scan(vl->connection, "udp:%[^:]:%d", address, &conn.remote_port); + + const struct shash_node *nc_node = shash_find(&ctx->names, address); + if (nc_node) { + const struct openrec_name_cache *nc = nc_node->data; + conn.remote_address = nc->address; + } + + send_ping_once(&conn); + } + srv->send_t = time_msec(); +} + +static void +pong_run(struct udp_context *ctx) +{ + int retval; + u_int8_t buf[1024]; + struct sockaddr_in from; + + struct udp_server *srv = ctx->srv; + struct udp_message *data = (struct udp_message *)buf; + + retval = recv_ping_once(srv, &from, buf, sizeof buf); + if (retval <= 0) { + return; + } + const char *remote_addr = inet_ntoa(from.sin_addr); + char *spi_conn = xasprintf("spi:%d", ntohl(data->spi)); + struct shash_node *node = shash_find(&ctx->links, spi_conn); + + VLOG_DBG("pong_run from: %s:%d and spi %d\n", remote_addr, ntohs(from.sin_port), ntohl(data->spi)); + if (node) { + struct openrec_virtual_link *vl = node->data; + VLOG_DBG("pong_run virtual link: %s %s\n", vl->connection, vl->network); + struct sockaddr_in dst_addr = from; + u_int32_t seqno = ntohl(data->seqno) + 1; + data->seqno = htonl(seqno); + retval = sendto(srv->socket, data, sizeof *data, 0, (struct sockaddr *)&dst_addr, sizeof dst_addr); + if (retval <= 0) { + VLOG_WARN_RL(&rl, "%s: could not send data\n", remote_addr); + } + // remote_connection=udp:a.b.c.d:1024 + char *connection = xasprintf("udp:%s:%d", remote_addr, ntohs(from.sin_port)); + openrec_virtual_link_update_status_setkey(vl, "remote_connection", connection); + free(connection); + } + free(spi_conn); +} + +static void +ping_wait(struct udp_context *ctx) +{ + poll_timer_wait_until(time_msec() + 5 * 1000); +} + +static void +pong_wait(struct udp_context *ctx) +{ + struct udp_server *srv = ctx->srv; + + poll_fd_wait(srv->socket, POLLIN); +} + +int +main(int argc, char *argv[]) +{ + struct unixctl_server *unixctl; + bool exiting = false; + int retval = 0; + char *unixdir; + + ovs_cmdl_proctitle_init(argc, argv); + ovs_set_program_name(argv[0], CORE_PACKAGE_VERSION); + + service_start(&argc, &argv); + parse_options(argc, argv); + + unixdir = unixctl_dir(); + /* Open and register unixctl */ + retval = unixctl_server_create(unixdir, &unixctl); + if (retval) { + goto RET; + } + unixctl_command_register("exit", "", 0, 0, udp_exit, &exiting); + + /* Connect to OpenLAN database. 
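
ping_run() and pong_run() above implement the keepalive exchange: every five seconds the active side parses each link's device ("spi:N") and connection ("udp:host:port"), resolves the host through the name cache, and sends a small probe; the passive side matches the incoming SPI against its link index, echoes the message with seqno+1, and records the sender's observed address and port in the row's remote_connection status. A self-contained sketch of the link-parsing step, using sscanf in place of ovs_scan but with the same format strings:

```
/* Self-contained sketch of how ping_run() above turns a virtual_link row
 * into a ping target. struct ping_target and parse_link() are hypothetical;
 * the real code fills a struct udp_connect using ovs_scan. */
#include <stdio.h>

struct ping_target {
    unsigned spi;
    char address[128];
    int port;
};

static int parse_link(const char *device, const char *connection,
                      struct ping_target *t)
{
    t->port = 4500;                       /* default ESP-in-UDP port */
    if (sscanf(device, "spi:%u", &t->spi) != 1) {
        return -1;                        /* not an IPSec link */
    }
    if (sscanf(connection, "udp:%127[^:]:%d", t->address, &t->port) < 1) {
        return -1;                        /* no usable remote address */
    }
    return 0;
}
```
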
*/ + struct ovsdb_idl_loop open_idl_loop = OVSDB_IDL_LOOP_INITIALIZER( + ovsdb_idl_create(db_remote, &openrec_idl_class, true, true)); + ovsdb_idl_get_initial_snapshot(open_idl_loop.idl); + + struct udp_server srv = { + .port = udp_port, + .socket = -1, + .send_t = time_msec(), + }; + open_socket(&srv); + if (configure_socket(&srv) < 0) { + VLOG_ERR("configure_socket: %s\n", strerror(errno)); + goto RET; + } + + struct udp_context ctx = { + .idl = open_idl_loop.idl, + .srv = &srv, + }; + + shash_init(&ctx.names); + shash_init(&ctx.networks); + shash_init(&ctx.links); + + while(!exiting) { + ctx.idl_txn = ovsdb_idl_loop_run(&open_idl_loop); + + if (ctx.idl_txn) { + cache_run(&ctx); + } + + ping_run(&ctx); + pong_run(&ctx); + + ping_wait(&ctx); + pong_wait(&ctx); + + unixctl_server_run(unixctl); + unixctl_server_wait(unixctl); + if (exiting) { + poll_immediate_wake(); + } + ovsdb_idl_loop_commit_and_wait(&open_idl_loop); + poll_block(); + if (should_service_stop()) { + exiting = true; + } + } + + shash_destroy(&ctx.names); + shash_destroy(&ctx.networks); + shash_destroy(&ctx.links); + + unixctl_server_destroy(unixctl); + ovsdb_idl_loop_destroy(&open_idl_loop); + service_stop(); + +RET: + if (db_remote) free(db_remote); + if (default_db_) free(default_db_); + if (unixdir) free(unixdir); + + exit(retval); +} diff --git a/core/udp/udp.c b/core/udp/udp.c new file mode 100755 index 0000000..fa05180 --- /dev/null +++ b/core/udp/udp.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "openvswitch/dynamic-string.h" +#include "openvswitch/vlog.h" + +#include "socket-util.h" + +#include "udp.h" + +VLOG_DEFINE_THIS_MODULE(udp); + +/* Rate limit for error messages. 
*/ +static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); + +void +print_hex(const char *prefix, u_int8_t *data, int len) +{ + if (VLOG_IS_DBG_ENABLED()) { + struct ds s; + ds_init(&s); + for (int i = 0; i < len; i++ ) { + ds_put_format(&s, "%02x ", data[i]); + } + VLOG_DBG("%s%s\n", prefix, ds_cstr(&s)); + ds_destroy(&s); + } +} + +int +send_ping_once(struct udp_connect *conn) +{ + int retval = 0; + struct udp_message data = { + .padding = {0, 0}, + .spi = htonl(conn->spi), + }; + data.seqno = htonl(conn->seqno++); + struct sockaddr_in dst_addr = { + .sin_family = AF_INET, + .sin_port = htons(conn->remote_port), + .sin_addr = { + .s_addr = inet_addr(conn->remote_address), + }, + }; + retval = sendto(conn->socket, &data, sizeof data, 0, (struct sockaddr *)&dst_addr, sizeof dst_addr); + if (retval <= 0) { + VLOG_WARN_RL(&rl, "%s: could not send data\n", conn->remote_address); + } + return retval; +} + +int +recv_ping_once(struct udp_server *srv, struct sockaddr_in *addr, u_int8_t *buf, size_t len) +{ + struct udp_message *data = (struct udp_message *)buf; + int retval = 0, addrlen = sizeof *addr; + + memset(data, 0, sizeof *data); + retval = recvfrom(srv->socket, buf, len, 0, (struct sockaddr *)addr, &addrlen); + if ( retval <= 0 ) { + if (errno == EAGAIN) { + return 0; + } + VLOG_ERR_RL(&rl, "recvfrom: %s\n", strerror(errno)); + return retval; + } + const char *remote_addr = inet_ntoa(addr->sin_addr); + VLOG_DBG("recvfrom: [%s:%d] %d bytes\n", remote_addr, ntohs(addr->sin_port), retval); + print_hex("recvfrom: ", buf, retval); + return retval; +} + +int +open_socket(struct udp_server *srv) +{ + int op = 1; + struct sockaddr_in addr = { + .sin_family = AF_INET, + .sin_port = htons(srv->port), + .sin_addr = { + .s_addr = INADDR_ANY, + }, + }; + + srv->socket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + if (srv->socket == -1) { + return -1; + } + if (setsockopt(srv->socket, SOL_SOCKET, SO_REUSEADDR, (void *)&op, sizeof op) < 0) { + return -1; + } + if (bind(srv->socket, (struct sockaddr *)&addr, sizeof addr) == -1) { + return -1; + } + set_nonblocking(srv->socket); + + return srv->socket; +} + +int +configure_socket(struct udp_server *srv) +{ + int encap = UDP_ENCAP_ESPINUDP; + struct xfrm_userpolicy_info pol; + + memset(&pol, 0, sizeof(pol)); + pol.action = XFRM_POLICY_ALLOW; + pol.sel.family = AF_INET; + + pol.dir = XFRM_POLICY_OUT; + if (setsockopt(srv->socket, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof pol) < 0) { + return -1; + } + pol.dir = XFRM_POLICY_IN; + if (setsockopt(srv->socket, IPPROTO_IP, IP_XFRM_POLICY, &pol, sizeof pol) < 0) { + return -1; + } + if (setsockopt(srv->socket, IPPROTO_UDP, UDP_ENCAP, &encap, sizeof encap) < 0) { + return -1; + } + return srv->socket; +} diff --git a/core/udp/udp.h b/core/udp/udp.h new file mode 100755 index 0000000..3a4ff5a --- /dev/null +++ b/core/udp/udp.h @@ -0,0 +1,40 @@ +#ifndef __OPENUDP_UDP_H +#define __OPENUDP_UDP_H 1 + +#include + +#include "openvswitch/shash.h" + +struct udp_message { + u_int32_t padding[2]; + u_int32_t spi; + u_int32_t seqno; +}; + +struct udp_server { + u_int16_t port; + int32_t socket; + long long int send_t; +}; + +struct udp_connect { + int32_t socket; + int32_t remote_port; + const char *remote_address; + u_int32_t spi; + u_int32_t seqno; +}; + +int send_ping_once(struct udp_connect *); +int recv_ping_once(struct udp_server *, struct sockaddr_in *, u_int8_t *, size_t); + +int open_socket(struct udp_server *); +int configure_socket(struct udp_server *); + +static inline void shash_empty(struct shash *sh) +{ 
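
open_socket() and configure_socket() in core/udp/udp.c above prepare the UDP/4500 socket for ESP-in-UDP: the socket is bound and made non-blocking, per-socket XFRM policies are installed for both directions so the daemon's own UDP traffic is not subject to IPsec processing, and UDP_ENCAP_ESPINUDP asks the kernel to consume encapsulated ESP on this socket, so only the zero-prefixed control messages appear to reach user space. The stand-alone sketch below shows how these helpers fit together; the peer address is a documentation placeholder and the function is not part of the repository.

```
/* Stand-alone sketch (not part of the repository) combining the udp.c
 * helpers above: bind the ESP-in-UDP socket and send one keepalive. */
#include <stdio.h>

#include "udp.h"

int keepalive_demo(void)
{
    struct udp_server srv = {
        .port = 4500,
        .socket = -1,
    };

    if (open_socket(&srv) < 0 || configure_socket(&srv) < 0) {
        perror("espinudp socket");
        return -1;
    }

    struct udp_connect conn = {
        .socket = srv.socket,
        .remote_port = 4500,
        .remote_address = "192.0.2.10",   /* placeholder peer */
        .spi = 12,
        .seqno = 0,
    };

    /* send_ping_once() returns the sendto() result (<= 0 on failure). */
    return send_ping_once(&conn) > 0 ? 0 : -1;
}
```
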
+ shash_destroy(sh); + shash_init(sh); +} + +#endif diff --git a/core/version.h b/core/version.h new file mode 100644 index 0000000..06c9631 --- /dev/null +++ b/core/version.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2021-2022 OpenLAN Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 3 as + * published by the Free Software Foundation. + * + */ + +#ifndef OPENUDP_VERSION_H +#define OPENUDP_VERSION_H 1 + +#define CORE_PACKAGE_STRING "opencore 5.10.4" +#define CORE_PACKAGE_VERSION "5.10.4" + +#define CORE_LIB_VERSION 0 +#define CORE_LIB_REVISION 0 +#define CORE_LIB_AGE 0 + +#endif /* version.h */ diff --git a/dist/openlan-switch.docker b/dist/openlan-switch.docker new file mode 100755 index 0000000..d4b1021 --- /dev/null +++ b/dist/openlan-switch.docker @@ -0,0 +1,21 @@ + +FROM centos:7 + +ARG VERSION + +WORKDIR /root + +ADD build/openlan-switch-${VERSION}-1.el7.x86_64.rpm /tmp + +RUN yum install -y epel-release \ + && yum install -y iptables bridge-utils \ + && yum install -y /tmp/openlan-switch-${VERSION}-1.el7.x86_64.rpm + +LABEL application="OpenLAN Switch Application" +LABEL maintainer="luscis@163.com" + +EXPOSE 10000/tcp +EXPOSE 10002/tcp +EXPOSE 10002/udp + +CMD ["/usr/bin/openlan-switch", "-conf:dir", "/etc/openlan/switch", "-log:level", "20"] diff --git a/dist/openlan.spec.in b/dist/openlan.spec.in new file mode 100755 index 0000000..e1d1a00 --- /dev/null +++ b/dist/openlan.spec.in @@ -0,0 +1,70 @@ +Name: openlan +Version: @VERSION@ +Release: 1%{?dist} +Source: %{name}-%{version}-source.tar.gz +Summary: OpenLAN's Project Software +Group: Applications/Communications +License: GPL-3.0 +URL: https://github.com/luscis/openlan +BuildRequires: unbound-devel openssl-devel libcap-ng-devel +Requires: net-tools, iptables, iputils, openvpn, openssl, openvswitch, dnsmasq + +# binary no-debug by go build +%define __debug_install_post \ +%{_rpmconfigdir}/find-debuginfo.sh %{?_find_debuginfo_opts} "%{_builddir}/%{?buildsubdir}"\ +%{nil} + +%description +OpenLAN's Project Software + +%prep +%setup -q + +%build +make clean +make linux + +%install +make install LINUX_DIR=%{buildroot} + +%pre + +%post +[ -e "/etc/openlan/switch/switch.json" ] || { + /usr/bin/cp -rvf /etc/openlan/switch/switch.json.example /etc/openlan/switch/switch.json +} +[ -e "/var/openlan/openvpn/dh.pem" ] || { + /usr/bin/openssl dhparam -out /var/openlan/openvpn/dh.pem 2048 +} +[ -e "/var/openlan/openvpn/ta.key" ] || { + /usr/sbin/openvpn --genkey --secret /var/openlan/openvpn/ta.key +} +[ -e "/etc/openlan/switch/confd.db" ] || { + /usr/bin/ovsdb-tool create /etc/openlan/switch/confd.db /etc/openlan/switch/confd.schema.json +} +[ -e "/var/openlan/confd.sock" ] && { + /usr/bin/ovsdb-client convert unix:///var/openlan/confd.sock /etc/openlan/switch/confd.schema.json +} +[ -e "/etc/sysctl.d/90-openlan.conf" ] && { + /usr/sbin/sysctl -p /etc/sysctl.d/90-openlan.conf || : +} +[ -e "/etc/openlan/switch/network/ipsec.json" ] || { + /usr/bin/cat > /etc/openlan/switch/network/ipsec.json < +-----BEGIN CERTIFICATE----- +MIIDGDCCAgCgAwIBAgIJAJPUuCImsVp3MA0GCSqGSIb3DQEBCwUAMA0xCzAJBgNV +BAMMAm5qMB4XDTIwfcTAwNjE4MDgyNloXDTMwMTAwNDE4MDgyNlowDTELMAkGA1UE +AwwCbmowggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDL4vdtg5m0XeEa +99f8bUO4TmuouhQ5+jr2kDxN4adDGHTANqOpV9yTsHE3NfrcOlecm3MCk1b05g3p +6nzu4iF4de/c6avy0Vpq8w8XnfUYS1lCIb9R9q8heN5P3Awn3Tszctw+QMBgH0Hb +TrYYOm6QIir0XOyB82traZxZyVL6XObVy993wbRpyVF4YDFzbSx/08au7+7tNf4l 
+DZRTCPHx0lYYKgzAwdM9yD95D/mpbfsE5TxArzADNTEBUSvoUU3q7ulEaSE0yKgP +iu5c8UIHbBFDAy98e+kqgqLd8ljOuMNEezZsLm1mxrFFQBQ/Efw3LqzAjR/D5fJw +qr6cRZPFAgMBAAGjezB5MB0GA1UdDgQWBBQqkJzxLx4q0nA/MaT+2YJef5CFBzA9 +BgNVHSMENjA0gBQqkJzxLx4q0nA/MaT+2YJef5CFB6ERpA8wDTELMAkGA1UEAwwC +bmqCCQCT1LgiJrFadzAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG +9w0BAQsFAAOCAQEAqGY0Z2cQmtUWNhTWQQ0Lq4hSq0zpRqXBwF1jy8j/aLAzdw9M +V/3/N5UvbyNyUz6sRmFc9E+qY+8ze7cby5O8eydOXoht8UX/agh3IroEghy+JA5d +QSkZkk5DAxUQamXVRYKCySJwzbmHSx5wzb9Dh8VXU5ILippdTMBHvrpr86+NDF2m +VsusYb+S1h2GgfvwxqDMwiqz06sy0Jm+mTqJn5Ssf9rdmD7lJiE8953LmEUtdepe +aPL9aLDbePx+6UQEchotYYnV4f7cuR6l9gChshih/EE9eorU/itgY4+ZgOuT60BB +am0CqJZqZF4SdWuuz2wzVsQWsITIB3JCDhR0rQ== +-----END CERTIFICATE----- + +ns-cert-type server + +# +# 2048 bit OpenVPN static key +# +-----BEGIN OpenVPN Static key V1----- +164ee33c50503cea99000524dccdb7f055 +dc15bad47ebfe29beaf0988ba87da88522 +4b10f6a458d6e8e7d4f6bd7532686ed2 +827e086c3dc89f87483a078f7918c71f1 +bae2f8fe4f94e966a772129cb1d8114c +fd1aa1754ef590fbf5e0c88c433eec82 +d99d74b25c8595f515554946e045e3b7 +d79ee8b0042c27994ee9f342d56668bb +33be2f1bfca0809e50b9acfae9532b24 +2337781ac187dc691ea5606c6e3b1a58 +ed2dd0fbcc0ac513ecad1aa7890065e0 +297494854df9da1d931b0eab4b935987 +38202c9ea8b8f9b769ad4357e2bab881 +706fae5bcbc64737973f168f556b6bfb +726ef63b7c38c82e0baf089106743891 +70656a96ba2dd6d594aa7bef00e582df +-----END OpenVPN Static key V1----- + +key-direction 1 +cipher AES-256-CBC +auth-nocache +verb 4 +auth-user-pass diff --git a/dist/resource/fabric.json.example b/dist/resource/fabric.json.example new file mode 100755 index 0000000..8aae2f9 --- /dev/null +++ b/dist/resource/fabric.json.example @@ -0,0 +1,62 @@ +{ + "name": "fabric", + "provider": "fabric", + "bridge": { + "name": "br-tun" + }, + "specifies": { + "tcpMss": 1332, + "tunnels": [ + { + "remote": "100.64.0.20" + }, + { + "remote": "100.64.0.21" + }, + { + "remote": "100.64.0.33" + } + ], + "networks": [ + { + "vni": 2077, + "outputs": [ + { + "interface": "vxlan:10.10.1.7:3303" + }, + { + "vlan": 60, + "interface": "gre:10.10.1.8" + } + ] + }, + { + "vni": 2088, + "outputs": [ + { + "vlan": 99, + "interface": "gre:10.10.1.10" + } + ] + }, + { + "vni": 2099, + "bridge": { + "address": "172.16.100.1/24" + } + "outputs": [ + { + "vlan": 100, + "interface": "eth1" + } + ], + "subnet": { + "start": "172.32.100.250", + "end": "172.32.100.254", + "dns": "8.8.8.8,8.8.4.4", + }, + "dhcp": "enable" + } + ] + } +} diff --git a/dist/resource/ipsec.json.example b/dist/resource/ipsec.json.example new file mode 100755 index 0000000..816d4af --- /dev/null +++ b/dist/resource/ipsec.json.example @@ -0,0 +1,41 @@ +{ + "name": "ipsec", + "provider": "esp", + "specifies": { + "address": "100.64.0.10", + "members": [ + { + "spi": 300, + "peer": "100.64.0.30", + "state": { + "auth": "a263d01a96db11eb9", + "crypt": "9b73bc48e3864b3ebc" + } + }, + { + "spi": 100, + "peer": "100.64.0.20", + "state": { + "remote": "2.16.1.2" + } + }, + { + "spi": 200, + "address": "100.64.0.11", + "peer": "100.64.0.30", + "state": { + "local": "192.168.1.10", + "remote": "2.16.1.2", + "auth": "a263d01a96db11eb9", + "crypt": "9b73bc48e3864b3ebc" + }, + "policy": [ + { + "source": "192.168.1.0/24", + "destination": "192.168.2.0/24" + } + ] + } + ] + } +} diff --git a/dist/resource/network.json.example b/dist/resource/network.json.example new file mode 100755 index 0000000..a46c525 --- /dev/null +++ b/dist/resource/network.json.example @@ -0,0 +1,55 @@ +{ + "name": "example", + "bridge": { + "name": "br-eth0", + 
"address": "172.32.100.40/24" + }, + "subnet": { + "start": "172.32.100.250", + "end": "172.32.100.254", + "netmask": "255.255.255.0", + "dns": "8.8.8.8,8.8.4.4", + }, + "hosts": [ + { + "hostname": "pc-99", + "address": "172.32.100.99" + } + ], + "routes": [ + { + "prefix": "172.32.10.0/24" + } + ], + "password": [ + { + "username": "hi", + "password": "1f4ee82b5eb6" + } + ], + "links": [ + { + "protocol": "tls", + "connection": "hi.openlan.net", + "username": "hi", + "password": "1f4ee82b5eb6" + } + ], + "openvpn": { + "protocol": "tcp", + "listen": "0.0.0.0:3295", + "subnet": "172.32.195.0/24", + "push": [ + "dhcp-option DNS 8.8.8.8" + ], + "clients": [ + { + "name": "hi", + "address": "172.32.195.233", + "netmask": "172.32.195.234" + } + ] + }, + "acl": "acl-100", + "dhcp": "enable", +} diff --git a/dist/resource/openlan-confd.service b/dist/resource/openlan-confd.service new file mode 100755 index 0000000..be2b94f --- /dev/null +++ b/dist/resource/openlan-confd.service @@ -0,0 +1,16 @@ +[Unit] +Description=OpenLAN Switch Database Unit +After=syslog.target network-pre.target +Before=network.target network.service + +[Service] +Type=forking +PIDFile=/var/openlan/confd.pid +Restart=on-failure +EnvironmentFile=-/etc/sysconfig/openlan/confd + +ExecStart=/var/openlan/script/confd start +ExecStop=/var/openlan/script/confd stop + +[Install] +WantedBy=multi-user.target diff --git a/dist/resource/openlan-point@.service b/dist/resource/openlan-point@.service new file mode 100755 index 0000000..cdf1d40 --- /dev/null +++ b/dist/resource/openlan-point@.service @@ -0,0 +1,12 @@ +[Unit] +Description=OpenLAN Point daemon for %I +After=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/openlan/point.cfg +ExecStart=/usr/bin/openlan-point $OPTIONS -conf /etc/openlan/%i.json +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/dist/resource/openlan-proxy.service b/dist/resource/openlan-proxy.service new file mode 100755 index 0000000..517b43a --- /dev/null +++ b/dist/resource/openlan-proxy.service @@ -0,0 +1,13 @@ +[Unit] +Description=OpenLAN Proxy daemon +After=network.target + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/openlan/proxy.cfg +ExecStart=/usr/bin/openlan-proxy $OPTIONS +LimitNOFILE=102400 +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/dist/resource/openlan-switch.service b/dist/resource/openlan-switch.service new file mode 100755 index 0000000..856ea17 --- /dev/null +++ b/dist/resource/openlan-switch.service @@ -0,0 +1,15 @@ +[Unit] +Description=OpenLAN Virtual Switch daemon +After=network.target openlan-confd.service +Requires=openlan-confd.service + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/openlan/switch.cfg +ExecStartPre=-/var/openlan/script/setup.sh +ExecStart=/usr/bin/openlan-switch $OPTIONS +LimitNOFILE=102400 +Restart=always + +[Install] +WantedBy=multi-user.target diff --git a/dist/resource/openssl b/dist/resource/openssl new file mode 100755 index 0000000..0e0750e --- /dev/null +++ b/dist/resource/openssl @@ -0,0 +1,41 @@ +#!/bin/bash + +action=$1 + +if [ "$action" == "" ]; then + echo "$0 " + exit 0 +fi + +[ -e "ca" ] || mkdir ca +[ -e "cert" ] || mkdir cert + +if [ "$action" == "cert" ]; then + source cert.vars + [ -e "cert/key" ] || openssl genrsa -out cert/key 2048 + # openssl rsa -in key -noout -text + [ -e "cert/key.pub" ] || openssl rsa -in cert/key -pubout -out cert/key.pub + # openssl rsa -pubin -in key.pub -noout -text + # using private to sign pem and generate a csr request. 
+ [ -e "cert/crt.csr" ] || openssl req -new -key cert/key -out cert/crt.csr -subj "/C=$C/ST=$ST/L=$NJ/O=$O/OU=$OU/CN=$CN/emailAddress=$emailAddress" +fi + +if [ "$action" == "ca" ]; then #### CA &x509 + source ca.vars + [ -e "ca/ca.key" ] || openssl genrsa -out ca/ca.key 2048 + [ -e "ca/ca.crt" ] || openssl req -new -x509 -days 3650 -key ca/ca.key -out ca/ca.crt -subj "/C=$C/ST=$ST/L=$L/O=$O/OU=$OU/CN=$CN/emailAddress=$emailAddress" +fi + +if [ "$action" == "sign" ]; then + #### Sign cert using myself ca + [ -e "cert/crt" ] || openssl x509 -req -days 730 -in cert/crt.csr -CA ./ca/ca.crt -CAkey ./ca/ca.key -CAcreateserial -out cert/crt -sha256 -extfile cert.ext +fi + +if [ "$action" == "show" ]; then + echo "### ca.crt" + openssl x509 -in ca/ca.crt -noout -text + echo "### crt.csr" + openssl req -noout -text -in cert/crt.csr + echo "### crt" + openssl x509 -in cert/crt -noout -text +fi diff --git a/dist/resource/openvpn.md b/dist/resource/openvpn.md new file mode 100755 index 0000000..d50fcce --- /dev/null +++ b/dist/resource/openvpn.md @@ -0,0 +1,30 @@ +# OpenVPN + + yum install -y epel-release + + yum install -y openvpn + +## Generate Diffie-Hellman + + openssl dhparam -out /var/openlan/openvpn/dh.pem 1024 + +## Generate TLS Auth Key + + openvpn --genkey --secret /var/openlan/openvpn/ta.key + + +# Configure OpenVPN in Network + + { + "name": "example", + "openvpn": { + "listen": "0.0.0.0:1194", + "subnet": "10.9.9.0/24" + } + } + + +## Restart OpenLAN Switch Service + + systemctl reload openlan-switch + diff --git a/dist/resource/password.example b/dist/resource/password.example new file mode 100755 index 0000000..75539fc --- /dev/null +++ b/dist/resource/password.example @@ -0,0 +1,2 @@ +hi@example:efb19651 +hei@guest:abef6f1e diff --git a/dist/resource/point.cfg b/dist/resource/point.cfg new file mode 100755 index 0000000..b13d588 --- /dev/null +++ b/dist/resource/point.cfg @@ -0,0 +1,4 @@ +# --- point.cfg --- +# This file define the configuration for OpenLAN Point. + +OPTIONS='-log:file /dev/null -terminal off -cacert /etc/openlan/ca-trusted.crt' diff --git a/dist/resource/point.json.example b/dist/resource/point.json.example new file mode 100755 index 0000000..7b6dd4e --- /dev/null +++ b/dist/resource/point.json.example @@ -0,0 +1,7 @@ +{ + "protocol": "tls", + "connection": "who.openlan.net", + "username": "hi@default", + "password": "cb2ff088a34d", + "cert": { "insecure": true } +} diff --git a/dist/resource/point.json.full.example b/dist/resource/point.json.full.example new file mode 100755 index 0000000..399c4a6 --- /dev/null +++ b/dist/resource/point.json.full.example @@ -0,0 +1,19 @@ +{ + "network": "default", + "interface": { + "name": "tap0", + "bridge": "br-default", + "address": "172.32.100.10/24" + }, + "connection": "who.openlan.net", + "username": "hi", + "password": "1f4ee82b5eb6", + "protocol": "tls", + "cert": { + "insecure": true + }, + "crypt": { + "algo": "aes-256", + "secret": "1f4ee82b5eb6" + } +} diff --git a/dist/resource/proxy.cfg b/dist/resource/proxy.cfg new file mode 100755 index 0000000..19b3c7b --- /dev/null +++ b/dist/resource/proxy.cfg @@ -0,0 +1,4 @@ +# --- point.cfg --- +# This file define the configuration for OpenLAN Proxy. 
+ +OPTIONS='-log:file /dev/null -conf /etc/openlan/proxy.json' \ No newline at end of file diff --git a/dist/resource/proxy.json.example b/dist/resource/proxy.json.example new file mode 100755 index 0000000..de73bfd --- /dev/null +++ b/dist/resource/proxy.json.example @@ -0,0 +1,42 @@ +{ + "socks": [ + { + "listen": "0.0.0.0:11080" + } + ], + "shadow": [ + { + "server": ":8488", + "password": "ecd0820973c9", + "cipher": "AEAD_CHACHA20_POLY1305" + } + ], + "http": [ + { + "listen": "0.0.0.0:11082", + "auth": { + "username": "hi", + "password": "cb2ff088a34d" + } + }, + { + "listen": "0.0.0.0:11083", + "auth": { + "username": "hi", + "password": "cb2ff088a34d" + }, + "cert": { + "dir": "/var/openlan/cert" + } + } + ], + "tcp": [ + { + "listen": "0.0.0.0:80", + "target": [ + "192.168.100.80:80", + "192.168.100.81:80" + ] + } + ] +} diff --git a/dist/resource/switch.cfg b/dist/resource/switch.cfg new file mode 100755 index 0000000..7a00f4c --- /dev/null +++ b/dist/resource/switch.cfg @@ -0,0 +1,5 @@ +# --- switch.cfg --- +# This file define the configuration for OpenLAN Switch. + +# ESPUDP="4600" +OPTIONS='-log:file /dev/null -conf:dir /etc/openlan/switch' \ No newline at end of file diff --git a/dist/resource/switch.json.example b/dist/resource/switch.json.example new file mode 100755 index 0000000..bc1e9b2 --- /dev/null +++ b/dist/resource/switch.json.example @@ -0,0 +1,11 @@ +{ + "cert": { + "dir": "/var/openlan/cert" + }, + "http": { + "public": "/var/openlan/public" + }, + "crypt": { + "secret": "cb2ff088a34d" + } +} diff --git a/dist/resource/switch.json.full.example b/dist/resource/switch.json.full.example new file mode 100755 index 0000000..e3d6b76 --- /dev/null +++ b/dist/resource/switch.json.full.example @@ -0,0 +1,37 @@ +{ + "protocol": "tls", + "cert": { + "dir": "/var/openlan/cert" + }, + "http": { + "public": "/var/openlan/public" + }, + "crypt": { + "algo": "aes-128", + "secret": "cb2ff088a34d" + }, + "inspect": [ + "neighbor", + "online" + ], + "firewall": [ + { + "table": "nat", + "chain": "PREROUTING", + "protocol": "tcp", + "match": "tcp", + "destination": "0.0.0.0/0", + "dport": 80, + "jump": "DNAT", + "to-destination": "100.44.85.6:80" + } + ], + "ldap": { + "server": "ldap-server.net:389", + "bindDN": "cn=admin,dc=openlan,dc=com", + "password": "your-passowrd", + "baseDN": "dc=openlan,dc=com", + "attribute": "cn", + "filter": "(cn=%s)" + } +} diff --git a/dist/resource/v1024.json.example b/dist/resource/v1024.json.example new file mode 100644 index 0000000..a927258 --- /dev/null +++ b/dist/resource/v1024.json.example @@ -0,0 +1,26 @@ +{ + "name": "v1024", + "provider": "vxlan", + "specifies": { + "vni": 1024, + "fabric": "fabric" + }, + "bridge": { + "address": "192.168.55.1/24" + }, + "subnet": { + "start": "192.168.55.100", + "end": "192.168.55.130" + }, + "dhcp": "enable", + "outputs": [ + { + "vlan": 100, + "interface": "vxlan:3.3.3.5:43" + }, + { + "vlan": 0, + "interface": "gre:3.3.3.3" + } + ] +} diff --git a/dist/script/bridge b/dist/script/bridge new file mode 100755 index 0000000..02ce54c --- /dev/null +++ b/dist/script/bridge @@ -0,0 +1,67 @@ +#!/bin/bash + +## Load configuration. +cfg="eth0.cfg" +if [ $# -ge 1 ]; then + cfg="$1" + shift +fi +if [ -e "$cfg" ]; then + . $cfg +fi + +## Check enviroment. 
+if [ -z "$PHY" ] || [ -z "$ADDR" ] || [ -z "$PREFIX" ]; then + echo "Variable(PHY|ADDR|PREFIX) is NULL" + exit +fi + +yum install bridge-utils -y + +## Set variable +phy="$PHY" +addr="$ADDR" +prefix="$PREFIX" +br=br-"$PHY" +gw="$GATEWAY" +dns1="$DNS1" + +## Configure script +brCfg=/etc/sysconfig/network-scripts/ifcfg-"$br" +phyCfg=/etc/sysconfig/network-scripts/ifcfg-"$phy" + +## Generate bridge configure +echo "## Generate by OpenLAN project" > $brCfg +echo "STP=\"yes\"" >> $brCfg +echo "DELAY=\"2\"" >> $brCfg +echo "TYPE=\"Bridge\"" >> $brCfg +echo "NAME=\"$br\"" >> $brCfg +echo "DEVICE=\"$br\"" >> $brCfg +echo "BOOTPROTO=\"none\"" >> $brCfg +if [ -n "$addr" ]; then + echo "IPADDR=\"$addr\"" >> $brCfg +fi +if [ -n "$prefix" ]; then + echo "PREFIX=\"$prefix\"" >> $brCfg +fi +if [ -n "$gw" ]; then + echo "GATEWAY=\"$gw\"" >> $brCfg +fi +if [ -n "$dns1" ]; then + echo "DNS1=\"$dns1\"" >> $brCfg +fi +echo "ONBOOT=\"yes\"" >> $brCfg +echo "NM_CONTROLLED=\"no\"" >> $brCfg + +## Generate physical configure +echo "## Generate by OpenLAN project" > $phyCfg +echo "TYPE=\"Ethernet\"" >> $phyCfg +echo "NAME=\"$phy\"" >> $phyCfg +echo "DEVICE=\"$phy\"" >> $phyCfg +echo "BOOTPROTO=\"none\"" >> $phyCfg +echo "ONBOOT=\"yes\"" >> $phyCfg +echo "NM_CONTROLLED=\"no\"" >> $phyCfg +echo "BRIDGE=\"$br\"" >> $phyCfg +echo "BRIDGING_OPTS=\"path_cost=4\"" >> $phyCfg # 4: 1G, 2: 10G, 19: 100Mb and 100: 1Mb. + +ifdown "$br"; ifdown "$phy"; ifup "$br"; ifup "$phy"; diff --git a/dist/script/confd b/dist/script/confd new file mode 100755 index 0000000..3ca9d69 --- /dev/null +++ b/dist/script/confd @@ -0,0 +1,42 @@ +#!/bin/bash + +set -e + +command=$1 + +OVSDB_SERVER_BIN="/usr/bin/env ovsdb-server" +[ "$OVSDB_DATABASE" == "" ] && OVSDB_DATABASE="/etc/openlan/switch/confd.db" +[ "$OVSDB_LOG_FILE" == "" ] && OVSDB_LOG_FILE="/var/openlan/confd.log" +[ "$OVSDB_SOCK" == "" ] && OVSDB_SOCK="/var/openlan/confd.sock" +[ "$OVSDB_PID_FILE" == "" ] && OVSDB_PID_FILE="/var/openlan/confd.pid" + +function stop() { + [ -e "$OVSDB_PID_FILE" ] && kill "$(cat $OVSDB_PID_FILE)" +} + +function start() { + set $OVSDB_SERVER_BIN $OVSDB_DATABASE + set "$@" -vconsole:emer -vsyslog:err -vfile:info + set "$@" --remote=punix:"$OVSDB_SOCK" + set "$@" --log-file="$OVSDB_LOG_FILE" + set "$@" --pidfile="$OVSDB_PID_FILE" + [ "$OVSDB_OPTIONS" != "" ] && set "$@" $OVSDB_OPTIONS + set "$@" --detach + OVS_RUNDIR="/var/openlan" "$@" +} + +case $command in + start) + start + ;; + stop) + stop + ;; + restart) + restart + ;; + *) + echo >&2 "$0: unknown command \"$command\" (start/stop/restart)" + exit 1 + ;; +esac diff --git a/dist/script/eth0.cfg b/dist/script/eth0.cfg new file mode 100755 index 0000000..2785570 --- /dev/null +++ b/dist/script/eth0.cfg @@ -0,0 +1,5 @@ +PHY="eth0" +ADDR="192.168.2.2" +PREFIX="24" +GATEWAY="" +DNS1="" diff --git a/dist/script/ifcfg-veth0 b/dist/script/ifcfg-veth0 new file mode 100644 index 0000000..c79961c --- /dev/null +++ b/dist/script/ifcfg-veth0 @@ -0,0 +1,9 @@ +DEVICE="veth0" +TYPE="veth" +DEVICETYPE="Ethernet" +BOOTPROTO="static" +IPADDR="192.168.3.4" +NETMASK="255.255.255.0" +ONBOOT="no" +NM_CONTROLLED="no" +VETH_PARENT="br0" diff --git a/dist/script/ifdown-veth b/dist/script/ifdown-veth new file mode 100755 index 0000000..a54d351 --- /dev/null +++ b/dist/script/ifdown-veth @@ -0,0 +1,60 @@ +#!/bin/bash +# +# Derived from initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# Adopted for veth by Oleksandr Natalenko +# Copyright (C) 2015 Lanet Network +# +# Based on Network Interface Configuration 
System +# Copyright (c) 1996-2009 Red Hat, Inc. all rights reserved. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifdown-eth" +fi + +${OTHERSCRIPT} ${CONFIG} + +if [ -n "${VETH_PARENT}" ] && [ -x /usr/sbin/brctl ]; then + ip link set \ + dev ${DEVICE}-bp down + brctl delif -- \ + ${VETH_PARENT} ${DEVICE}-bp + [ -r /var/run/radvd/radvd.pid ] && kill -HUP $(cat /var/run/radvd/radvd.pid) + if [ -d /sys/class/net/${VETH_PARENT}/brif ] && [ $(ls -1 /sys/class/net/${VETH_PARENT}/brif | wc -l) -eq 0 ]; then + ip link set \ + dev ${VETH_PARENT} down + brctl delbr -- ${VETH_PARENT} + fi +fi + +ip link del ${DEVICE} + diff --git a/dist/script/ifup-veth b/dist/script/ifup-veth new file mode 100755 index 0000000..7cf6e98 --- /dev/null +++ b/dist/script/ifup-veth @@ -0,0 +1,78 @@ +#!/bin/bash +# +# Derived from initscripts-macvlan +# Copyright (C) 2014 Lars Kellogg-Stedman +# +# Adopted for veth by Oleksandr Natalenko +# Copyright (C) 2015 Lanet Network +# +# Based on Network Interface Configuration System +# Copyright (c) 1996-2009 Red Hat, Inc. all rights reserved. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +. /etc/init.d/functions + +cd /etc/sysconfig/network-scripts +. ./network-functions + +[ -f ../network ] && . ../network + +CONFIG=${1} + +need_config ${CONFIG} + +source_config + +OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-${REAL_DEVICETYPE}" + +if [ ! -x ${OTHERSCRIPT} ]; then + OTHERSCRIPT="/etc/sysconfig/network-scripts/ifup-eth" +fi + +ip link add \ + name ${DEVICE} \ + type veth \ + peer name ${DEVICE}-bp + +if [ -n "${VETH_MAC}" ]; then + ip link set \ + dev ${DEVICE} \ + address ${VETH_MAC} +fi + +if [ -n "${VETH_PARENT}" ] && [ -x /usr/sbin/brctl ]; then + if [ ! 
-d /sys/class/net/${VETH_PARENT}/bridge ]; then + brctl addbr -- \ + ${VETH_PARENT} 2>/dev/null + ip link set \ + dev ${VETH_PARENT} up + fi + ip addr flush dev ${DEVICE}-bp 2>/dev/null + ip link set \ + dev ${DEVICE}-bp up + ethtool_set + [ -n "${LINKDELAY}" ] && /bin/sleep ${LINKDELAY} + brctl addif -- \ + ${VETH_PARENT} ${DEVICE}-bp + for arg in $BRIDGING_OPTS ; do + key=${arg%%=*}; + value=${arg##*=}; + echo $value > /sys/class/net/${DEVICE}-bp/brport/$key + done + [ -r /var/run/radvd/radvd.pid ] && kill -HUP $(cat /var/run/radvd/radvd.pid) +fi + +${OTHERSCRIPT} ${CONFIG} + diff --git a/dist/script/install.sh b/dist/script/install.sh new file mode 100755 index 0000000..96d0308 --- /dev/null +++ b/dist/script/install.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +set -e + +tmp="" +installer="$0" +archive=$(grep -a -n "__ARCHIVE_BELOW__:$" $installer | cut -f1 -d:) + +function download() { + tmp=$(mktemp -d) + tail -n +$((archive + 1)) $installer | gzip -dc - | tar -xf - -C $tmp +} + +function requires() { + local os=$(cat /etc/os-release | grep ^ID= | sed 's/"//g') + if echo $os | grep -q -e centos -e redhat; then + yum install -y net-tools iptables iputils openvpn openssl openvswitch dnsmasq + elif echo $os | grep -q -e debian -e ubuntu; then + apt-get install -y net-tools iptables iproute2 openvpn openssl openvswitch-switch dnsmasq + fi +} + +function install() { + local source=$(find $tmp -name 'openlan-linux-*') + cd $source && { + /usr/bin/env \cp -rf ./{etc,usr,var} / + /usr/bin/env find ./ -type f > /usr/share/openlan.db + } +} + +function post() { + [ -e "/etc/openlan/switch/switch.json" ] || { + cp -rf /etc/openlan/switch/switch.json.example /etc/openlan/switch/switch.json + } + [ -e "/var/openlan/openvpn/dh.pem" ] || { + openssl dhparam -out /var/openlan/openvpn/dh.pem 2048 + } + [ -e "/var/openlan/openvpn/ta.key" ] || { + openvpn --genkey --secret /var/openlan/openvpn/ta.key + } + [ -e "/etc/openlan/switch/confd.db" ] || { + /usr/bin/ovsdb-tool create /etc/openlan/switch/confd.db /etc/openlan/switch/confd.schema.json + } + [ -e "/var/openlan/confd.sock" ] && { + /usr/bin/ovsdb-client convert unix:///var/openlan/confd.sock /etc/openlan/switch/confd.schema.json + } + sysctl -p /etc/sysctl.d/90-openlan.conf +} + +function finish() { + rm -rf $tmp + systemctl daemon-reload + echo "success" +} + +download +requires +install +post +finish +exit 0 diff --git a/dist/script/prepare b/dist/script/prepare new file mode 100755 index 0000000..10cb17f --- /dev/null +++ b/dist/script/prepare @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +systemctl disable firewalld +systemctl stop firewalld + +systemctl disable NetworkManager +systemctl stop NetworkManager + +systemctl enable network +systemctl start network + +cat > /etc/sysconfig/selinux <> /etc/sysctl.conf < ~/rpmbuild/SOURCES/$package-source.tar.gz + rm -rf $tmp +fi diff --git a/docs/central.md b/docs/central.md new file mode 100755 index 0000000..0ebe678 --- /dev/null +++ b/docs/central.md @@ -0,0 +1,130 @@ +# Central Branch Example + +## Topology + +``` + OLSW(Central) - 10.16.1.10/24 + ^ + | + Wifi(DNAT) + | + | + +---------------------Internet-----------------------+ + ^ ^ ^ + | | | + Branch1 Branch2 Branch3 + | | | + OLAP1 OLAP2 OLAP3 + 10.16.1.11/24 10.16.1.12/24 10.16.1.13/24 + +``` + +## Configure OLSW + +生成预共享密钥: + +``` +[root@olsw ~]# uuidgen +e108fe36-a2cd-43bc-82e2-f367aa429ed2 +[root@olsw ~]# +``` + +交换机配置: + +``` +[root@olsw ~]# cd /etc/openlan/switch +[root@olsw ~]# cat > switch.json < central.json < central.json 
<:10000/get/network/example/tcp1194.ovpn + ## 替换access-ip为公网IP地址 + ``` +8. 添加一个新的接入认证的用户; + ``` + openlan us add --name hi@example ## <用户名>@<网络> + openlan us ls | grep example ## 查看随机密码 + hi@example l6llot97yxulsw1qqbm07vn1 guest ## <用户名>@<网络> 密码 角色 租期 + + openlan us rm --name hi@example ## 删除一个用户 + ``` +## OpenLAN Point + +同样的您也可以在centos7上通过下面步骤部署openlan point软件: +1. 使用yum安装openlan point软件; + ``` + yum install -y https://github.com/luscis/openlan/releases/download/v5.6.4/openlan-point-5.6.4-1.el7.x86_64.rpm + ``` +2. 添加一个新的网络配置; + ``` + cd /etc/openlan + cp point.json.example example.json + vim example.json ## <网络名>.json + { + "protocol": "tcp", ## 同上 + "crypt": { ## 同上 + "algo": "aes-128", + "secret": "ea64d5b0c96c" + }, + "connection": "example.net", ## 默认端口10002,格式:: + "username": "hi@example", ## <用户名>@<网络> + "password": "l6llot97yxulsw1qqbm07vn1" ## 认证的密码 + } + cat example.json | python -m json.tool ## 配置预检查 + ``` +3. 配置openlan point服务自启动; + ``` + systemctl enable openlan-point@example + systemctl start openlan-point@example + journalctl -u openlan-point@example ## 查看日志信息 + ``` +4. 检测网络是否可达; + ``` + ping 172.32.10.10 -c 3 + pint 192.168.10.1 -c 3 + ``` diff --git a/docs/ipsec.md b/docs/ipsec.md new file mode 100755 index 0000000..3b5d8bb --- /dev/null +++ b/docs/ipsec.md @@ -0,0 +1,58 @@ +Topology +======== +We use 192.168.7.0/24 as underlay network for IPSec. And S1 has public address with 192.168.7.11, C1 and C2 under firewall without public address. + + +----+ + | s1 | -- .10.1/24 + +----+ + / \ + / \ + / \ + +----+ +----+ + 192.168.2.0/24 -- | c2 | | c3 | -- 192.168.3.0/24 + +----+ +----+ + | | + .10.2/32 .10.3/32 + +Server +====== +``` +$ openlan network add --name ipsec --provider esp --address 10.10.10.1/24 +$ openlan link add --network ipsec --device spi:12 --remote-address 10.10.10.2 +$ openlan link add --network ipsec --device spi:13 --remote-address 10.10.10.3 +``` +``` +$ openlan route add --network ipsec --prefix 192.168.2.0/24 --gateway spi:12 +$ openlan route add --network ipsec --prefix 192.168.3.0/24 --gateway spi:13 +``` + +Client +====== + +C2 +-- +``` +$ openlan network add --name ipsec --provider esp --address 10.10.10.2 +$ openlan link add --network ipsec --connection udp:192.168.7.11 --device spi:12 --remote-address 10.10.10.1/24 +$ openlan link ls +``` +``` +$ ping 10.10.10.1 +``` +``` +$ openlan route add --network ipsec --prefix 192.168.3.0/24 --gateway spi:12 +``` + +C3 +-- + +``` +$ openlan network add --name ipsec --provider esp --address 10.10.10.3 +$ openlan link add --network ipsec --connection udp:192.168.7.11 --device spi:13 --remote-address 10.10.10.1/24 +``` +``` +$ ping 10.10.10.2 +``` +``` +$ openlan route add --network ipsec --prefix 192.168.2.0/24 --gateway spi:13 +``` \ No newline at end of file diff --git a/docs/multiarea.md b/docs/multiarea.md new file mode 100755 index 0000000..68b1d2a --- /dev/null +++ b/docs/multiarea.md @@ -0,0 +1,146 @@ +# Multiple Area Example + +## Topology + +``` + 192.168.1.20/24 192.168.1.21/24 + | | + OLAP1 -- Hotal Wifi --> OLSW(NJ) <--- Other Wifi --- OLAP2 + | + | + Internet + | + | + OLSW(SH) - 192.168.1.10/24 + | + | + +------------------------+---------------------------+ + ^ ^ ^ + | | | + Office Wifi Home Wifi Hotal Wifi + | | | + OLAP3 OLAP4 OLAP5 + 192.168.1.11/24 192.168.1.12/24 192.168.1.13/24 +``` + +## Configure OLSW for Nanjing + +配置交换机: + +``` +[root@olsw-nj ~]# cd /etc/openlan/switch +[root@olsw-nj ~]# cat > switch.json < private.json < switch.json < private.json < github.com/sirupsen/logrus v1.8.1 
+ golang.org/x/crypto => github.com/golang/crypto v0.0.0-20200604202706-70a84ac30bf9 + golang.org/x/net => github.com/golang/net v0.0.0-20190812203447-cdfb69ac37fc + golang.org/x/sys => github.com/golang/sys v0.0.0-20190209173611-3b5209105503 + golang.org/x/time => github.com/golang/time v0.0.0-20210220033141-f8bda1e9f3ba +) + +exclude github.com/sirupsen/logrus v1.8.1 + +require ( + github.com/Sirupsen/logrus v0.0.0-00010101000000-000000000000 // indirect + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 + github.com/chzyer/logex v1.2.0 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e + github.com/chzyer/test v0.0.0-20210722231415-061457976a23 // indirect + github.com/coreos/go-systemd/v22 v22.3.2 + github.com/danieldin95/go-openvswitch v0.0.5 + github.com/docker/libnetwork v0.5.6 // indirect + github.com/go-ldap/ldap v3.0.3+incompatible + github.com/go-logr/logr v1.1.0 + github.com/go-logr/stdr v1.1.0 + github.com/godbus/dbus v4.1.0+incompatible // indirect + github.com/gorilla/mux v1.8.0 + github.com/moby/libnetwork v0.5.6 + github.com/ovn-org/libovsdb v0.6.1-0.20220127023511-a619f0fd93be + github.com/shadowsocks/go-shadowsocks2 v0.1.5 + github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 + github.com/stretchr/testify v1.7.0 + github.com/urfave/cli/v2 v2.3.0 + github.com/vishvananda/netlink v1.1.0 + github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df + github.com/xtaci/kcp-go/v5 v5.6.1 + golang.org/x/net v0.0.0-20210525063256-abc453219eb5 + golang.org/x/time v0.0.0-00010101000000-000000000000 + gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..b3eeed3 --- /dev/null +++ b/go.sum @@ -0,0 +1,312 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.2.0/go.mod 
h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/cenk/hub v1.0.1 h1:RBwXNOF4a8KjD8BJ08XqN8KbrqaGiQLDrgvUGJSHuPA= +github.com/cenk/hub v1.0.1/go.mod h1:rJM1LNAW0ppT8FMMuPK6c2NP/R2nH/UthtuRySSaf6Y= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/hub v1.0.1 h1:UMtjc6dHSaOQTO15SVA50MBIR9zQwvsukQupDrkIRtg= +github.com/cenkalti/hub v1.0.1/go.mod h1:tcYwtS3a2d9NO/0xDXVJWx3IedurUjYCqFCmpi0lpHs= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984 h1:CNwZyGS6KpfaOWbh2yLkSy3rSTUh3jub9CzpFpP6PVQ= +github.com/cenkalti/rpc2 v0.0.0-20210604223624-c1acbc6ec984/go.mod h1:v2npkhrXyk5BCnkNIiPdRI23Uq6uWPUQGL2hnRcRr/M= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.2.0 h1:+eqR0HfOetur4tgnC8ftU5imRnhi4te+BadWS95c5AM= +github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23 h1:dZ0/VyGgQdVGAss6Ju0dt5P0QltE0SFY5Woh6hbIfiQ= +github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/danieldin95/go-openvswitch v0.0.5 h1:Ssd6MO6WPGIuIOGFt5Qp8lk9JDqiQLsvJ6lPeGfV9WY= +github.com/danieldin95/go-openvswitch v0.0.5/go.mod h1:e3VRiIjGokniZ4h9q4q3Yi/kcwbxpV5EeoVA7OdKlec= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= 
+github.com/docker/libnetwork v0.5.6 h1:hnGiypBsZR6PW1I8lqaBHh06U6LCJbI3IhOvfsZiymY= +github.com/docker/libnetwork v0.5.6/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk= +github.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.1.0 h1:nAbevmWlS2Ic4m4+/An5NXkaGqlqpbBgdcuThZxnZyI= +github.com/go-logr/logr v1.1.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.1.0 h1:WSypkOvL7AfqHep42aGGLagyxUjJCQFFs/2nIitlVTQ= +github.com/go-logr/stdr v1.1.0/go.mod h1:Xff/PTSzmJ+zDsu/KDy4l6Axizfso1w7QcxLnWTdto4= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= +github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:lIQUCEYYutFXVbIXlWWJUOtinadHOl9N6P84tMjXLQU= +github.com/golang/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +github.com/golang/net v0.0.0-20190812203447-cdfb69ac37fc h1:ANlSYw21rkVMLEZGosY7p2hFSi/JitYbABaYmw7Tv4A= +github.com/golang/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:98y8FxUyMjTdJ5eOj/8vzuiVO14/dkJ98NYhEPG8QGY= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/sys v0.0.0-20190209173611-3b5209105503 
h1:KkzKRheGyjoZo7gN0XfHPvHPiigTohQEshRskFvbq6s= +github.com/golang/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:5JyrLPvD/ZdaYkT7IqKhsP5xt7aLjA99KXRtk4EIYDk= +github.com/golang/time v0.0.0-20210220033141-f8bda1e9f3ba h1:xgei/lBA0MICqy4kX0+HHp9N3aFDmulXmfDG4mvhA+c= +github.com/golang/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= +github.com/klauspost/reedsolomon v1.9.9 h1:qCL7LZlv17xMixl55nq2/Oa1Y86nfO8EqDfv2GHND54= +github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lib/pq v0.0.0-20180327071824-d34b9ff171c2/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA= +github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls= +github.com/moby/libnetwork v0.5.6 h1:pDuZwsc3ViMrIi0i4vrTtsgBN1+8ReJY6vAGnDCZ8AI= +github.com/moby/libnetwork v0.5.6/go.mod h1:RQTqDxGZChsPHosY8R3ZL2THYWUuW8X5SRhiBNoTY5I= +github.com/moby/sys/mountinfo v0.4.1/go.mod 
h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/ory/dockertest/v3 v3.8.0/go.mod h1:9zPATATlWQru+ynXP+DytBQrsXV7Tmlx7K86H6fQaDo= +github.com/ovn-org/libovsdb v0.6.1-0.20220127023511-a619f0fd93be h1:U8WVtNNTfBKj/5OE3uBe57oNJ+x5KSMl+3hM7iLSbdk= +github.com/ovn-org/libovsdb v0.6.1-0.20220127023511-a619f0fd93be/go.mod h1:aLvY7gPs/vLyJXF+PpZzvWlR5LB4QNJvBYIQMskJLZk= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common 
v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/riobard/go-bloom v0.0.0-20200614022211-cdc8013cb5b3 h1:f/FNXud6gA3MNr8meMVVGxhp+QBTqY91tM8HjEuMjGg= +github.com/riobard/go-bloom v0.0.0-20200614022211-cdc8013cb5b3/go.mod h1:HgjTstvQsPGkxUsCd2KWxErBblirPizecHcpD3ffK+s= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shadowsocks/go-shadowsocks2 v0.1.5 h1:PDSQv9y2S85Fl7VBeOMF9StzeXZyK1HakRm86CUbr28= +github.com/shadowsocks/go-shadowsocks2 v0.1.5/go.mod h1:AGGpIoek4HRno4xzyFiAtLHkOpcoznZEkAccaI/rplM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= +github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= +github.com/templexxx/cpu v0.0.7 h1:pUEZn8JBy/w5yzdYWgx+0m0xL9uk6j4K91C5kOViAzo= +github.com/templexxx/cpu v0.0.7/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk= +github.com/templexxx/xorsimd v0.4.1 
h1:iUZcywbOYDRAZUasAs2eSCUW8eobuZDy0I9FJiORkVg= +github.com/templexxx/xorsimd v0.4.1/go.mod h1:W+ffZz8jJMH2SXwuKu9WhygqBMbFnp14G2fqEr8qaNo= +github.com/tjfoc/gmsm v1.3.2 h1:7JVkAn5bvUJ7HtU08iW6UiD+UTmJTIToHCfeFzkcCxM= +github.com/tjfoc/gmsm v1.3.2/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= +github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xtaci/kcp-go/v5 v5.6.1 h1:Pwn0aoeNSPF9dTS7IgiPXn0HEtaIlVb6y5UKWPsx8bI= +github.com/xtaci/kcp-go/v5 v5.6.1/go.mod h1:W3kVPyNYwZ06p79dNwFWQOVFrdcBpDBsdyvK8moQrYo= +github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM= +github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200808161706-5bf02b21f123/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/misc/learn/append.go b/misc/learn/append.go new file mode 100755 index 0000000..dcb267d --- /dev/null +++ b/misc/learn/append.go @@ -0,0 +1,42 @@ +package main + +import "fmt" + +func main() { + var a = []int{1, 2, 3} + + fmt.Println(a) + a0 := append(a, []int{4, 5, 6}...) + a0[0] = 9 + a1 := append(a, []int{7, 8}...) + fmt.Println(a, a0) + fmt.Println(a, a1) + + a0 = append(a[:3], []int{4, 5, 6}...) + a0[0] = 9 + a1 = append(a[:3], []int{7, 8}...) + fmt.Println(a, a0) + fmt.Println(a, a1) + + a = make([]int, 0, 1024) + + b := append(a, []int{4, 5, 6}...) + fmt.Println(b, a) + //fmt.Println(cap(b), len(b)) + //fmt.Println(cap(a), len(a)) + + c := append(b, []int{8, 9}...) + c[1] = 10 + b[0] = 9 + fmt.Println(c, b) + //fmt.Println(cap(c), len(c)) + //fmt.Println(cap(b), len(b)) + + bb := b + b = append(b, []int{8, 9}...) + bb[2] = 11 + fmt.Println(b, bb) + //fmt.Println(cap(bb), len(bb)) + //fmt.Println(cap(b), len(b)) + +} diff --git a/misc/learn/array.go b/misc/learn/array.go new file mode 100755 index 0000000..918d15d --- /dev/null +++ b/misc/learn/array.go @@ -0,0 +1,38 @@ +package main + +import ( + "encoding/json" + "fmt" +) + +func InArray(data []int) { + data[0] = 0x04 + fmt.Println(data) +} +func main() { + var a = []int{1, 2, 3} + + fmt.Println(a) + InArray(a) + fmt.Println(a) + + b := a + a[1] = 5 + fmt.Println(a, b) + + b[1] = 6 + fmt.Println(a, b) + + c := a[1:] + a[2] = 10 + fmt.Println(a, c) + + c[1] = 11 + fmt.Println(a, c) + + var aa []int + str := `[1, 2, 3]` + err := json.Unmarshal([]byte(str), &aa) + fmt.Println(err) + fmt.Println(aa) +} diff --git a/misc/learn/channel.go b/misc/learn/channel.go new file mode 100755 index 0000000..ab3b583 --- /dev/null +++ b/misc/learn/channel.go @@ -0,0 +1,10 @@ +package main + +func main() { + ch := make(chan int, 2) + + ch <- 1 + ch <- 2 + ch <- 3 + +} diff --git a/misc/learn/china/china.list b/misc/learn/china/china.list new file mode 100755 index 0000000..3df4826 --- /dev/null +++ b/misc/learn/china/china.list @@ -0,0 +1,6073 @@ +1.0.1.0/24 +1.0.2.0/23 +1.0.8.0/21 +1.0.32.0/19 +1.1.0.0/24 +1.1.2.0/23 +1.1.4.0/22 +1.1.8.0/21 +1.1.16.0/20 +1.1.32.0/19 +1.2.0.0/23 +1.2.2.0/24 +1.2.4.0/22 +1.2.8.0/21 +1.2.16.0/20 +1.2.32.0/19 +1.2.64.0/18 +1.3.0.0/16 +1.4.1.0/24 +1.4.2.0/23 +1.4.4.0/22 +1.4.8.0/21 +1.4.16.0/20 +1.4.32.0/19 +1.4.64.0/18 +1.8.0.0/16 +1.10.0.0/21 +1.10.8.0/23 +1.10.11.0/24 +1.10.12.0/22 +1.10.16.0/20 +1.10.32.0/19 +1.10.64.0/18 +1.12.16.0/20 +1.12.32.0/23 +1.12.36.0/22 +1.12.40.0/21 +1.12.48.0/20 +1.12.64.0/18 +1.12.128.0/17 +1.13.0.0/16 +1.14.0.0/15 +1.18.128.0/24 +1.24.0.0/13 +1.45.0.0/16 +1.48.0.0/14 +1.56.0.0/13 +1.68.0.0/14 +1.80.0.0/12 +1.116.0.0/15 +1.118.1.0/24 +1.118.2.0/23 +1.118.4.0/22 +1.118.8.0/21 +1.118.16.0/20 +1.118.33.0/24 +1.118.34.0/23 +1.118.36.0/22 +1.118.40.0/21 +1.118.48.0/20 +1.118.64.0/18 +1.118.128.0/17 +1.119.0.0/16 +1.180.0.0/14 +1.184.0.0/15 +1.188.0.0/14 +1.192.0.0/13 +1.202.0.0/15 +1.204.0.0/14 +3.5.214.0/23 +8.128.0.0/10 +14.0.0.0/21 +14.0.12.0/22 +14.1.0.0/22 +14.1.24.0/22 +14.1.108.0/22 
+14.16.0.0/12 +14.102.128.0/22 +14.102.180.0/22 +14.103.0.0/16 +14.104.0.0/13 +14.112.0.0/12 +14.130.0.0/15 +14.134.0.0/15 +14.144.0.0/12 +14.192.60.0/22 +14.192.76.0/22 +14.196.0.0/15 +14.204.0.0/15 +14.208.0.0/12 +15.230.41.0/24 +15.230.49.0/24 +15.230.141.0/24 +20.139.160.0/20 +20.249.255.0/24 +20.251.0.0/22 +27.0.128.0/22 +27.0.132.0/24 +27.0.134.0/23 +27.0.160.0/21 +27.0.188.0/22 +27.0.204.0/22 +27.0.208.0/21 +27.8.0.0/13 +27.16.0.0/12 +27.34.232.0/21 +27.36.0.0/14 +27.40.0.0/13 +27.50.40.0/21 +27.50.128.0/17 +27.54.72.0/21 +27.54.152.0/21 +27.54.192.0/18 +27.98.208.0/20 +27.98.224.0/19 +27.99.128.0/17 +27.103.0.0/16 +27.106.128.0/18 +27.106.204.0/22 +27.109.32.0/19 +27.109.124.0/22 +27.112.0.0/18 +27.112.80.0/20 +27.112.112.0/21 +27.113.128.0/18 +27.115.0.0/17 +27.116.44.0/22 +27.121.72.0/21 +27.121.120.0/21 +27.128.0.0/15 +27.131.220.0/22 +27.144.0.0/16 +27.148.0.0/14 +27.152.0.0/13 +27.184.0.0/13 +27.192.0.0/11 +27.224.0.0/14 +36.0.0.0/22 +36.0.16.0/20 +36.0.32.0/19 +36.0.64.0/18 +36.0.128.0/17 +36.1.0.0/16 +36.4.0.0/14 +36.16.0.0/12 +36.32.0.0/14 +36.36.0.0/16 +36.37.0.0/19 +36.37.36.0/23 +36.37.39.0/24 +36.37.40.0/21 +36.37.48.0/20 +36.40.0.0/13 +36.48.0.0/15 +36.51.0.0/17 +36.51.128.0/18 +36.51.192.0/19 +36.51.224.0/20 +36.51.240.0/21 +36.51.248.0/24 +36.51.250.0/23 +36.51.252.0/23 +36.56.0.0/13 +36.96.0.0/11 +36.128.0.0/10 +36.192.0.0/11 +36.248.0.0/14 +36.254.0.0/16 +36.255.116.0/22 +36.255.128.0/22 +36.255.164.0/22 +36.255.172.0/22 +36.255.176.0/22 +38.142.239.114/31 +39.0.0.0/24 +39.0.2.0/23 +39.0.4.0/22 +39.0.8.0/21 +39.0.16.0/20 +39.0.32.0/19 +39.0.64.0/18 +39.0.128.0/17 +39.64.0.0/11 +39.96.0.0/16 +39.97.0.0/17 +39.97.128.0/18 +39.97.192.0/21 +39.97.200.0/22 +39.97.208.0/20 +39.97.224.0/19 +39.98.0.0/15 +39.100.0.0/14 +39.104.0.0/14 +39.108.0.0/16 +39.109.120.0/23 +39.128.0.0/10 +40.0.176.0/20 +40.0.248.0/21 +40.72.0.0/15 +40.77.136.112/28 +40.77.236.224/27 +40.77.254.64/27 +40.125.128.0/17 +40.126.64.0/18 +40.198.10.0/24 +40.198.16.0/21 +40.198.24.0/23 +40.251.225.0/24 +40.251.227.0/24 +42.0.0.0/22 +42.0.8.0/21 +42.0.16.0/21 +42.0.24.0/22 +42.0.32.0/19 +42.0.128.0/17 +42.1.0.0/19 +42.1.32.0/20 +42.1.48.0/21 +42.1.56.0/22 +42.4.0.0/14 +42.48.0.0/13 +42.56.0.0/14 +42.62.0.0/17 +42.62.128.0/19 +42.62.160.0/20 +42.62.180.0/22 +42.62.184.0/21 +42.63.0.0/16 +42.80.0.0/15 +42.83.64.0/20 +42.83.80.0/22 +42.83.88.0/21 +42.83.96.0/19 +42.83.128.0/23 +42.83.134.0/24 +42.83.138.0/23 +42.83.140.0/22 +42.83.144.0/20 +42.83.160.0/19 +42.83.192.0/18 +42.84.0.0/14 +42.88.0.0/13 +42.96.64.0/19 +42.96.96.0/21 +42.96.108.0/22 +42.96.112.0/20 +42.96.128.0/17 +42.97.0.0/16 +42.99.0.0/18 +42.99.64.0/19 +42.99.96.0/20 +42.99.112.0/22 +42.99.120.0/21 +42.100.0.0/14 +42.120.0.0/15 +42.122.0.0/16 +42.123.0.0/19 +42.123.36.0/22 +42.123.40.0/21 +42.123.48.0/20 +42.123.64.0/18 +42.123.128.0/17 +42.128.0.0/12 +42.156.0.0/19 +42.156.36.0/22 +42.156.40.0/21 +42.156.48.0/20 +42.156.64.0/18 +42.156.128.0/17 +42.157.0.0/21 +42.157.8.0/22 +42.157.14.0/23 +42.157.16.0/20 +42.157.32.0/19 +42.157.64.0/18 +42.157.128.0/17 +42.158.0.0/15 +42.160.0.0/12 +42.176.0.0/13 +42.184.0.0/15 +42.186.0.0/16 +42.187.0.0/18 +42.187.64.0/19 +42.187.96.0/20 +42.187.112.0/21 +42.187.120.0/22 +42.187.128.0/17 +42.192.0.0/13 +42.201.0.0/17 +42.202.0.0/15 +42.204.0.0/14 +42.208.0.0/12 +42.224.0.0/12 +42.240.0.0/16 +42.242.0.0/15 +42.244.0.0/15 +42.246.0.0/16 +42.247.0.0/22 +42.247.4.0/24 +42.247.5.0/25 +42.247.5.128/26 +42.247.5.204/30 +42.247.5.208/28 +42.247.5.224/27 +42.247.6.0/23 +42.247.8.0/21 +42.247.16.0/20 +42.247.32.0/19 
+42.247.64.0/18 +42.247.128.0/17 +42.248.0.0/13 +43.130.128.0/17 +43.131.128.0/17 +43.132.4.0/22 +43.132.8.0/21 +43.132.16.0/20 +43.132.32.0/19 +43.132.64.0/18 +43.132.128.0/17 +43.133.0.0/16 +43.134.0.0/15 +43.136.0.0/13 +43.144.0.0/12 +43.160.0.0/11 +43.224.12.0/22 +43.224.24.0/22 +43.224.44.0/22 +43.224.52.0/22 +43.224.56.0/22 +43.224.68.0/22 +43.224.72.0/22 +43.224.80.0/22 +43.224.100.0/22 +43.224.144.0/22 +43.224.161.0/24 +43.224.176.0/22 +43.224.184.0/22 +43.224.200.0/21 +43.224.208.0/21 +43.224.216.0/22 +43.224.240.0/22 +43.225.76.0/22 +43.225.86.0/24 +43.225.120.0/22 +43.225.180.0/22 +43.225.208.0/22 +43.225.216.0/21 +43.225.224.0/20 +43.225.240.0/21 +43.225.252.0/22 +43.226.32.0/19 +43.226.64.0/19 +43.226.96.0/20 +43.226.112.0/21 +43.226.120.0/22 +43.226.128.0/19 +43.226.160.0/21 +43.226.236.0/22 +43.226.240.0/20 +43.227.0.0/21 +43.227.8.0/22 +43.227.32.0/19 +43.227.64.0/19 +43.227.104.0/22 +43.227.136.0/21 +43.227.144.0/22 +43.227.152.0/21 +43.227.160.0/20 +43.227.176.0/21 +43.227.188.0/22 +43.227.192.0/19 +43.227.232.0/22 +43.227.248.0/21 +43.228.0.0/18 +43.228.64.0/21 +43.228.76.0/22 +43.228.100.0/22 +43.228.116.0/24 +43.228.118.0/23 +43.228.132.0/22 +43.228.136.0/22 +43.228.148.0/22 +43.228.152.0/22 +43.228.188.0/22 +43.229.40.0/22 +43.229.48.0/23 +43.229.50.0/24 +43.229.51.0/26 +43.229.51.64/27 +43.229.51.96/30 +43.229.51.101/32 +43.229.51.102/31 +43.229.51.104/29 +43.229.51.112/28 +43.229.51.128/25 +43.229.56.0/22 +43.229.96.0/22 +43.229.136.0/21 +43.229.168.0/21 +43.229.176.0/20 +43.229.192.0/21 +43.229.216.0/21 +43.229.232.0/21 +43.230.20.0/22 +43.230.32.0/22 +43.230.68.0/22 +43.230.72.0/22 +43.230.84.0/23 +43.230.124.0/22 +43.230.220.0/22 +43.230.224.0/19 +43.231.32.0/20 +43.231.80.0/20 +43.231.96.0/20 +43.231.136.0/21 +43.231.144.0/20 +43.231.160.0/20 +43.231.176.0/21 +43.236.0.0/15 +43.238.0.0/16 +43.239.0.0/19 +43.239.32.0/20 +43.239.48.0/22 +43.239.116.0/22 +43.239.120.0/22 +43.239.172.0/22 +43.240.0.0/22 +43.240.56.0/21 +43.240.68.0/22 +43.240.72.0/21 +43.240.84.0/22 +43.240.124.0/22 +43.240.128.0/21 +43.240.136.0/22 +43.240.156.0/22 +43.240.160.0/19 +43.240.192.0/19 +43.240.240.0/20 +43.241.0.0/20 +43.241.16.0/21 +43.241.48.0/22 +43.241.76.0/22 +43.241.80.0/20 +43.241.112.0/22 +43.241.168.0/21 +43.241.176.0/21 +43.241.184.0/22 +43.241.208.0/20 +43.241.224.0/20 +43.241.240.0/22 +43.241.248.0/22 +43.242.8.0/21 +43.242.16.0/20 +43.242.48.0/22 +43.242.53.0/24 +43.242.54.0/24 +43.242.56.0/21 +43.242.64.0/22 +43.242.72.0/21 +43.242.80.0/20 +43.242.96.0/22 +43.242.144.0/20 +43.242.160.0/21 +43.242.180.0/22 +43.242.188.0/22 +43.242.192.0/21 +43.242.204.0/22 +43.242.216.0/21 +43.242.252.0/22 +43.243.4.0/22 +43.243.8.0/21 +43.243.16.0/22 +43.243.88.0/22 +43.243.128.0/22 +43.243.136.0/22 +43.243.144.0/21 +43.243.156.0/22 +43.243.180.0/22 +43.243.228.0/22 +43.243.232.0/22 +43.243.244.0/22 +43.246.0.0/18 +43.246.64.0/19 +43.246.96.0/22 +43.246.147.0/24 +43.246.148.0/24 +43.246.150.0/24 +43.246.151.0/30 +43.246.151.4/32 +43.246.151.6/31 +43.246.151.8/29 +43.246.151.16/28 +43.246.151.32/27 +43.246.151.64/26 +43.246.151.128/25 +43.246.228.0/22 +43.247.4.0/22 +43.247.8.0/22 +43.247.44.0/22 +43.247.48.0/22 +43.247.68.0/22 +43.247.76.0/22 +43.247.84.0/22 +43.247.88.0/21 +43.247.96.0/21 +43.247.108.0/22 +43.247.112.0/22 +43.247.148.0/22 +43.247.152.0/22 +43.247.176.0/20 +43.247.196.0/22 +43.247.200.0/21 +43.247.208.0/20 +43.247.224.0/19 +43.248.0.0/21 +43.248.20.0/22 +43.248.28.0/22 +43.248.48.0/22 +43.248.76.0/22 +43.248.80.0/20 +43.248.96.0/19 +43.248.128.0/20 +43.248.144.0/21 
+43.248.176.0/20 +43.248.192.0/20 +43.248.208.0/22 +43.248.228.0/22 +43.248.232.0/22 +43.248.244.0/22 +43.249.4.0/22 +43.249.120.0/22 +43.249.132.0/22 +43.249.136.0/22 +43.249.144.0/20 +43.249.160.0/21 +43.249.168.0/22 +43.249.192.0/22 +43.249.236.0/22 +43.250.4.0/22 +43.250.12.0/22 +43.250.16.0/21 +43.250.28.0/22 +43.250.32.0/22 +43.250.96.0/21 +43.250.108.0/22 +43.250.112.0/22 +43.250.118.0/23 +43.250.128.0/22 +43.250.144.0/21 +43.250.160.0/22 +43.250.168.0/22 +43.250.176.0/22 +43.250.200.0/22 +43.250.212.0/22 +43.250.216.0/21 +43.250.236.0/22 +43.250.244.0/22 +43.251.4.0/22 +43.251.36.0/22 +43.251.69.0/24 +43.251.70.0/24 +43.251.149.0/24 +43.251.192.0/22 +43.251.232.0/22 +43.251.244.0/22 +43.252.48.0/22 +43.252.56.0/22 +43.252.224.0/22 +43.254.0.0/21 +43.254.8.0/22 +43.254.24.0/22 +43.254.36.0/22 +43.254.44.0/22 +43.254.52.0/22 +43.254.64.0/22 +43.254.72.0/22 +43.254.84.0/22 +43.254.88.0/21 +43.254.100.0/22 +43.254.104.0/22 +43.254.112.0/21 +43.254.128.0/22 +43.254.136.0/21 +43.254.144.0/20 +43.254.168.0/21 +43.254.180.0/22 +43.254.184.0/21 +43.254.192.0/22 +43.254.200.0/22 +43.254.208.0/22 +43.254.220.0/22 +43.254.224.0/20 +43.254.240.0/22 +43.254.248.0/21 +43.255.0.0/21 +43.255.8.0/22 +43.255.16.0/22 +43.255.48.0/22 +43.255.64.0/20 +43.255.84.0/22 +43.255.96.0/22 +43.255.144.0/22 +43.255.176.0/22 +43.255.184.0/22 +43.255.192.0/22 +43.255.200.0/21 +43.255.208.0/21 +43.255.224.0/21 +43.255.232.0/22 +43.255.244.0/22 +45.40.192.0/20 +45.40.208.0/21 +45.40.224.0/19 +45.65.16.0/20 +45.87.53.0/24 +45.87.54.0/23 +45.112.132.0/22 +45.112.188.0/22 +45.112.208.0/20 +45.112.228.0/22 +45.112.232.0/21 +45.113.12.0/22 +45.113.16.0/20 +45.113.40.0/22 +45.113.52.0/22 +45.113.72.0/22 +45.113.144.0/21 +45.113.168.0/22 +45.113.184.0/22 +45.113.200.0/21 +45.113.208.0/20 +45.113.240.0/22 +45.113.252.0/22 +45.114.0.0/22 +45.114.32.0/22 +45.114.52.0/22 +45.114.96.0/22 +45.114.136.0/22 +45.114.196.0/22 +45.114.200.0/22 +45.114.228.0/22 +45.114.237.0/24 +45.114.238.0/23 +45.114.252.0/22 +45.115.44.0/22 +45.115.100.0/22 +45.115.120.0/22 +45.115.132.0/22 +45.115.144.0/22 +45.115.156.0/22 +45.115.164.0/22 +45.115.200.0/22 +45.115.212.0/22 +45.115.244.0/22 +45.115.248.0/22 +45.116.16.0/22 +45.116.24.0/22 +45.116.32.0/21 +45.116.52.0/22 +45.116.96.0/21 +45.116.140.0/22 +45.116.152.0/22 +45.116.208.0/22 +45.117.8.0/22 +45.117.20.0/22 +45.117.68.0/22 +45.117.124.0/22 +45.117.252.0/22 +45.119.60.0/22 +45.119.64.0/21 +45.119.72.0/22 +45.119.104.0/22 +45.119.232.0/22 +45.120.100.0/22 +45.120.140.0/22 +45.120.164.0/22 +45.120.180.128/27 +45.120.240.0/24 +45.120.242.0/23 +45.121.52.0/22 +45.121.64.0/21 +45.121.72.0/22 +45.121.92.0/22 +45.121.96.0/22 +45.121.172.0/22 +45.121.176.0/22 +45.121.240.0/20 +45.122.0.0/19 +45.122.32.0/21 +45.122.40.0/22 +45.122.60.0/22 +45.122.64.0/19 +45.122.96.0/20 +45.122.112.0/21 +45.122.160.0/19 +45.122.192.0/20 +45.122.208.0/21 +45.122.216.0/22 +45.123.28.0/22 +45.123.32.0/21 +45.123.44.0/22 +45.123.48.0/20 +45.123.64.0/20 +45.123.80.0/21 +45.123.120.0/22 +45.123.128.0/21 +45.123.136.0/22 +45.123.148.0/22 +45.123.152.0/21 +45.123.164.0/22 +45.123.168.0/21 +45.123.176.0/21 +45.123.184.0/22 +45.123.204.0/22 +45.123.212.0/22 +45.123.224.0/19 +45.124.0.0/22 +45.124.20.0/22 +45.124.28.0/22 +45.124.32.0/21 +45.124.44.0/22 +45.124.68.0/22 +45.124.76.0/22 +45.124.80.0/22 +45.124.100.0/22 +45.124.124.0/22 +45.124.172.0/22 +45.124.176.0/22 +45.124.208.0/22 +45.124.248.0/22 +45.125.24.0/22 +45.125.44.0/22 +45.125.52.0/22 +45.125.56.0/22 +45.125.76.0/22 +45.125.80.0/20 +45.125.96.0/21 +45.125.136.0/22 
+45.126.48.0/21 +45.126.108.0/22 +45.126.112.0/21 +45.126.120.0/22 +45.126.220.0/22 +45.127.8.0/21 +45.127.128.0/22 +45.127.144.0/21 +45.127.156.0/22 +45.248.8.0/22 +45.248.80.0/22 +45.248.88.0/22 +45.248.96.0/20 +45.248.128.0/21 +45.248.204.0/22 +45.248.208.0/20 +45.248.224.0/19 +45.249.0.0/21 +45.249.12.0/22 +45.249.16.0/20 +45.249.32.0/21 +45.249.112.0/22 +45.249.188.0/22 +45.249.192.0/20 +45.249.208.0/21 +45.250.12.0/22 +45.250.16.0/22 +45.250.28.0/22 +45.250.32.0/21 +45.250.40.0/22 +45.250.76.0/22 +45.250.80.0/20 +45.250.96.0/22 +45.250.104.0/21 +45.250.112.0/20 +45.250.128.0/20 +45.250.144.0/21 +45.250.152.0/22 +45.250.164.0/22 +45.250.180.0/22 +45.250.184.0/21 +45.250.192.0/22 +45.251.0.0/22 +45.251.8.0/22 +45.251.16.0/21 +45.251.52.0/22 +45.251.84.0/22 +45.251.88.0/21 +45.251.96.0/21 +45.251.120.0/21 +45.251.138.0/23 +45.251.140.0/22 +45.251.144.0/20 +45.251.160.0/19 +45.251.192.0/19 +45.251.224.0/22 +45.252.0.0/19 +45.252.32.0/20 +45.252.48.0/22 +45.252.84.0/22 +45.252.88.0/21 +45.252.96.0/19 +45.252.128.0/19 +45.252.160.0/20 +45.252.176.0/22 +45.252.192.0/19 +45.252.224.0/21 +45.252.232.0/22 +45.253.0.0/18 +45.253.64.0/20 +45.253.80.0/21 +45.253.92.0/22 +45.253.96.0/20 +45.253.112.0/21 +45.253.120.0/22 +45.253.132.0/22 +45.253.136.0/21 +45.253.144.0/20 +45.253.160.0/19 +45.253.192.0/19 +45.253.224.0/20 +45.253.240.0/22 +45.254.0.0/20 +45.254.16.0/21 +45.254.28.0/22 +45.254.40.0/22 +45.254.48.0/20 +45.254.64.0/18 +45.254.128.0/18 +45.254.192.0/19 +45.254.224.0/21 +45.254.236.0/22 +45.254.248.0/22 +45.255.0.0/18 +45.255.64.0/19 +45.255.96.0/20 +45.255.112.0/21 +45.255.120.0/22 +45.255.136.0/21 +45.255.144.0/20 +45.255.160.0/19 +45.255.192.0/19 +45.255.224.0/20 +45.255.240.0/21 +45.255.248.0/22 +46.248.24.0/23 +47.92.0.0/14 +47.96.0.0/11 +49.4.0.0/14 +49.51.57.0/24 +49.51.58.0/23 +49.51.60.0/23 +49.51.110.0/23 +49.51.112.0/20 +49.52.0.0/14 +49.64.0.0/11 +49.112.0.0/13 +49.120.0.0/14 +49.128.0.0/24 +49.128.2.0/23 +49.140.0.0/15 +49.152.0.0/14 +49.208.0.0/14 +49.220.0.0/14 +49.232.0.0/14 +49.239.0.0/18 +49.239.192.0/18 +52.80.0.0/14 +52.93.127.92/30 +52.93.127.96/29 +52.93.127.104/31 +52.93.242.120/29 +52.93.242.128/25 +52.94.249.0/27 +52.130.0.0/15 +54.222.0.0/15 +54.231.208.0/20 +54.240.224.0/24 +57.92.96.0/20 +58.14.0.0/15 +58.16.0.0/13 +58.24.0.0/15 +58.30.0.0/15 +58.32.0.0/11 +58.65.232.0/21 +58.66.0.0/15 +58.68.128.0/19 +58.68.160.0/21 +58.68.200.0/21 +58.68.208.0/20 +58.68.224.0/19 +58.82.0.0/17 +58.83.0.0/16 +58.87.64.0/18 +58.99.128.0/17 +58.100.0.0/15 +58.116.0.0/14 +58.128.0.0/13 +58.144.0.0/16 +58.154.0.0/15 +58.192.0.0/11 +58.240.0.0/12 +59.32.0.0/11 +59.64.0.0/12 +59.80.0.0/15 +59.82.0.0/16 +59.83.0.0/18 +59.83.132.0/22 +59.83.136.0/21 +59.83.144.0/20 +59.83.160.0/19 +59.83.192.0/19 +59.83.224.0/20 +59.83.240.0/21 +59.83.248.0/22 +59.83.252.0/23 +59.83.254.0/24 +59.107.0.0/16 +59.108.0.0/14 +59.151.0.0/17 +59.152.16.0/20 +59.152.36.0/22 +59.152.64.0/20 +59.152.112.0/21 +59.153.4.0/22 +59.153.32.0/22 +59.153.64.0/21 +59.153.72.0/22 +59.153.92.0/22 +59.153.136.0/22 +59.153.152.0/21 +59.153.164.0/22 +59.153.168.0/21 +59.153.176.0/20 +59.153.192.0/22 +59.155.0.0/16 +59.172.0.0/14 +59.191.0.0/17 +59.192.0.0/10 +60.0.0.0/11 +60.55.0.0/16 +60.63.0.0/16 +60.160.0.0/11 +60.194.0.0/15 +60.200.0.0/13 +60.208.0.0/12 +60.232.0.0/15 +60.235.0.0/16 +60.245.128.0/17 +60.247.0.0/16 +60.252.0.0/16 +60.253.128.0/17 +60.255.0.0/16 +61.4.81.0/24 +61.4.82.0/23 +61.4.84.0/22 +61.4.88.0/21 +61.4.176.0/20 +61.8.160.0/20 +61.14.212.0/22 +61.14.216.0/21 +61.14.240.0/21 +61.28.0.0/17 
+61.29.128.0/18 +61.29.192.0/19 +61.29.224.0/20 +61.45.128.0/18 +61.45.224.0/20 +61.47.128.0/18 +61.48.0.0/13 +61.87.192.0/18 +61.128.0.0/10 +61.232.0.0/14 +61.236.0.0/15 +61.240.0.0/14 +62.234.0.0/16 +64.188.38.0/23 +64.188.40.0/22 +64.188.44.0/23 +67.17.83.117/32 +68.79.0.0/18 +69.230.192.0/18 +69.231.128.0/18 +69.234.192.0/18 +69.235.128.0/18 +71.131.192.0/18 +71.132.0.0/18 +71.136.64.0/18 +71.137.0.0/18 +72.163.240.0/23 +72.163.248.0/22 +81.68.0.0/14 +82.156.0.0/15 +87.254.207.0/24 +93.183.14.0/24 +93.183.18.0/24 +94.191.0.0/17 +101.0.0.0/22 +101.1.0.0/22 +101.2.172.0/22 +101.4.0.0/14 +101.16.0.0/12 +101.33.16.0/24 +101.33.20.0/22 +101.33.24.0/23 +101.33.128.0/17 +101.34.0.0/15 +101.36.0.0/18 +101.36.64.0/20 +101.36.88.0/21 +101.36.128.0/17 +101.37.0.0/16 +101.38.0.0/15 +101.40.0.0/13 +101.48.0.0/15 +101.50.8.0/21 +101.50.56.0/22 +101.52.0.0/16 +101.53.100.0/22 +101.54.0.0/16 +101.55.224.0/21 +101.64.0.0/13 +101.72.0.0/14 +101.76.0.0/15 +101.78.0.0/22 +101.78.32.0/19 +101.80.0.0/12 +101.96.0.0/21 +101.96.8.0/22 +101.96.16.0/20 +101.96.128.0/17 +101.99.96.0/19 +101.101.64.0/19 +101.101.100.0/24 +101.101.102.0/23 +101.101.104.0/21 +101.101.112.0/20 +101.102.64.0/19 +101.102.100.0/23 +101.102.102.0/24 +101.102.104.0/21 +101.102.112.0/20 +101.104.0.0/14 +101.110.64.0/19 +101.110.96.0/20 +101.110.116.0/22 +101.110.120.0/21 +101.120.0.0/14 +101.124.0.0/15 +101.126.0.0/16 +101.128.0.0/22 +101.128.8.0/21 +101.128.16.0/20 +101.128.32.0/19 +101.129.0.0/16 +101.130.0.0/15 +101.132.0.0/15 +101.134.0.0/17 +101.134.128.0/19 +101.134.160.0/20 +101.134.176.0/21 +101.134.184.0/22 +101.134.189.0/24 +101.134.190.0/23 +101.134.192.0/18 +101.135.0.0/16 +101.144.0.0/12 +101.192.0.0/14 +101.196.0.0/16 +101.198.128.0/24 +101.198.170.0/23 +101.198.172.0/22 +101.198.176.0/21 +101.198.184.0/22 +101.198.189.0/24 +101.198.190.0/23 +101.198.194.0/24 +101.198.196.0/23 +101.198.200.0/22 +101.198.224.0/19 +101.199.0.0/19 +101.199.48.0/20 +101.199.64.0/18 +101.199.128.0/17 +101.200.0.0/15 +101.203.128.0/19 +101.203.160.0/21 +101.203.172.0/22 +101.203.176.0/20 +101.204.0.0/14 +101.224.0.0/13 +101.232.0.0/15 +101.234.64.0/21 +101.234.76.0/22 +101.234.80.0/20 +101.234.96.0/19 +101.236.0.0/14 +101.240.0.0/13 +101.248.0.0/15 +101.251.0.0/22 +101.251.8.0/21 +101.251.16.0/20 +101.251.32.0/19 +101.251.64.0/18 +101.251.128.0/17 +101.252.0.0/15 +101.254.0.0/16 +102.176.130.0/24 +103.1.8.0/22 +103.1.20.0/22 +103.1.24.0/22 +103.1.88.0/22 +103.1.168.0/22 +103.2.108.0/22 +103.2.156.0/22 +103.2.164.0/22 +103.2.200.0/21 +103.2.208.0/21 +103.3.84.0/22 +103.3.88.0/21 +103.3.96.0/19 +103.3.128.0/20 +103.3.148.0/22 +103.3.152.0/21 +103.4.56.0/22 +103.4.168.0/22 +103.4.184.0/22 +103.5.36.0/22 +103.5.52.0/24 +103.5.56.0/22 +103.5.152.0/22 +103.5.168.0/22 +103.5.192.0/22 +103.5.252.0/22 +103.6.76.0/22 +103.6.120.0/22 +103.6.220.0/22 +103.7.140.0/22 +103.7.212.0/22 +103.7.216.0/21 +103.8.0.0/21 +103.8.8.0/22 +103.8.32.0/22 +103.8.52.0/22 +103.8.68.0/22 +103.8.108.0/22 +103.8.156.0/22 +103.8.200.0/21 +103.8.220.0/22 +103.9.8.0/22 +103.9.24.0/22 +103.9.108.0/22 +103.9.152.0/22 +103.9.248.0/21 +103.10.0.0/22 +103.10.16.0/22 +103.10.84.0/22 +103.10.111.0/24 +103.10.140.0/22 +103.11.16.0/22 +103.11.168.0/22 +103.11.180.0/22 +103.12.32.0/22 +103.12.136.0/22 +103.12.184.0/22 +103.12.232.0/22 +103.13.12.0/22 +103.13.124.0/22 +103.13.145.0/24 +103.13.147.0/24 +103.13.196.0/22 +103.13.244.0/22 +103.14.84.0/22 +103.14.132.0/22 +103.14.136.0/22 +103.14.156.0/22 +103.14.240.0/22 +103.15.4.0/22 +103.15.8.0/22 +103.15.16.0/22 +103.15.96.0/22 
+103.15.200.0/22 +103.16.52.0/22 +103.16.80.0/21 +103.16.88.0/22 +103.16.108.0/22 +103.16.124.0/22 +103.17.40.0/22 +103.17.64.0/22 +103.17.120.0/23 +103.17.136.0/22 +103.17.160.0/22 +103.17.204.0/22 +103.17.228.0/22 +103.18.192.0/22 +103.18.208.0/21 +103.18.224.0/22 +103.19.12.0/22 +103.19.40.0/21 +103.19.64.0/21 +103.19.72.0/22 +103.19.232.0/22 +103.20.12.0/22 +103.20.32.0/23 +103.20.34.0/24 +103.20.68.0/22 +103.20.112.0/22 +103.20.128.0/22 +103.20.160.0/22 +103.20.248.0/22 +103.21.112.0/21 +103.21.140.0/22 +103.21.176.0/22 +103.21.240.0/24 +103.21.242.0/23 +103.22.0.0/18 +103.22.64.0/19 +103.22.100.0/22 +103.22.104.0/21 +103.22.112.0/20 +103.22.188.0/22 +103.22.228.0/22 +103.22.252.0/22 +103.23.8.0/22 +103.23.56.0/22 +103.23.160.0/21 +103.23.176.0/22 +103.23.228.0/22 +103.24.24.0/22 +103.24.116.0/22 +103.24.128.0/22 +103.24.144.0/22 +103.24.176.0/22 +103.24.184.0/22 +103.24.228.0/22 +103.24.252.0/22 +103.25.20.0/22 +103.25.24.0/21 +103.25.32.0/21 +103.25.40.0/22 +103.25.48.0/22 +103.25.64.0/21 +103.25.148.0/22 +103.25.156.0/22 +103.25.216.0/22 +103.26.0.0/22 +103.26.64.0/22 +103.26.76.0/22 +103.26.116.0/22 +103.26.156.0/22 +103.26.160.0/22 +103.26.228.0/22 +103.26.240.0/22 +103.27.4.0/22 +103.27.12.0/22 +103.27.24.0/22 +103.27.56.0/22 +103.27.96.0/22 +103.27.240.0/22 +103.28.4.0/22 +103.28.8.0/22 +103.28.184.0/22 +103.28.204.0/22 +103.28.212.0/22 +103.29.16.0/22 +103.29.128.0/21 +103.29.136.0/22 +103.30.20.0/22 +103.30.96.0/22 +103.30.148.0/22 +103.30.202.0/23 +103.30.228.0/22 +103.30.236.0/22 +103.31.0.0/22 +103.31.48.0/21 +103.31.60.0/22 +103.31.64.0/21 +103.31.72.0/24 +103.31.148.0/22 +103.31.160.0/22 +103.31.168.0/22 +103.31.200.0/22 +103.32.0.0/15 +103.34.0.0/16 +103.35.0.0/19 +103.35.32.0/20 +103.35.48.0/22 +103.35.104.0/22 +103.35.220.0/22 +103.36.28.0/22 +103.36.36.0/22 +103.36.56.0/21 +103.36.64.0/22 +103.36.72.0/22 +103.36.96.0/22 +103.36.132.0/22 +103.36.136.0/22 +103.36.160.0/19 +103.36.192.0/19 +103.36.224.0/20 +103.36.240.0/21 +103.37.12.0/22 +103.37.16.0/22 +103.37.24.0/22 +103.37.44.0/22 +103.37.52.0/22 +103.37.56.0/22 +103.37.72.0/22 +103.37.100.0/22 +103.37.104.0/22 +103.37.136.0/21 +103.37.144.0/20 +103.37.160.0/21 +103.37.172.0/22 +103.37.176.0/22 +103.37.188.0/22 +103.37.208.0/20 +103.37.252.0/22 +103.38.0.0/22 +103.38.32.0/22 +103.38.40.0/21 +103.38.76.0/22 +103.38.84.0/22 +103.38.92.0/22 +103.38.96.0/22 +103.38.116.0/22 +103.38.132.0/22 +103.38.140.0/22 +103.38.220.0/22 +103.38.224.0/21 +103.38.232.0/22 +103.38.252.0/23 +103.39.64.0/22 +103.39.88.0/22 +103.39.100.0/22 +103.39.104.0/22 +103.39.160.0/19 +103.39.200.0/21 +103.39.208.0/20 +103.39.224.0/21 +103.39.232.0/22 +103.40.12.0/22 +103.40.16.0/20 +103.40.32.0/20 +103.40.88.0/22 +103.40.192.0/22 +103.40.212.0/22 +103.40.220.0/22 +103.40.228.0/22 +103.40.232.0/21 +103.40.240.0/20 +103.41.0.0/22 +103.41.52.0/22 +103.41.140.0/22 +103.41.148.0/22 +103.41.152.0/22 +103.41.160.0/21 +103.41.184.0/24 +103.41.220.0/22 +103.41.224.0/21 +103.41.232.0/22 +103.42.8.0/22 +103.42.24.0/22 +103.42.32.0/22 +103.42.64.0/21 +103.42.76.0/22 +103.42.232.0/22 +103.43.26.0/23 +103.43.96.0/21 +103.43.104.0/22 +103.43.124.0/22 +103.43.184.0/22 +103.43.192.0/21 +103.43.208.0/22 +103.43.220.0/22 +103.43.224.0/22 +103.43.240.0/22 +103.44.58.0/23 +103.44.80.0/22 +103.44.120.0/21 +103.44.144.0/22 +103.44.152.0/22 +103.44.168.0/22 +103.44.176.0/20 +103.44.192.0/20 +103.44.224.0/22 +103.44.236.0/22 +103.44.240.0/20 +103.45.0.0/18 +103.45.72.0/21 +103.45.80.0/20 +103.45.96.0/19 +103.45.128.0/18 +103.45.192.0/19 +103.45.224.0/22 +103.45.248.0/22 
+103.46.0.0/22 +103.46.12.0/22 +103.46.16.0/20 +103.46.32.0/19 +103.46.64.0/18 +103.46.128.0/21 +103.46.136.0/22 +103.46.152.0/21 +103.46.160.0/20 +103.46.176.0/21 +103.46.244.0/22 +103.46.248.0/22 +103.47.4.0/22 +103.47.20.0/22 +103.47.36.0/22 +103.47.40.0/22 +103.47.48.0/22 +103.47.80.0/22 +103.47.96.0/22 +103.47.116.0/22 +103.47.120.0/22 +103.47.136.0/21 +103.47.212.0/22 +103.48.52.0/22 +103.48.92.0/22 +103.48.148.0/22 +103.48.152.0/22 +103.48.202.0/23 +103.48.216.0/21 +103.48.224.0/20 +103.48.240.0/21 +103.49.12.0/22 +103.49.20.0/22 +103.49.72.0/21 +103.49.96.0/22 +103.49.108.0/22 +103.49.131.0/24 +103.49.176.0/21 +103.50.36.0/22 +103.50.44.0/22 +103.50.48.0/20 +103.50.64.0/21 +103.50.72.0/22 +103.50.108.0/22 +103.50.112.0/20 +103.50.132.0/22 +103.50.136.0/21 +103.50.172.0/22 +103.50.176.0/20 +103.50.192.0/21 +103.50.200.0/22 +103.50.220.0/22 +103.50.224.0/20 +103.50.240.0/21 +103.50.248.0/22 +103.52.40.0/22 +103.52.72.0/21 +103.52.80.0/21 +103.52.96.0/21 +103.52.104.0/22 +103.52.160.0/21 +103.52.172.0/22 +103.52.176.0/22 +103.52.184.0/22 +103.52.196.0/22 +103.53.64.0/21 +103.53.92.0/22 +103.53.124.0/22 +103.53.128.0/20 +103.53.144.0/22 +103.53.160.0/22 +103.53.180.0/22 +103.53.204.0/22 +103.53.208.0/21 +103.53.236.0/22 +103.53.248.0/22 +103.54.8.0/22 +103.54.48.0/22 +103.54.160.0/21 +103.54.212.0/22 +103.54.228.0/22 +103.54.240.0/22 +103.55.80.0/22 +103.55.120.0/22 +103.55.152.0/22 +103.55.172.0/22 +103.55.204.0/22 +103.55.208.0/22 +103.55.228.0/22 +103.55.236.0/22 +103.55.240.0/22 +103.56.20.0/22 +103.56.32.0/22 +103.56.56.0/21 +103.56.72.0/21 +103.56.140.0/22 +103.56.152.0/22 +103.56.184.0/22 +103.56.200.0/22 +103.57.12.0/22 +103.57.52.0/22 +103.57.56.0/22 +103.57.76.0/22 +103.57.136.0/22 +103.57.196.0/22 +103.58.24.0/22 +103.59.76.0/22 +103.59.112.0/21 +103.59.120.0/24 +103.59.123.0/24 +103.59.124.0/22 +103.59.128.0/22 +103.59.148.0/22 +103.60.32.0/22 +103.60.44.0/22 +103.60.164.0/22 +103.60.228.0/22 +103.60.236.0/22 +103.61.60.0/24 +103.61.104.0/22 +103.61.140.0/22 +103.61.152.0/21 +103.61.160.0/22 +103.61.172.0/22 +103.61.176.0/22 +103.62.24.0/22 +103.62.72.0/21 +103.62.80.0/21 +103.62.88.0/22 +103.62.96.0/19 +103.62.128.0/21 +103.62.156.0/22 +103.62.160.0/19 +103.62.192.0/22 +103.62.204.0/22 +103.62.208.0/20 +103.62.224.0/22 +103.63.32.0/19 +103.63.64.0/20 +103.63.80.0/21 +103.63.88.0/22 +103.63.140.0/22 +103.63.144.0/22 +103.63.152.0/22 +103.63.160.0/20 +103.63.176.0/21 +103.63.184.0/22 +103.63.192.0/20 +103.63.208.0/22 +103.63.240.0/20 +103.64.0.0/21 +103.64.24.0/21 +103.64.32.0/19 +103.64.64.0/18 +103.64.140.0/22 +103.64.144.0/22 +103.64.152.0/21 +103.64.160.0/19 +103.64.192.0/18 +103.65.0.0/20 +103.65.16.0/22 +103.65.48.0/20 +103.65.64.0/19 +103.65.100.0/22 +103.65.104.0/21 +103.65.112.0/20 +103.65.128.0/21 +103.65.136.0/22 +103.65.144.0/20 +103.65.160.0/20 +103.66.32.0/22 +103.66.40.0/22 +103.66.108.0/22 +103.66.200.0/22 +103.66.240.0/20 +103.67.0.0/21 +103.67.8.0/22 +103.67.40.0/21 +103.67.48.0/20 +103.67.64.0/18 +103.67.128.0/20 +103.67.144.0/21 +103.67.172.0/24 +103.67.175.0/24 +103.67.192.0/22 +103.67.212.0/22 +103.68.88.0/22 +103.68.100.0/22 +103.68.128.0/22 +103.69.16.0/22 +103.70.8.0/22 +103.70.148.0/22 +103.70.236.0/22 +103.70.252.0/22 +103.71.0.0/22 +103.71.68.0/22 +103.71.72.0/22 +103.71.80.0/21 +103.71.88.0/22 +103.71.120.0/21 +103.71.128.0/22 +103.71.196.0/22 +103.71.200.0/22 +103.71.232.0/22 +103.72.12.0/22 +103.72.16.0/20 +103.72.32.0/20 +103.72.48.0/21 +103.72.112.0/21 +103.72.124.0/22 +103.72.128.0/21 +103.72.149.0/24 +103.72.150.0/23 +103.72.172.0/22 
+103.72.180.0/22 +103.72.224.0/19 +103.73.0.0/19 +103.73.48.0/22 +103.73.116.0/22 +103.73.120.0/22 +103.73.128.0/20 +103.73.168.0/22 +103.73.176.0/22 +103.73.204.0/22 +103.73.208.0/22 +103.73.240.0/23 +103.73.244.0/22 +103.73.248.0/22 +103.74.24.0/21 +103.74.32.0/20 +103.74.48.0/22 +103.74.56.0/21 +103.74.80.0/22 +103.74.124.0/22 +103.74.148.0/22 +103.74.152.0/21 +103.74.204.0/22 +103.74.232.0/22 +103.75.87.0/24 +103.75.88.0/21 +103.75.104.0/21 +103.75.112.0/22 +103.75.120.0/22 +103.75.128.0/22 +103.75.144.0/23 +103.75.146.0/24 +103.75.152.0/22 +103.76.60.0/22 +103.76.64.0/21 +103.76.72.0/22 +103.76.92.0/22 +103.76.216.0/21 +103.76.224.0/22 +103.77.28.0/22 +103.77.52.0/22 +103.77.56.0/22 +103.77.88.0/22 +103.77.132.0/22 +103.77.148.0/22 +103.77.220.0/22 +103.78.56.0/21 +103.78.64.0/22 +103.78.124.0/22 +103.78.172.0/22 +103.78.176.0/22 +103.78.196.0/22 +103.78.228.0/22 +103.79.24.0/21 +103.79.36.0/22 +103.79.40.0/21 +103.79.56.0/21 +103.79.64.0/21 +103.79.80.0/21 +103.79.136.0/22 +103.79.188.0/22 +103.79.192.0/20 +103.79.208.0/21 +103.80.44.0/22 +103.80.72.0/22 +103.80.176.0/21 +103.80.184.0/22 +103.80.192.0/22 +103.80.200.0/22 +103.80.232.0/22 +103.81.4.0/22 +103.81.44.0/22 +103.81.48.0/22 +103.81.96.0/22 +103.81.120.0/22 +103.81.148.0/22 +103.81.164.0/22 +103.81.200.0/22 +103.81.232.0/22 +103.82.60.0/22 +103.82.68.0/22 +103.82.84.0/22 +103.82.104.0/22 +103.82.224.0/22 +103.82.236.0/22 +103.83.44.0/22 +103.83.52.0/22 +103.83.60.0/22 +103.83.72.0/22 +103.83.112.0/22 +103.83.132.0/22 +103.83.180.0/22 +103.84.0.0/22 +103.84.12.0/22 +103.84.20.0/22 +103.84.24.0/21 +103.84.48.0/22 +103.84.56.0/22 +103.84.64.0/22 +103.84.72.0/22 +103.85.44.0/22 +103.85.48.0/21 +103.85.56.0/22 +103.85.84.0/22 +103.85.136.0/22 +103.85.144.0/22 +103.85.164.0/22 +103.85.168.0/21 +103.85.176.0/22 +103.86.28.0/22 +103.86.32.0/22 +103.86.60.0/22 +103.86.129.0/24 +103.86.204.0/22 +103.86.208.0/20 +103.86.224.0/19 +103.87.0.0/21 +103.87.20.0/22 +103.87.32.0/22 +103.87.96.0/22 +103.87.132.0/22 +103.87.180.0/22 +103.87.224.0/22 +103.88.4.0/22 +103.88.8.0/21 +103.88.16.0/21 +103.88.32.0/21 +103.88.60.0/22 +103.88.64.0/22 +103.88.72.0/22 +103.88.96.0/21 +103.88.152.0/23 +103.88.164.0/22 +103.88.212.0/22 +103.89.28.0/22 +103.89.96.0/20 +103.89.112.0/22 +103.89.148.0/22 +103.89.172.0/22 +103.89.184.0/21 +103.89.192.0/19 +103.89.224.0/21 +103.90.52.0/22 +103.90.92.0/22 +103.90.100.0/22 +103.90.104.0/21 +103.90.112.0/20 +103.90.128.0/21 +103.90.152.0/22 +103.90.168.0/22 +103.90.173.0/24 +103.90.176.0/22 +103.90.188.0/22 +103.90.192.0/22 +103.91.36.0/22 +103.91.40.0/22 +103.91.108.0/22 +103.91.152.0/22 +103.91.176.0/22 +103.91.200.0/22 +103.91.208.0/21 +103.91.236.0/22 +103.92.48.0/20 +103.92.64.0/20 +103.92.80.0/22 +103.92.88.0/22 +103.92.108.0/22 +103.92.124.0/22 +103.92.132.0/22 +103.92.156.0/22 +103.92.164.0/22 +103.92.168.0/21 +103.92.176.0/20 +103.92.192.0/22 +103.92.236.0/22 +103.92.240.0/20 +103.93.0.0/21 +103.93.28.0/22 +103.93.84.0/22 +103.93.152.0/22 +103.93.180.0/22 +103.93.204.0/22 +103.94.12.0/22 +103.94.20.0/22 +103.94.29.0/24 +103.94.30.0/23 +103.94.32.0/20 +103.94.72.0/22 +103.94.88.0/22 +103.94.116.0/22 +103.94.160.0/22 +103.94.200.0/22 +103.95.28.0/24 +103.95.52.0/22 +103.95.68.0/22 +103.95.88.0/21 +103.95.136.0/21 +103.95.144.0/22 +103.95.152.0/22 +103.95.216.0/21 +103.95.224.0/22 +103.95.236.0/22 +103.95.240.0/20 +103.96.8.0/22 +103.96.124.0/22 +103.96.136.0/22 +103.96.152.0/21 +103.96.160.0/19 +103.96.192.0/20 +103.96.208.0/21 +103.96.216.0/22 +103.97.40.0/22 +103.97.60.0/23 +103.97.112.0/21 +103.97.148.0/22 
+103.97.188.0/22 +103.97.192.0/22 +103.98.40.0/21 +103.98.48.0/22 +103.98.56.0/22 +103.98.80.0/22 +103.98.88.0/22 +103.98.100.0/22 +103.98.124.0/24 +103.98.126.0/23 +103.98.136.0/21 +103.98.144.0/22 +103.98.164.0/22 +103.98.168.0/22 +103.98.180.0/22 +103.98.196.0/22 +103.98.216.0/21 +103.98.224.0/21 +103.98.232.0/22 +103.98.240.0/21 +103.98.248.0/23 +103.98.250.0/24 +103.98.252.0/22 +103.99.56.0/22 +103.99.104.0/22 +103.99.116.0/22 +103.99.120.0/22 +103.99.132.0/22 +103.99.136.0/21 +103.99.144.0/22 +103.99.152.0/22 +103.99.220.0/22 +103.99.232.0/21 +103.100.0.0/22 +103.100.32.0/22 +103.100.40.0/22 +103.100.48.0/22 +103.100.56.0/22 +103.100.64.0/22 +103.100.88.0/22 +103.100.116.0/22 +103.100.144.0/22 +103.100.240.0/22 +103.100.248.0/21 +103.101.4.0/22 +103.101.8.0/21 +103.101.60.0/22 +103.101.121.0/24 +103.101.122.0/23 +103.101.124.0/24 +103.101.126.0/23 +103.101.144.0/21 +103.101.180.0/22 +103.101.184.0/22 +103.102.76.0/22 +103.102.80.0/22 +103.102.168.0/21 +103.102.180.0/22 +103.102.184.0/21 +103.102.192.0/22 +103.102.196.0/24 +103.102.200.0/22 +103.102.208.0/21 +103.103.12.0/22 +103.103.16.0/22 +103.103.36.0/22 +103.103.72.0/22 +103.103.188.0/22 +103.103.204.0/22 +103.104.36.0/22 +103.104.40.0/22 +103.104.64.0/22 +103.104.152.0/22 +103.104.252.0/22 +103.105.0.0/21 +103.105.12.0/22 +103.105.16.0/22 +103.105.60.0/22 +103.105.116.0/22 +103.105.180.0/22 +103.105.184.0/22 +103.105.200.0/21 +103.105.220.0/22 +103.106.36.0/22 +103.106.40.0/21 +103.106.60.0/22 +103.106.68.0/22 +103.106.96.0/22 +103.106.120.0/22 +103.106.128.0/21 +103.106.190.0/23 +103.106.196.0/22 +103.106.212.0/22 +103.106.252.0/22 +103.107.0.0/22 +103.107.28.0/22 +103.107.32.0/22 +103.107.44.0/22 +103.107.72.0/22 +103.107.164.0/22 +103.107.168.0/22 +103.107.188.0/22 +103.107.192.0/22 +103.107.208.0/20 +103.108.52.0/22 +103.108.160.0/22 +103.108.196.0/22 +103.108.208.0/21 +103.108.224.0/22 +103.108.244.0/22 +103.108.251.0/24 +103.109.20.0/22 +103.109.48.0/22 +103.109.88.0/22 +103.109.248.0/22 +103.110.32.0/22 +103.110.92.0/22 +103.110.119.0/24 +103.110.127.0/24 +103.110.128.0/23 +103.110.131.0/24 +103.110.132.0/22 +103.110.136.0/22 +103.110.156.0/22 +103.110.188.0/22 +103.110.204.0/22 +103.111.64.0/22 +103.111.172.0/22 +103.111.252.0/22 +103.112.72.0/22 +103.112.88.0/21 +103.112.108.0/22 +103.112.112.0/22 +103.112.140.0/22 +103.113.4.0/22 +103.113.144.0/22 +103.113.220.0/22 +103.113.232.0/21 +103.114.4.0/22 +103.114.68.0/22 +103.114.100.0/22 +103.114.148.0/22 +103.114.156.0/23 +103.114.176.0/22 +103.114.212.0/22 +103.114.236.0/22 +103.114.240.0/22 +103.115.52.0/22 +103.115.68.0/22 +103.115.92.0/22 +103.115.120.0/22 +103.115.148.0/22 +103.115.248.0/22 +103.116.76.0/22 +103.116.92.0/22 +103.116.120.0/22 +103.116.128.0/22 +103.116.184.0/22 +103.116.220.0/22 +103.116.224.0/21 +103.117.16.0/22 +103.117.88.0/22 +103.117.188.0/22 +103.117.220.0/22 +103.118.19.0/24 +103.118.52.0/22 +103.118.56.0/21 +103.118.64.0/21 +103.118.72.0/22 +103.118.88.0/22 +103.118.173.0/24 +103.119.115.0/24 +103.119.156.0/22 +103.119.180.0/22 +103.119.200.0/22 +103.119.224.0/23 +103.120.52.0/22 +103.120.72.0/22 +103.120.76.0/24 +103.120.88.0/22 +103.120.96.0/22 +103.120.140.0/22 +103.120.196.0/22 +103.120.224.0/22 +103.121.52.0/22 +103.121.160.0/21 +103.121.250.0/24 +103.121.252.0/22 +103.122.48.0/22 +103.122.179.0/24 +103.122.192.0/22 +103.122.240.0/23 +103.122.242.0/24 +103.123.4.0/22 +103.123.56.0/22 +103.123.88.0/21 +103.123.116.0/22 +103.123.176.0/22 +103.123.200.0/21 +103.123.208.0/21 +103.124.24.0/22 +103.124.48.0/22 +103.124.64.0/22 +103.124.212.0/22 
+103.124.216.0/22 +103.125.20.0/22 +103.125.44.0/22 +103.125.132.0/22 +103.125.164.0/22 +103.125.196.0/22 +103.125.236.0/22 +103.126.0.0/22 +103.126.16.0/23 +103.126.44.0/22 +103.126.124.0/22 +103.126.128.0/22 +103.129.53.0/24 +103.129.54.0/23 +103.129.148.0/22 +103.130.132.0/22 +103.130.160.0/22 +103.130.228.0/22 +103.131.20.0/22 +103.131.36.0/22 +103.131.152.0/22 +103.131.168.0/22 +103.131.224.0/21 +103.131.240.0/22 +103.132.60.0/22 +103.132.64.0/20 +103.132.80.0/22 +103.132.104.0/21 +103.132.112.0/21 +103.132.120.0/22 +103.132.188.0/22 +103.132.208.0/21 +103.133.12.0/22 +103.133.40.0/22 +103.133.128.0/22 +103.133.232.0/22 +103.134.196.0/22 +103.135.80.0/22 +103.135.124.0/22 +103.135.148.0/22 +103.135.156.0/22 +103.135.160.0/21 +103.135.176.0/22 +103.135.184.0/22 +103.135.192.0/21 +103.135.236.0/22 +103.136.128.0/22 +103.136.232.0/22 +103.137.58.0/23 +103.137.60.0/24 +103.137.136.0/23 +103.137.149.0/24 +103.137.180.0/22 +103.137.236.0/22 +103.138.2.0/23 +103.138.135.0/24 +103.138.208.0/23 +103.138.220.0/23 +103.138.248.0/23 +103.139.22.0/23 +103.139.134.0/23 +103.139.172.0/23 +103.139.204.0/23 +103.139.212.0/23 +103.140.14.0/23 +103.140.46.0/23 +103.140.140.0/23 +103.140.144.0/23 +103.140.192.0/23 +103.141.10.0/23 +103.141.58.0/23 +103.141.128.0/23 +103.141.186.0/23 +103.141.242.0/23 +103.142.0.0/23 +103.142.28.0/23 +103.142.58.0/23 +103.142.82.0/23 +103.142.96.0/23 +103.142.122.0/23 +103.142.128.0/23 +103.142.154.0/23 +103.142.156.0/23 +103.142.180.0/23 +103.142.186.0/23 +103.142.234.0/23 +103.142.238.0/23 +103.143.16.0/22 +103.143.31.0/24 +103.143.74.0/23 +103.143.124.0/23 +103.143.132.0/22 +103.143.174.0/23 +103.143.228.0/23 +103.144.66.0/23 +103.144.70.0/23 +103.144.72.0/23 +103.144.88.0/24 +103.144.136.0/23 +103.144.158.0/23 +103.145.42.0/23 +103.145.94.0/23 +103.145.98.0/23 +103.145.188.0/23 +103.146.6.0/23 +103.146.72.0/23 +103.146.126.0/23 +103.146.138.0/23 +103.146.236.0/23 +103.146.252.0/23 +103.147.124.0/23 +103.147.206.0/23 +103.148.174.0/23 +103.149.6.0/23 +103.149.17.0/24 +103.149.44.0/23 +103.149.210.0/23 +103.149.214.0/23 +103.149.220.0/23 +103.149.242.0/23 +103.149.244.0/22 +103.150.24.0/23 +103.150.66.0/23 +103.150.72.0/23 +103.150.122.0/23 +103.150.126.0/23 +103.150.128.0/23 +103.150.146.0/23 +103.150.164.0/23 +103.150.200.0/23 +103.150.216.0/23 +103.150.244.0/23 +103.151.4.0/23 +103.151.142.0/23 +103.151.148.0/23 +103.151.158.0/23 +103.151.229.0/24 +103.152.28.0/22 +103.152.76.0/23 +103.152.80.0/23 +103.152.120.0/22 +103.152.152.0/23 +103.152.168.0/23 +103.152.186.0/23 +103.152.190.0/23 +103.152.192.0/23 +103.152.200.0/23 +103.152.208.0/23 +103.152.224.0/23 +103.152.250.0/23 +103.153.4.0/23 +103.153.99.0/24 +103.153.114.0/23 +103.153.122.0/23 +103.153.128.0/24 +103.153.132.0/23 +103.153.138.0/23 +103.153.146.0/23 +103.153.160.0/23 +103.154.18.0/23 +103.154.30.0/23 +103.154.32.0/23 +103.154.40.0/23 +103.154.66.0/23 +103.154.162.0/23 +103.154.164.0/23 +103.154.168.0/23 +103.155.14.0/23 +103.155.17.0/24 +103.155.34.0/23 +103.155.48.0/23 +103.155.76.0/23 +103.155.100.0/23 +103.155.110.0/23 +103.155.120.0/23 +103.155.214.0/23 +103.155.248.0/23 +103.156.28.0/23 +103.156.68.0/23 +103.156.78.0/23 +103.156.104.0/23 +103.156.158.0/23 +103.156.166.0/23 +103.156.174.0/23 +103.156.186.0/23 +103.156.228.0/23 +103.157.30.0/23 +103.157.138.0/23 +103.157.174.0/23 +103.157.212.0/23 +103.157.234.0/23 +103.157.254.0/23 +103.158.0.0/23 +103.158.8.0/23 +103.158.16.0/23 +103.158.200.0/23 +103.158.224.0/23 +103.159.81.0/24 +103.159.122.0/23 +103.159.124.0/23 +103.159.134.0/23 
+103.159.142.0/23 +103.160.32.0/22 +103.160.112.0/22 +103.160.244.0/23 +103.160.254.0/23 +103.161.14.0/23 +103.161.102.0/23 +103.161.138.0/23 +103.161.208.0/23 +103.161.220.0/23 +103.161.254.0/23 +103.162.10.0/23 +103.162.32.0/23 +103.162.38.0/23 +103.162.116.0/23 +103.163.28.0/23 +103.163.32.0/23 +103.163.46.0/23 +103.163.74.0/23 +103.163.180.0/23 +103.164.4.0/23 +103.164.32.0/23 +103.164.40.0/22 +103.164.64.0/23 +103.164.76.0/23 +103.164.178.0/23 +103.164.226.0/23 +103.165.44.0/23 +103.165.52.0/23 +103.165.82.0/23 +103.165.110.0/23 +103.192.0.0/19 +103.192.48.0/21 +103.192.56.0/22 +103.192.84.0/22 +103.192.88.0/21 +103.192.96.0/20 +103.192.112.0/22 +103.192.128.0/20 +103.192.144.0/22 +103.192.164.0/22 +103.192.188.0/22 +103.192.208.0/21 +103.192.216.0/22 +103.192.252.0/22 +103.193.40.0/21 +103.193.120.0/22 +103.193.140.0/22 +103.193.160.0/22 +103.193.188.0/22 +103.193.192.0/22 +103.193.212.0/22 +103.193.216.0/21 +103.193.224.0/20 +103.194.16.0/22 +103.195.112.0/22 +103.195.152.0/22 +103.195.160.0/22 +103.196.64.0/22 +103.196.72.0/22 +103.196.88.0/21 +103.196.96.0/22 +103.196.168.0/22 +103.196.185.0/24 +103.196.186.0/23 +103.197.181.0/24 +103.197.183.0/24 +103.197.228.0/22 +103.197.253.0/24 +103.197.254.0/23 +103.198.20.0/22 +103.198.60.0/22 +103.198.64.0/22 +103.198.72.0/22 +103.198.124.0/22 +103.198.156.0/22 +103.198.180.0/22 +103.198.196.0/22 +103.198.200.0/22 +103.199.164.0/22 +103.199.196.0/22 +103.199.228.0/22 +103.199.252.0/22 +103.200.52.0/22 +103.200.64.0/21 +103.200.136.0/21 +103.200.144.0/20 +103.200.160.0/19 +103.200.192.0/22 +103.200.220.0/22 +103.200.224.0/19 +103.201.0.0/20 +103.201.16.0/21 +103.201.28.0/22 +103.201.32.0/19 +103.201.64.0/22 +103.201.76.0/22 +103.201.80.0/20 +103.201.96.0/20 +103.201.112.0/21 +103.201.120.0/22 +103.201.152.0/21 +103.201.160.0/19 +103.201.192.0/18 +103.202.0.0/19 +103.202.32.0/20 +103.202.56.0/21 +103.202.64.0/18 +103.202.128.0/20 +103.202.144.0/22 +103.202.152.0/21 +103.202.160.0/19 +103.202.192.0/20 +103.202.212.0/22 +103.202.228.0/22 +103.202.236.0/22 +103.202.240.0/20 +103.203.0.0/19 +103.203.32.0/22 +103.203.96.0/19 +103.203.128.0/22 +103.203.140.0/22 +103.203.164.0/22 +103.203.168.0/22 +103.203.192.0/22 +103.203.200.0/22 +103.203.212.0/22 +103.203.216.0/22 +103.204.24.0/22 +103.204.72.0/22 +103.204.88.0/22 +103.204.112.0/22 +103.204.136.0/21 +103.204.144.0/21 +103.204.152.0/22 +103.204.196.0/22 +103.204.232.0/21 +103.205.4.0/22 +103.205.40.0/21 +103.205.52.0/22 +103.205.108.0/22 +103.205.116.0/22 +103.205.136.0/22 +103.205.162.0/24 +103.205.188.0/22 +103.205.192.0/21 +103.205.200.0/22 +103.205.236.0/22 +103.205.248.0/21 +103.206.0.0/22 +103.206.44.0/22 +103.206.148.0/22 +103.207.104.0/22 +103.207.184.0/21 +103.207.192.0/20 +103.207.208.0/21 +103.207.220.0/22 +103.207.228.0/22 +103.207.232.0/22 +103.208.12.0/22 +103.208.16.0/22 +103.208.28.0/22 +103.208.48.0/22 +103.208.148.0/22 +103.209.112.0/22 +103.209.136.0/22 +103.209.200.0/22 +103.209.208.0/22 +103.209.216.0/22 +103.210.0.0/22 +103.210.96.0/22 +103.210.156.0/22 +103.210.160.0/19 +103.210.217.0/24 +103.210.218.0/23 +103.211.44.0/22 +103.211.96.0/23 +103.211.98.0/24 +103.211.102.0/23 +103.211.156.0/22 +103.211.165.0/24 +103.211.168.0/22 +103.211.220.0/22 +103.211.248.0/22 +103.212.0.0/20 +103.212.44.0/22 +103.212.48.0/22 +103.212.84.0/22 +103.212.100.0/22 +103.212.148.0/22 +103.212.164.0/22 +103.212.196.0/22 +103.212.200.0/22 +103.212.252.0/22 +103.213.40.0/21 +103.213.48.0/20 +103.213.64.0/19 +103.213.96.0/22 +103.213.132.0/22 +103.213.136.0/21 +103.213.144.0/20 +103.213.160.0/19 
+103.213.252.0/22 +103.214.48.0/22 +103.214.84.0/22 +103.214.212.0/22 +103.214.240.0/21 +103.215.28.0/22 +103.215.32.0/21 +103.215.44.0/22 +103.215.100.0/23 +103.215.108.0/22 +103.215.116.0/22 +103.215.120.0/22 +103.215.140.0/22 +103.216.4.0/22 +103.216.8.0/21 +103.216.16.0/20 +103.216.32.0/20 +103.216.64.0/22 +103.216.108.0/22 +103.216.136.0/22 +103.216.152.0/22 +103.216.224.0/21 +103.216.240.0/20 +103.217.0.0/18 +103.217.168.0/22 +103.217.180.0/22 +103.217.184.0/21 +103.217.192.0/20 +103.218.8.0/21 +103.218.16.0/21 +103.218.28.0/22 +103.218.32.0/19 +103.218.64.0/19 +103.218.192.0/20 +103.218.208.0/21 +103.218.216.0/22 +103.219.24.0/21 +103.219.32.0/21 +103.219.64.0/22 +103.219.84.0/22 +103.219.88.0/21 +103.219.96.0/21 +103.219.176.0/22 +103.219.184.0/22 +103.220.48.0/20 +103.220.64.0/22 +103.220.92.0/22 +103.220.96.0/22 +103.220.104.0/21 +103.220.116.0/22 +103.220.120.0/21 +103.220.128.0/20 +103.220.144.0/21 +103.220.152.0/22 +103.220.160.0/19 +103.220.192.0/21 +103.220.200.0/22 +103.220.240.0/21 +103.221.12.0/22 +103.221.32.0/22 +103.221.88.0/22 +103.221.96.0/19 +103.221.128.0/18 +103.221.192.0/20 +103.222.0.0/20 +103.222.16.0/22 +103.222.24.0/21 +103.222.33.0/24 +103.222.34.0/23 +103.222.36.0/22 +103.222.40.0/21 +103.222.48.0/20 +103.222.64.0/18 +103.222.128.0/18 +103.222.192.0/19 +103.222.224.0/21 +103.222.232.0/22 +103.222.240.0/21 +103.223.16.0/20 +103.223.32.0/19 +103.223.64.0/19 +103.223.96.0/20 +103.223.112.0/21 +103.223.124.0/22 +103.223.128.0/21 +103.223.140.0/22 +103.223.144.0/20 +103.223.160.0/20 +103.223.176.0/21 +103.223.188.0/22 +103.223.192.0/18 +103.224.0.0/22 +103.224.40.0/21 +103.224.60.0/22 +103.224.220.0/22 +103.224.224.0/21 +103.224.232.0/22 +103.225.32.0/22 +103.226.40.0/22 +103.226.57.0/24 +103.226.58.0/23 +103.226.60.0/22 +103.226.80.0/22 +103.226.116.0/22 +103.226.132.0/22 +103.226.156.0/22 +103.226.180.0/22 +103.226.196.0/22 +103.227.48.0/22 +103.227.72.0/21 +103.227.80.0/22 +103.227.100.0/22 +103.227.120.0/22 +103.227.132.0/22 +103.227.136.0/22 +103.227.196.0/22 +103.227.204.0/23 +103.227.206.0/24 +103.227.212.0/22 +103.227.228.0/22 +103.228.12.0/22 +103.228.88.0/22 +103.228.136.0/22 +103.228.160.0/22 +103.228.176.0/22 +103.228.204.0/22 +103.228.208.0/22 +103.228.228.0/22 +103.228.232.0/22 +103.229.20.0/22 +103.229.136.0/22 +103.229.148.0/22 +103.229.172.0/22 +103.229.212.0/22 +103.229.216.0/21 +103.229.228.0/22 +103.229.236.0/22 +103.229.240.0/22 +103.230.0.0/22 +103.230.28.0/22 +103.230.40.0/21 +103.230.96.0/22 +103.230.196.0/22 +103.230.200.0/21 +103.230.212.0/22 +103.230.236.0/22 +103.231.16.0/21 +103.231.64.0/21 +103.231.144.0/22 +103.231.180.0/22 +103.231.244.0/22 +103.232.4.0/22 +103.232.17.168/29 +103.232.144.0/22 +103.233.4.0/22 +103.233.44.0/22 +103.233.52.0/22 +103.233.104.0/22 +103.233.128.0/22 +103.233.136.0/22 +103.233.228.0/22 +103.234.0.0/22 +103.234.20.0/22 +103.234.56.0/22 +103.234.124.0/22 +103.234.128.0/22 +103.234.172.0/22 +103.234.180.0/22 +103.235.56.0/21 +103.235.80.0/22 +103.235.85.0/24 +103.235.87.0/24 +103.235.128.0/20 +103.235.144.0/21 +103.235.184.0/22 +103.235.192.0/22 +103.235.200.0/22 +103.235.220.0/22 +103.235.224.0/19 +103.236.0.0/18 +103.236.64.0/19 +103.236.96.0/22 +103.236.120.0/22 +103.236.184.0/22 +103.236.240.0/20 +103.237.0.0/20 +103.237.24.0/21 +103.237.68.0/22 +103.237.88.0/22 +103.237.152.0/22 +103.237.176.0/20 +103.237.192.0/18 +103.238.0.0/21 +103.238.18.0/23 +103.238.20.0/22 +103.238.24.0/21 +103.238.32.0/20 +103.238.48.0/21 +103.238.56.0/22 +103.238.88.0/21 +103.238.96.0/22 +103.238.132.0/22 +103.238.140.0/22 
+103.238.144.0/22 +103.238.160.0/22 +103.238.165.0/24 +103.238.166.0/23 +103.238.168.0/21 +103.238.176.0/20 +103.238.196.0/22 +103.238.204.0/22 +103.238.252.0/22 +103.239.0.0/22 +103.239.44.0/22 +103.239.68.0/22 +103.239.152.0/21 +103.239.180.0/22 +103.239.184.0/22 +103.239.192.0/21 +103.239.204.0/22 +103.239.208.0/22 +103.239.224.0/22 +103.239.244.0/22 +103.240.16.0/22 +103.240.36.0/22 +103.240.72.0/22 +103.240.84.0/22 +103.240.124.0/22 +103.240.172.0/22 +103.240.188.0/22 +103.240.244.0/22 +103.241.12.0/22 +103.241.92.0/22 +103.241.96.0/22 +103.241.160.0/22 +103.241.184.0/21 +103.241.220.0/22 +103.242.64.0/23 +103.242.128.0/23 +103.242.160.0/22 +103.242.168.0/21 +103.242.176.0/22 +103.242.200.0/22 +103.242.212.0/22 +103.242.220.0/22 +103.242.240.0/22 +103.243.136.0/22 +103.243.252.0/22 +103.244.16.0/22 +103.244.58.0/23 +103.244.60.0/22 +103.244.64.0/20 +103.244.80.0/21 +103.244.116.0/23 +103.244.118.0/24 +103.244.164.0/22 +103.244.232.0/22 +103.244.252.0/22 +103.245.23.0/24 +103.245.52.0/22 +103.245.60.0/22 +103.245.80.0/22 +103.245.124.0/22 +103.245.128.0/22 +103.246.8.0/21 +103.246.120.0/21 +103.246.132.0/22 +103.246.152.0/22 +103.247.168.0/21 +103.247.176.0/22 +103.247.200.0/22 +103.247.212.0/22 +103.248.64.0/23 +103.248.100.0/22 +103.248.124.0/22 +103.248.152.0/22 +103.248.168.0/22 +103.248.192.0/22 +103.248.212.0/22 +103.248.224.0/21 +103.249.8.0/21 +103.249.52.0/22 +103.249.128.0/22 +103.249.136.0/22 +103.249.144.0/22 +103.249.164.0/22 +103.249.168.0/21 +103.249.176.0/22 +103.249.188.0/22 +103.249.192.0/22 +103.249.244.0/22 +103.249.252.0/22 +103.250.32.0/22 +103.250.104.0/22 +103.250.124.0/22 +103.250.180.0/22 +103.250.192.0/22 +103.250.216.0/22 +103.250.224.0/22 +103.250.236.0/22 +103.250.248.0/21 +103.251.32.0/22 +103.251.84.0/22 +103.251.96.0/22 +103.251.124.0/22 +103.251.160.0/22 +103.251.192.0/22 +103.251.204.0/22 +103.251.240.0/22 +103.252.28.0/22 +103.252.36.0/22 +103.252.64.0/22 +103.252.96.0/22 +103.252.104.0/22 +103.252.172.0/22 +103.252.204.0/22 +103.252.208.0/22 +103.252.232.0/22 +103.252.248.0/22 +103.253.4.0/22 +103.253.60.0/22 +103.253.204.0/22 +103.253.220.0/22 +103.253.224.0/22 +103.253.232.0/22 +103.254.8.0/22 +103.254.20.0/22 +103.254.64.0/21 +103.254.76.0/22 +103.254.112.0/22 +103.254.176.0/22 +103.254.188.0/22 +103.255.68.0/22 +103.255.88.0/21 +103.255.136.0/21 +103.255.184.0/22 +103.255.200.0/22 +103.255.208.0/22 +103.255.228.0/22 +106.0.0.0/24 +106.0.2.0/23 +106.0.4.0/22 +106.0.8.0/21 +106.0.16.0/20 +106.0.44.0/22 +106.0.64.0/18 +106.2.0.0/17 +106.2.128.0/20 +106.2.144.0/21 +106.2.152.0/22 +106.2.156.0/23 +106.2.160.0/19 +106.2.192.0/18 +106.3.16.0/20 +106.3.32.0/19 +106.3.64.0/20 +106.3.80.0/22 +106.3.88.0/21 +106.3.96.0/19 +106.3.128.0/19 +106.3.164.0/22 +106.3.168.0/21 +106.3.176.0/20 +106.3.192.0/18 +106.4.0.0/14 +106.8.0.0/15 +106.11.0.0/16 +106.12.0.0/14 +106.16.0.0/12 +106.32.0.0/12 +106.48.0.0/21 +106.48.8.0/22 +106.48.16.0/20 +106.48.32.0/20 +106.48.57.0/24 +106.48.60.0/24 +106.48.63.0/24 +106.48.64.0/18 +106.48.128.0/17 +106.49.1.0/24 +106.49.2.0/23 +106.49.4.0/22 +106.49.8.0/21 +106.49.16.0/20 +106.49.32.0/19 +106.49.64.0/19 +106.49.96.0/24 +106.49.98.0/23 +106.49.100.0/22 +106.49.104.0/21 +106.49.112.0/20 +106.49.128.0/17 +106.50.0.0/16 +106.52.0.0/14 +106.56.0.0/13 +106.74.0.0/16 +106.75.0.0/17 +106.75.128.0/18 +106.75.201.0/24 +106.75.204.0/22 +106.75.208.0/20 +106.75.224.0/19 +106.80.0.0/12 +106.108.0.0/14 +106.112.0.0/12 +106.224.0.0/12 +107.176.0.0/15 +109.71.4.0/24 +109.244.0.0/16 +110.6.0.0/15 +110.16.0.0/14 +110.34.40.0/21 +110.40.0.0/15 
+110.42.0.0/16 +110.43.0.0/18 +110.43.64.0/21 +110.43.72.0/22 +110.43.76.0/23 +110.43.80.0/20 +110.43.96.0/19 +110.43.128.0/17 +110.44.12.0/22 +110.44.144.0/20 +110.48.0.0/16 +110.51.0.0/16 +110.52.0.0/15 +110.56.0.0/13 +110.64.0.0/15 +110.72.0.0/15 +110.75.0.0/16 +110.76.0.0/20 +110.76.16.0/22 +110.76.20.0/24 +110.76.22.0/24 +110.76.24.0/21 +110.76.32.0/19 +110.76.132.0/22 +110.76.156.0/22 +110.76.184.0/22 +110.76.192.0/18 +110.77.0.0/17 +110.80.0.0/13 +110.88.0.0/14 +110.92.68.0/22 +110.93.32.0/19 +110.94.0.0/15 +110.96.0.0/11 +110.152.0.0/14 +110.156.0.0/15 +110.166.0.0/15 +110.172.192.0/18 +110.173.0.0/19 +110.173.32.0/20 +110.173.64.0/19 +110.173.192.0/19 +110.176.0.0/12 +110.192.0.0/11 +110.228.0.0/14 +110.232.32.0/19 +110.236.0.0/15 +110.240.0.0/12 +111.0.0.0/10 +111.66.0.0/16 +111.67.192.0/20 +111.68.64.0/19 +111.72.0.0/13 +111.85.0.0/16 +111.91.192.0/19 +111.92.248.0/21 +111.112.0.0/14 +111.116.0.0/15 +111.118.200.0/21 +111.119.64.0/18 +111.119.128.0/19 +111.120.0.0/14 +111.124.0.0/16 +111.126.0.0/15 +111.128.0.0/11 +111.160.0.0/13 +111.170.0.0/16 +111.172.0.0/14 +111.176.0.0/13 +111.186.0.0/15 +111.192.0.0/12 +111.208.0.0/13 +111.221.28.0/24 +111.221.128.0/17 +111.222.0.0/16 +111.223.4.0/22 +111.223.8.0/21 +111.223.16.0/22 +111.223.240.0/22 +111.223.249.0/24 +111.223.250.0/23 +111.224.0.0/13 +111.235.96.0/19 +111.235.156.0/22 +111.235.160.0/21 +111.235.170.0/23 +111.235.172.0/22 +111.235.176.0/20 +112.0.0.0/10 +112.64.0.0/14 +112.73.64.0/18 +112.74.0.0/16 +112.80.0.0/12 +112.96.0.0/13 +112.109.128.0/17 +112.111.0.0/16 +112.112.0.0/14 +112.116.0.0/15 +112.122.0.0/15 +112.124.0.0/14 +112.128.0.0/14 +112.132.0.0/16 +112.137.48.0/21 +112.192.0.0/14 +112.224.0.0/11 +113.0.0.0/13 +113.8.0.0/15 +113.11.192.0/19 +113.12.0.0/14 +113.16.0.0/15 +113.18.0.0/16 +113.21.232.0/21 +113.24.0.0/14 +113.31.0.0/16 +113.44.0.0/14 +113.48.0.0/14 +113.52.160.0/19 +113.52.228.0/22 +113.54.0.0/15 +113.56.0.0/15 +113.58.0.0/16 +113.59.0.0/17 +113.59.224.0/22 +113.62.0.0/15 +113.64.0.0/10 +113.128.0.0/15 +113.130.96.0/20 +113.130.112.0/21 +113.132.0.0/14 +113.136.0.0/13 +113.194.0.0/15 +113.197.100.0/23 +113.197.102.0/24 +113.197.104.0/22 +113.200.0.0/15 +113.202.0.0/16 +113.204.0.0/14 +113.208.96.0/19 +113.208.128.0/17 +113.209.0.0/16 +113.212.0.0/18 +113.212.100.0/22 +113.212.184.0/21 +113.213.0.0/17 +113.214.0.0/15 +113.218.0.0/15 +113.220.0.0/14 +113.224.0.0/12 +113.240.0.0/13 +113.248.0.0/14 +114.28.0.0/17 +114.28.128.0/18 +114.28.194.0/23 +114.28.196.0/22 +114.28.200.0/21 +114.28.208.0/20 +114.28.232.0/22 +114.28.240.0/20 +114.31.64.0/21 +114.54.0.0/15 +114.60.0.0/14 +114.64.0.0/14 +114.68.0.0/16 +114.79.64.0/18 +114.80.0.0/12 +114.96.0.0/13 +114.104.0.0/14 +114.110.0.0/20 +114.110.64.0/18 +114.111.0.0/19 +114.111.160.0/19 +114.112.4.0/22 +114.112.8.0/22 +114.112.24.0/21 +114.112.32.0/19 +114.112.64.0/19 +114.112.96.0/20 +114.112.116.0/22 +114.112.120.0/21 +114.112.136.0/21 +114.112.144.0/20 +114.112.160.0/19 +114.112.192.0/19 +114.113.0.0/17 +114.113.128.0/21 +114.113.140.0/22 +114.113.144.0/20 +114.113.160.0/19 +114.113.196.0/22 +114.113.200.0/21 +114.113.208.0/20 +114.113.224.0/20 +114.114.0.0/15 +114.116.0.0/15 +114.118.0.0/16 +114.119.0.0/17 +114.119.192.0/18 +114.132.0.0/16 +114.135.0.0/16 +114.138.0.0/15 +114.141.64.0/21 +114.141.80.0/21 +114.141.128.0/18 +114.196.0.0/15 +114.198.248.0/21 +114.208.0.0/12 +114.224.0.0/11 +115.24.0.0/14 +115.28.0.0/15 +115.31.64.0/20 +115.32.0.0/14 +115.42.56.0/22 +115.44.0.0/14 +115.48.0.0/12 +115.69.64.0/20 +115.84.0.0/18 +115.84.192.0/19 +115.85.192.0/18 
+115.100.0.0/14 +115.104.0.0/14 +115.120.0.0/14 +115.124.16.0/20 +115.148.0.0/14 +115.152.0.0/13 +115.166.64.0/19 +115.168.0.0/16 +115.169.0.0/23 +115.169.3.0/24 +115.169.6.0/24 +115.169.9.0/24 +115.169.14.0/23 +115.169.16.0/20 +115.169.39.0/24 +115.169.42.0/23 +115.169.44.0/22 +115.169.48.0/20 +115.169.64.0/18 +115.169.128.0/17 +115.170.0.0/15 +115.172.0.0/14 +115.180.0.0/14 +115.187.0.0/20 +115.190.0.0/15 +115.192.0.0/11 +115.224.0.0/12 +116.0.8.0/21 +116.0.24.0/21 +116.1.0.0/16 +116.2.0.0/15 +116.4.0.0/14 +116.8.0.0/14 +116.13.0.0/16 +116.16.0.0/12 +116.50.0.0/20 +116.52.0.0/14 +116.56.0.0/15 +116.58.128.0/20 +116.58.208.0/20 +116.60.0.0/14 +116.66.0.0/18 +116.66.64.0/19 +116.66.96.0/20 +116.66.120.0/22 +116.68.136.0/21 +116.68.176.0/21 +116.69.0.0/16 +116.70.0.0/17 +116.76.0.0/14 +116.85.0.0/17 +116.85.128.0/18 +116.85.192.0/19 +116.85.224.0/20 +116.85.240.0/21 +116.85.248.0/23 +116.85.250.0/24 +116.85.252.0/22 +116.89.144.0/20 +116.90.80.0/20 +116.90.184.0/21 +116.95.0.0/16 +116.112.0.0/14 +116.116.0.0/15 +116.128.0.0/10 +116.192.0.0/16 +116.193.16.0/20 +116.193.32.0/19 +116.193.176.0/21 +116.194.0.0/15 +116.196.0.0/21 +116.196.8.0/22 +116.196.12.0/23 +116.196.16.0/20 +116.196.32.0/19 +116.196.64.0/18 +116.196.128.0/18 +116.196.192.0/21 +116.196.200.0/23 +116.196.203.0/24 +116.196.204.0/22 +116.196.208.0/20 +116.196.224.0/19 +116.197.160.0/21 +116.197.180.0/23 +116.198.0.0/16 +116.199.0.0/17 +116.199.128.0/19 +116.204.0.0/17 +116.204.232.0/22 +116.205.0.0/16 +116.207.0.0/16 +116.208.0.0/14 +116.212.160.0/20 +116.213.64.0/18 +116.213.128.0/17 +116.214.32.0/19 +116.214.64.0/20 +116.214.128.0/17 +116.215.0.0/16 +116.216.0.0/14 +116.224.0.0/12 +116.242.0.0/15 +116.244.0.0/14 +116.248.0.0/15 +116.252.0.0/15 +116.254.104.0/21 +116.254.129.0/24 +116.254.130.0/23 +116.254.132.0/22 +116.254.136.0/21 +116.254.144.0/20 +116.254.160.0/19 +116.254.192.0/18 +116.255.128.0/17 +117.8.0.0/13 +117.21.0.0/16 +117.22.0.0/15 +117.24.0.0/13 +117.32.0.0/13 +117.40.0.0/14 +117.44.0.0/15 +117.48.0.0/15 +117.50.0.0/16 +117.51.128.0/23 +117.51.131.0/24 +117.51.132.0/22 +117.51.136.0/21 +117.51.144.0/20 +117.51.160.0/19 +117.51.192.0/18 +117.53.48.0/20 +117.53.176.0/20 +117.57.0.0/16 +117.58.0.0/18 +117.59.0.0/16 +117.60.0.0/14 +117.64.0.0/13 +117.72.0.0/15 +117.74.64.0/19 +117.74.128.0/17 +117.75.0.0/16 +117.76.0.0/14 +117.80.0.0/12 +117.100.0.0/15 +117.103.16.0/20 +117.103.40.0/21 +117.103.72.0/21 +117.103.128.0/20 +117.104.168.0/21 +117.106.0.0/15 +117.112.0.0/13 +117.120.64.0/18 +117.120.128.0/17 +117.121.0.0/17 +117.121.128.0/20 +117.121.148.0/22 +117.121.152.0/21 +117.121.160.0/19 +117.121.192.0/21 +117.122.128.0/17 +117.124.0.0/14 +117.128.0.0/10 +118.24.0.0/15 +118.26.0.0/19 +118.26.40.0/21 +118.26.48.0/20 +118.26.64.0/19 +118.26.112.0/21 +118.26.121.0/24 +118.26.122.0/23 +118.26.124.0/23 +118.26.128.0/17 +118.28.0.0/15 +118.30.0.0/20 +118.30.16.0/21 +118.30.24.0/22 +118.30.32.0/19 +118.30.64.0/18 +118.30.128.0/17 +118.31.0.0/16 +118.64.0.0/15 +118.66.0.0/16 +118.67.112.0/20 +118.72.0.0/13 +118.80.0.0/15 +118.84.0.0/15 +118.88.32.0/19 +118.88.64.0/18 +118.88.128.0/17 +118.89.0.0/16 +118.102.16.0/20 +118.102.32.0/21 +118.103.164.0/22 +118.103.168.0/21 +118.103.176.0/22 +118.103.245.0/24 +118.103.246.0/23 +118.112.0.0/13 +118.120.0.0/14 +118.124.0.0/15 +118.126.1.0/24 +118.126.2.0/23 +118.126.4.0/22 +118.126.8.0/21 +118.126.16.0/23 +118.126.18.0/24 +118.126.32.0/19 +118.126.64.0/18 +118.126.128.0/17 +118.127.128.0/19 +118.132.0.0/14 +118.144.0.0/14 +118.178.0.0/16 +118.180.0.0/14 +118.184.5.0/24 
+118.184.128.0/18 +118.184.192.0/19 +118.184.240.0/20 +118.186.0.0/15 +118.188.0.0/16 +118.190.0.0/16 +118.191.0.0/20 +118.191.32.0/19 +118.191.64.0/18 +118.191.144.0/21 +118.191.153.0/24 +118.191.154.0/23 +118.191.156.0/22 +118.191.160.0/19 +118.191.192.0/20 +118.191.209.0/24 +118.191.210.0/23 +118.191.212.0/22 +118.191.248.0/21 +118.192.0.0/16 +118.193.0.0/22 +118.193.96.0/19 +118.194.0.0/17 +118.194.128.0/18 +118.194.192.0/19 +118.194.240.0/21 +118.195.0.0/16 +118.196.0.0/14 +118.202.0.0/15 +118.204.0.0/14 +118.212.0.0/15 +118.215.192.0/18 +118.224.0.0/14 +118.228.0.0/17 +118.228.128.0/20 +118.228.144.0/21 +118.228.156.0/22 +118.228.160.0/19 +118.228.192.0/18 +118.229.0.0/16 +118.230.0.0/16 +118.239.0.0/16 +118.242.0.0/16 +118.244.0.0/14 +118.248.0.0/13 +119.0.0.0/15 +119.2.0.0/19 +119.2.128.0/17 +119.3.0.0/16 +119.4.0.0/14 +119.10.0.0/17 +119.15.136.0/21 +119.16.0.0/16 +119.18.192.0/20 +119.18.208.0/21 +119.18.224.0/19 +119.19.0.0/16 +119.20.0.0/14 +119.27.64.0/18 +119.27.128.0/17 +119.28.28.0/24 +119.29.0.0/16 +119.30.48.0/20 +119.31.192.0/19 +119.32.0.0/14 +119.36.0.0/15 +119.38.0.0/17 +119.38.128.0/18 +119.38.192.0/20 +119.38.208.0/22 +119.38.212.0/23 +119.38.214.0/27 +119.38.214.56/29 +119.38.214.64/26 +119.38.214.128/25 +119.38.215.0/24 +119.38.216.0/21 +119.39.0.0/16 +119.40.0.0/18 +119.40.64.0/20 +119.40.128.0/17 +119.41.0.0/16 +119.42.0.0/19 +119.42.128.0/20 +119.42.224.0/19 +119.44.0.0/15 +119.48.0.0/13 +119.57.0.0/16 +119.58.0.0/16 +119.59.128.0/17 +119.60.0.0/15 +119.62.0.0/16 +119.63.32.0/19 +119.75.208.0/20 +119.78.0.0/15 +119.80.0.0/16 +119.82.208.0/20 +119.84.0.0/14 +119.88.0.0/16 +119.89.0.0/17 +119.89.128.0/21 +119.89.136.0/23 +119.89.139.0/24 +119.89.140.0/22 +119.89.144.0/20 +119.89.160.0/20 +119.89.176.0/22 +119.89.180.0/23 +119.89.183.0/24 +119.89.184.0/21 +119.89.192.0/23 +119.89.194.0/24 +119.89.196.0/22 +119.89.200.0/21 +119.89.208.0/21 +119.89.217.0/24 +119.89.218.0/23 +119.89.220.0/22 +119.89.224.0/19 +119.90.0.0/15 +119.96.0.0/13 +119.108.0.0/15 +119.112.0.0/12 +119.128.0.0/12 +119.144.0.0/14 +119.148.160.0/19 +119.151.192.0/18 +119.160.200.0/21 +119.161.120.0/21 +119.161.128.0/21 +119.161.160.0/19 +119.161.192.0/18 +119.162.0.0/15 +119.164.0.0/14 +119.176.0.0/12 +119.232.0.0/15 +119.235.128.0/19 +119.235.160.0/20 +119.235.184.0/22 +119.248.0.0/14 +119.252.96.0/21 +119.252.240.0/21 +119.252.249.0/24 +119.252.252.0/23 +119.253.0.0/16 +119.254.0.0/15 +120.0.0.0/12 +120.24.0.0/14 +120.30.0.0/15 +120.32.0.0/12 +120.48.0.0/15 +120.52.0.0/16 +120.53.0.0/19 +120.53.32.0/20 +120.53.48.0/22 +120.53.54.0/23 +120.53.56.0/21 +120.53.64.0/18 +120.53.128.0/17 +120.54.0.0/15 +120.64.0.0/13 +120.72.32.0/19 +120.72.128.0/17 +120.76.0.0/14 +120.80.0.0/13 +120.88.8.0/21 +120.90.0.0/15 +120.92.0.0/17 +120.92.128.0/18 +120.92.192.0/22 +120.92.198.0/23 +120.92.200.0/21 +120.92.208.0/20 +120.92.224.0/19 +120.94.0.0/15 +120.128.0.0/13 +120.136.16.0/21 +120.136.128.0/18 +120.137.0.0/17 +120.143.128.0/19 +120.192.0.0/10 +121.0.8.0/21 +121.0.16.0/20 +121.4.0.0/22 +121.4.8.0/21 +121.4.16.0/20 +121.4.32.0/19 +121.4.64.0/18 +121.4.128.0/17 +121.5.0.0/16 +121.8.0.0/13 +121.16.0.0/12 +121.32.0.0/13 +121.40.0.0/14 +121.46.0.0/18 +121.46.76.0/22 +121.46.128.0/17 +121.47.0.0/16 +121.48.0.0/15 +121.50.8.0/21 +121.51.0.0/16 +121.52.160.0/19 +121.52.208.0/20 +121.52.224.0/19 +121.54.176.0/21 +121.55.0.0/18 +121.56.0.0/15 +121.58.0.0/17 +121.58.136.0/21 +121.58.144.0/20 +121.58.160.0/21 +121.59.0.0/20 +121.59.16.0/21 +121.59.24.0/22 +121.59.28.0/24 +121.59.31.0/24 +121.59.33.0/24 +121.59.36.0/22 
+121.59.40.0/21 +121.59.48.0/20 +121.59.64.0/18 +121.59.128.0/20 +121.59.144.0/21 +121.59.152.0/24 +121.59.154.0/23 +121.59.156.0/22 +121.59.160.0/19 +121.59.192.0/18 +121.60.0.0/14 +121.68.0.0/14 +121.76.0.0/15 +121.79.128.0/18 +121.89.0.0/16 +121.100.128.0/17 +121.101.0.0/18 +121.101.208.0/20 +121.192.0.0/13 +121.200.192.0/23 +121.200.194.0/24 +121.200.196.0/22 +121.201.0.0/16 +121.204.0.0/14 +121.224.0.0/12 +121.248.0.0/14 +121.255.0.0/16 +122.0.64.0/18 +122.0.128.0/17 +122.4.0.0/14 +122.9.0.0/16 +122.10.132.0/23 +122.10.136.0/23 +122.10.216.0/22 +122.10.228.0/22 +122.10.232.0/21 +122.10.240.0/22 +122.11.0.0/17 +122.12.0.0/15 +122.14.0.0/17 +122.14.192.0/18 +122.48.0.0/16 +122.49.0.0/18 +122.51.0.0/16 +122.64.0.0/14 +122.68.0.0/15 +122.70.0.0/18 +122.70.64.0/19 +122.70.96.0/20 +122.70.112.0/21 +122.70.120.0/22 +122.70.124.0/23 +122.70.126.0/24 +122.70.128.0/17 +122.71.0.0/16 +122.72.0.0/13 +122.80.0.0/12 +122.96.0.0/15 +122.98.144.0/20 +122.98.160.0/21 +122.98.172.0/22 +122.98.176.0/20 +122.98.192.0/21 +122.98.232.0/21 +122.98.240.0/20 +122.102.0.0/20 +122.102.64.0/19 +122.112.0.0/18 +122.112.64.0/19 +122.112.96.0/22 +122.112.118.0/24 +122.112.122.0/24 +122.112.125.0/24 +122.112.128.0/17 +122.113.0.0/16 +122.114.0.0/16 +122.115.0.0/18 +122.115.80.0/20 +122.115.96.0/19 +122.115.128.0/17 +122.119.0.0/16 +122.128.100.0/22 +122.128.120.0/21 +122.136.0.0/13 +122.144.128.0/17 +122.152.192.0/18 +122.156.0.0/14 +122.188.0.0/14 +122.192.0.0/14 +122.198.0.0/16 +122.200.40.0/21 +122.200.64.0/18 +122.201.48.0/20 +122.204.0.0/14 +122.224.0.0/12 +122.240.0.0/13 +122.248.24.0/21 +122.248.48.0/20 +122.255.64.0/21 +123.0.128.0/21 +123.0.136.0/23 +123.0.139.0/24 +123.0.140.0/22 +123.0.144.0/20 +123.0.160.0/19 +123.4.0.0/14 +123.8.0.0/13 +123.49.130.0/23 +123.49.132.0/22 +123.49.136.0/22 +123.49.152.0/21 +123.49.160.0/19 +123.49.192.0/18 +123.50.160.0/19 +123.52.0.0/14 +123.56.0.0/15 +123.58.0.0/18 +123.58.64.0/20 +123.58.80.0/21 +123.58.88.0/22 +123.58.96.0/19 +123.58.128.0/18 +123.58.224.0/19 +123.59.0.0/16 +123.60.0.0/15 +123.62.0.0/16 +123.64.0.0/11 +123.96.0.0/15 +123.98.0.0/17 +123.99.128.0/17 +123.100.0.0/19 +123.100.232.0/24 +123.101.0.0/16 +123.103.0.0/20 +123.103.16.0/21 +123.103.24.0/22 +123.103.28.0/23 +123.103.30.0/24 +123.103.32.0/19 +123.103.64.0/18 +123.108.134.0/24 +123.108.138.0/23 +123.108.140.0/24 +123.108.142.0/24 +123.108.208.0/20 +123.112.0.0/12 +123.128.0.0/13 +123.137.0.0/16 +123.138.0.0/15 +123.144.0.0/12 +123.160.0.0/12 +123.176.60.0/22 +123.176.80.0/20 +123.177.0.0/16 +123.178.0.0/15 +123.180.0.0/14 +123.184.0.0/13 +123.196.0.0/15 +123.199.128.0/17 +123.206.0.0/15 +123.232.0.0/14 +123.242.0.0/17 +123.242.192.0/21 +123.244.0.0/14 +123.249.0.0/16 +123.253.240.0/22 +123.254.96.0/21 +124.6.64.0/18 +124.14.0.0/15 +124.16.0.0/15 +124.20.0.0/14 +124.28.192.0/18 +124.29.0.0/17 +124.31.0.0/16 +124.40.112.0/20 +124.40.128.0/18 +124.40.192.0/19 +124.40.240.0/22 +124.42.0.0/16 +124.47.0.0/18 +124.64.0.0/15 +124.66.0.0/17 +124.67.0.0/16 +124.68.0.0/19 +124.68.32.0/20 +124.68.48.0/21 +124.68.56.0/22 +124.68.60.0/23 +124.68.63.0/24 +124.68.64.0/18 +124.68.128.0/18 +124.68.192.0/19 +124.68.224.0/23 +124.68.226.0/24 +124.68.228.0/22 +124.68.232.0/21 +124.68.240.0/23 +124.68.242.0/24 +124.68.244.0/23 +124.68.254.0/24 +124.69.0.0/16 +124.70.0.0/15 +124.72.0.0/13 +124.88.0.0/13 +124.108.8.0/21 +124.108.40.0/21 +124.109.96.0/21 +124.112.0.0/14 +124.116.0.0/15 +124.118.0.0/16 +124.119.0.0/17 +124.119.128.0/18 +124.119.192.0/19 +124.119.224.0/20 +124.119.240.0/22 +124.119.244.0/23 +124.119.246.0/25 
+124.119.246.128/26 +124.119.246.192/27 +124.119.246.224/28 +124.119.246.240/29 +124.119.246.248/30 +124.119.246.254/31 +124.119.247.0/24 +124.119.248.0/21 +124.126.0.0/15 +124.128.0.0/13 +124.147.128.0/17 +124.150.137.0/24 +124.151.0.0/16 +124.152.0.0/16 +124.160.0.0/13 +124.172.0.0/16 +124.173.32.0/19 +124.173.64.0/18 +124.173.128.0/17 +124.174.0.0/15 +124.192.0.0/15 +124.196.0.0/16 +124.200.0.0/13 +124.220.0.0/14 +124.224.0.0/12 +124.240.0.0/17 +124.240.128.0/18 +124.242.0.0/16 +124.243.192.0/18 +124.248.0.0/17 +124.249.0.0/16 +124.250.0.0/15 +124.254.0.0/18 +125.31.192.0/18 +125.32.0.0/12 +125.58.128.0/17 +125.61.128.0/17 +125.62.0.0/18 +125.64.0.0/11 +125.96.0.0/15 +125.98.0.0/16 +125.104.0.0/13 +125.112.0.0/12 +125.169.0.0/16 +125.171.0.0/16 +125.208.0.0/19 +125.208.37.0/24 +125.208.40.0/24 +125.208.45.0/24 +125.208.46.0/23 +125.208.48.0/20 +125.210.0.0/15 +125.213.0.0/17 +125.214.96.0/19 +125.215.0.0/18 +125.216.0.0/13 +125.254.128.0/17 +128.108.0.0/16 +129.28.0.0/16 +129.204.0.0/16 +129.211.0.0/16 +129.223.254.0/24 +130.36.146.0/23 +130.214.218.0/23 +131.228.96.0/24 +131.253.12.0/29 +131.253.12.80/28 +131.253.12.240/29 +132.232.0.0/16 +132.237.134.0/24 +134.175.0.0/16 +135.84.254.0/23 +135.159.208.0/20 +135.244.80.0/20 +137.59.59.0/24 +137.59.88.0/22 +138.32.244.0/24 +139.5.56.0/22 +139.5.61.0/24 +139.5.62.0/23 +139.5.80.0/22 +139.5.92.0/22 +139.5.128.0/22 +139.5.160.0/22 +139.5.192.0/22 +139.5.204.0/22 +139.5.244.0/22 +139.9.0.0/16 +139.129.0.0/16 +139.138.238.0/28 +139.148.0.0/16 +139.155.0.0/16 +139.159.0.0/19 +139.159.32.0/21 +139.159.40.0/22 +139.159.52.0/22 +139.159.56.0/21 +139.159.64.0/19 +139.159.96.0/20 +139.159.112.0/24 +139.159.113.24/29 +139.159.113.32/27 +139.159.113.64/26 +139.159.113.128/25 +139.159.114.0/23 +139.159.116.0/23 +139.159.120.0/21 +139.159.128.0/17 +139.170.0.0/16 +139.176.0.0/16 +139.183.0.0/16 +139.186.0.0/16 +139.189.0.0/16 +139.196.0.0/15 +139.198.0.0/18 +139.198.66.0/23 +139.198.68.0/22 +139.198.72.0/21 +139.198.80.0/20 +139.198.96.0/20 +139.198.116.0/22 +139.198.122.0/23 +139.198.124.0/22 +139.198.128.0/17 +139.199.0.0/16 +139.200.0.0/13 +139.208.0.0/13 +139.217.0.0/16 +139.219.0.0/16 +139.220.0.0/17 +139.220.128.0/18 +139.220.192.0/22 +139.220.196.0/23 +139.220.200.0/21 +139.220.208.0/23 +139.220.212.0/22 +139.220.216.0/21 +139.220.224.0/19 +139.221.0.0/16 +139.224.0.0/16 +139.226.0.0/15 +140.75.0.0/16 +140.101.208.0/24 +140.143.0.0/16 +140.179.0.0/16 +140.205.0.0/16 +140.206.0.0/15 +140.210.0.0/16 +140.224.0.0/16 +140.237.0.0/16 +140.240.0.0/16 +140.242.223.0/24 +140.242.224.0/24 +140.243.0.0/16 +140.246.0.0/16 +140.249.0.0/16 +140.250.0.0/16 +140.255.0.0/16 +142.70.0.0/16 +142.86.0.0/16 +144.0.0.0/16 +144.7.0.0/16 +144.12.0.0/16 +144.36.146.0/23 +144.48.64.0/22 +144.48.88.0/22 +144.48.156.0/22 +144.48.180.0/22 +144.48.184.0/22 +144.48.204.0/22 +144.48.208.0/21 +144.52.0.0/16 +144.123.0.0/16 +144.211.80.0/24 +144.211.138.0/24 +144.255.0.0/16 +146.56.192.0/18 +146.88.175.0/24 +146.196.56.0/22 +146.196.68.0/22 +146.196.92.0/22 +146.196.112.0/21 +146.196.124.0/22 +146.217.137.0/24 +146.222.79.0/24 +146.222.81.0/24 +146.222.94.0/24 +147.243.13.32/27 +147.243.13.64/27 +147.243.14.32/27 +148.70.0.0/16 +150.0.0.0/16 +150.115.0.0/16 +150.121.0.0/16 +150.122.0.0/16 +150.129.136.0/22 +150.129.192.0/22 +150.129.252.0/22 +150.138.0.0/15 +150.158.0.0/16 +150.222.88.0/23 +150.223.0.0/16 +150.242.0.0/21 +150.242.8.0/22 +150.242.28.0/22 +150.242.44.0/22 +150.242.48.0/21 +150.242.56.0/22 +150.242.76.0/22 +150.242.80.0/22 +150.242.92.0/22 +150.242.96.0/22 
+150.242.112.0/21 +150.242.120.0/22 +150.242.152.0/22 +150.242.158.0/24 +150.242.160.0/21 +150.242.168.0/22 +150.242.184.0/21 +150.242.192.0/22 +150.242.226.0/23 +150.242.232.0/21 +150.242.240.0/21 +150.242.248.0/22 +150.248.0.0/16 +150.255.0.0/16 +152.32.178.0/23 +152.104.128.0/17 +152.136.0.0/16 +153.0.0.0/16 +153.3.0.0/16 +153.34.0.0/15 +153.36.0.0/15 +153.99.0.0/16 +153.101.0.0/16 +153.118.0.0/15 +154.8.128.0/17 +154.209.251.0/24 +155.126.176.0/23 +156.107.160.0/24 +156.107.170.0/24 +156.107.179.0/24 +156.107.181.0/24 +156.154.62.0/23 +157.0.0.0/16 +157.18.0.0/16 +157.61.0.0/16 +157.119.8.0/21 +157.119.16.0/22 +157.119.28.0/22 +157.119.132.0/22 +157.119.136.0/21 +157.119.144.0/20 +157.119.160.0/21 +157.119.172.0/22 +157.119.192.0/21 +157.119.240.0/22 +157.119.252.0/22 +157.122.0.0/16 +157.133.186.0/23 +157.133.192.0/21 +157.133.212.0/24 +157.133.236.0/24 +157.148.0.0/16 +157.156.0.0/16 +157.255.0.0/16 +158.79.0.0/24 +158.79.2.0/23 +158.79.4.0/22 +158.79.8.0/21 +158.79.16.0/20 +158.79.32.0/19 +158.79.64.0/18 +158.79.128.0/17 +159.75.0.0/16 +159.221.232.0/22 +159.226.0.0/16 +160.19.208.0/21 +160.19.216.0/22 +160.20.48.0/22 +160.62.10.0/24 +160.83.109.0/24 +160.83.110.0/23 +160.202.60.0/23 +160.202.62.0/24 +160.202.148.0/22 +160.202.152.0/22 +160.202.212.0/22 +160.202.216.0/21 +160.202.224.0/19 +160.238.64.0/22 +161.120.0.0/16 +161.163.0.0/21 +161.163.28.0/23 +161.189.0.0/16 +161.207.0.0/16 +162.14.0.0/21 +162.14.12.0/22 +162.14.16.0/21 +162.14.24.0/23 +162.14.26.0/24 +162.14.27.0/25 +162.14.27.128/29 +162.14.27.136/32 +162.14.27.138/31 +162.14.27.140/30 +162.14.27.144/28 +162.14.27.160/29 +162.14.27.168/30 +162.14.27.173/32 +162.14.27.174/31 +162.14.27.176/29 +162.14.27.184/30 +162.14.27.188/31 +162.14.27.191/32 +162.14.27.192/26 +162.14.28.0/22 +162.14.32.0/19 +162.14.64.0/18 +162.14.128.0/17 +162.105.0.0/16 +163.0.0.0/16 +163.47.4.0/22 +163.53.0.0/20 +163.53.36.0/22 +163.53.40.0/22 +163.53.48.0/20 +163.53.64.0/22 +163.53.88.0/21 +163.53.96.0/19 +163.53.128.0/21 +163.53.136.0/22 +163.53.160.0/20 +163.53.188.0/22 +163.53.220.0/22 +163.53.240.0/22 +163.116.202.0/23 +163.125.0.0/16 +163.142.0.0/16 +163.177.0.0/16 +163.179.0.0/16 +163.204.0.0/16 +163.244.246.0/24 +164.52.80.0/24 +165.84.197.0/24 +165.84.251.0/25 +165.84.251.128/26 +165.84.251.192/27 +165.84.251.224/28 +165.84.251.240/29 +165.84.251.248/30 +165.84.251.253/32 +165.84.251.254/31 +165.154.10.0/23 +165.154.14.0/23 +165.154.16.0/20 +165.154.32.0/19 +165.154.64.0/18 +165.154.128.0/17 +165.156.30.0/24 +165.254.82.27/32 +166.111.0.0/16 +167.139.0.0/16 +167.189.0.0/16 +167.220.244.0/22 +168.159.144.0/21 +168.159.152.0/22 +168.159.156.0/23 +168.159.158.0/24 +168.160.0.0/16 +168.230.0.0/24 +170.179.0.0/16 +170.225.224.0/23 +170.252.152.0/21 +171.8.0.0/13 +171.34.0.0/15 +171.36.0.0/14 +171.40.0.0/13 +171.80.0.0/12 +171.104.0.0/13 +171.112.0.0/12 +171.208.0.0/12 +172.81.192.0/18 +173.39.200.0/23 +175.0.0.0/12 +175.16.0.0/13 +175.24.0.0/14 +175.30.0.0/15 +175.42.0.0/15 +175.44.0.0/16 +175.46.0.0/15 +175.48.0.0/12 +175.64.0.0/11 +175.102.0.0/16 +175.106.128.0/17 +175.111.144.0/20 +175.111.160.0/20 +175.111.184.0/22 +175.146.0.0/15 +175.148.0.0/14 +175.152.0.0/14 +175.158.96.0/22 +175.160.0.0/12 +175.176.156.0/22 +175.176.188.0/22 +175.178.0.0/16 +175.184.128.0/18 +175.185.0.0/16 +175.186.0.0/15 +175.188.0.0/14 +180.76.16.0/20 +180.76.32.0/19 +180.76.64.0/18 +180.76.128.0/17 +180.77.0.0/16 +180.78.0.0/15 +180.84.0.0/15 +180.86.0.0/16 +180.87.93.98/32 +180.88.0.0/14 +180.92.176.0/23 +180.94.56.0/21 +180.94.96.0/23 +180.94.98.0/24 
+180.94.100.0/22 +180.94.104.0/21 +180.94.120.0/21 +180.95.128.0/17 +180.96.0.0/11 +180.129.128.0/17 +180.130.0.0/16 +180.136.0.0/13 +180.148.16.0/21 +180.148.152.0/21 +180.148.216.0/21 +180.148.224.0/19 +180.149.128.0/19 +180.150.160.0/21 +180.150.176.0/20 +180.152.0.0/13 +180.160.0.0/12 +180.178.112.0/21 +180.178.192.0/18 +180.184.0.0/14 +180.188.0.0/17 +180.189.148.0/22 +180.200.252.0/22 +180.201.0.0/16 +180.202.0.0/15 +180.208.0.0/15 +180.210.212.0/22 +180.210.233.0/24 +180.210.236.0/22 +180.212.0.0/15 +180.222.224.0/19 +180.223.0.0/18 +180.223.80.0/20 +180.223.96.0/19 +180.233.0.0/18 +180.233.64.0/19 +180.233.144.0/22 +180.235.64.0/19 +180.235.112.0/22 +182.16.144.0/21 +182.16.192.0/19 +182.18.0.0/17 +182.23.184.0/21 +182.23.200.0/21 +182.32.0.0/12 +182.48.96.0/19 +182.49.0.0/16 +182.50.0.0/22 +182.50.8.0/21 +182.50.112.0/20 +182.51.0.0/16 +182.54.0.0/17 +182.61.0.0/18 +182.61.128.0/19 +182.61.192.0/18 +182.80.0.0/13 +182.88.0.0/14 +182.92.0.0/16 +182.96.0.0/11 +182.128.0.0/12 +182.144.0.0/13 +182.157.0.0/16 +182.160.52.0/22 +182.160.56.0/22 +182.160.60.0/23 +182.160.62.0/24 +182.160.64.0/19 +182.174.0.0/15 +182.200.0.0/13 +182.236.128.0/17 +182.237.24.0/21 +182.238.0.0/16 +182.239.0.0/19 +182.240.0.0/13 +182.254.0.0/18 +182.254.64.0/19 +182.254.96.0/20 +182.254.112.0/22 +182.254.117.0/24 +182.254.119.0/24 +182.254.120.0/21 +182.254.128.0/17 +183.0.0.0/10 +183.64.0.0/13 +183.78.160.0/21 +183.78.180.0/22 +183.81.180.0/22 +183.84.0.0/15 +183.91.128.0/22 +183.91.136.0/21 +183.91.144.0/20 +183.92.0.0/14 +183.128.0.0/11 +183.160.0.0/13 +183.168.0.0/15 +183.170.0.0/16 +183.172.0.0/14 +183.184.0.0/13 +183.192.0.0/10 +185.109.236.0/24 +185.216.118.0/26 +185.216.118.64/27 +185.216.118.112/28 +185.216.118.128/25 +188.131.128.0/17 +192.11.23.0/24 +192.11.26.0/24 +192.11.39.0/24 +192.11.236.0/24 +192.23.191.0/24 +192.55.10.0/23 +192.55.40.0/24 +192.55.46.0/24 +192.55.68.0/22 +192.102.204.0/22 +192.124.154.0/24 +192.137.31.0/24 +192.140.128.0/21 +192.140.136.0/22 +192.140.156.0/22 +192.140.160.0/19 +192.140.192.0/20 +192.140.208.0/21 +192.144.128.0/17 +192.163.11.0/24 +192.232.97.0/24 +193.17.120.0/22 +193.20.64.0/22 +193.112.0.0/16 +193.200.222.160/28 +194.138.136.0/24 +194.138.202.0/23 +194.138.245.0/24 +195.142.215.0/24 +198.175.100.0/22 +198.208.17.0/24 +198.208.19.0/24 +199.7.72.0/24 +199.65.192.0/21 +199.244.144.0/24 +202.0.100.0/23 +202.0.122.0/23 +202.1.105.0/24 +202.1.106.0/24 +202.3.128.0/23 +202.4.128.0/19 +202.4.252.0/22 +202.5.208.0/21 +202.5.216.0/22 +202.6.6.0/23 +202.6.66.0/23 +202.6.72.0/23 +202.6.87.0/24 +202.6.88.0/23 +202.6.92.0/23 +202.6.103.0/24 +202.6.108.0/24 +202.6.110.0/23 +202.6.114.0/24 +202.6.176.0/20 +202.8.0.0/24 +202.8.2.0/23 +202.8.4.0/23 +202.8.12.0/24 +202.8.24.0/24 +202.8.77.0/24 +202.8.128.0/19 +202.8.192.0/20 +202.9.32.0/24 +202.9.34.0/23 +202.9.48.0/23 +202.9.51.0/24 +202.9.52.0/23 +202.9.54.0/24 +202.9.57.0/24 +202.9.58.0/23 +202.10.64.0/21 +202.10.74.0/23 +202.10.76.0/22 +202.10.112.0/20 +202.12.1.0/24 +202.12.2.0/24 +202.12.17.0/24 +202.12.18.0/23 +202.12.72.0/24 +202.12.84.0/23 +202.12.96.0/24 +202.12.98.0/23 +202.12.106.0/24 +202.12.111.0/24 +202.12.116.0/24 +202.14.64.0/23 +202.14.69.0/24 +202.14.73.0/24 +202.14.74.0/23 +202.14.76.0/24 +202.14.78.0/23 +202.14.88.0/24 +202.14.97.0/24 +202.14.104.0/23 +202.14.108.0/23 +202.14.111.0/24 +202.14.114.0/23 +202.14.118.0/23 +202.14.124.0/23 +202.14.127.0/24 +202.14.129.0/24 +202.14.135.0/24 +202.14.136.0/24 +202.14.149.0/24 +202.14.151.0/24 +202.14.157.0/24 +202.14.158.0/23 +202.14.169.0/24 +202.14.170.0/23 
+202.14.172.0/22 +202.14.176.0/24 +202.14.184.0/23 +202.14.208.0/23 +202.14.213.0/24 +202.14.219.0/24 +202.14.220.0/24 +202.14.222.0/23 +202.14.225.0/24 +202.14.226.0/23 +202.14.231.0/24 +202.14.235.0/24 +202.14.236.0/22 +202.14.246.0/24 +202.14.251.0/24 +202.20.66.0/24 +202.20.79.0/24 +202.20.87.0/24 +202.20.88.0/23 +202.20.90.0/24 +202.20.94.0/23 +202.20.114.0/24 +202.20.117.0/24 +202.20.120.0/24 +202.20.125.0/24 +202.20.126.0/23 +202.21.48.0/20 +202.21.131.0/24 +202.21.132.0/24 +202.21.141.0/24 +202.21.142.0/24 +202.21.147.0/24 +202.21.148.0/24 +202.21.150.0/23 +202.21.152.0/23 +202.21.154.0/24 +202.21.156.0/24 +202.21.208.0/24 +202.22.248.0/21 +202.27.12.0/24 +202.27.14.0/24 +202.27.136.0/23 +202.36.226.0/24 +202.38.0.0/22 +202.38.8.0/21 +202.38.48.0/20 +202.38.64.0/18 +202.38.128.0/21 +202.38.136.0/23 +202.38.138.0/24 +202.38.140.0/22 +202.38.146.0/23 +202.38.149.0/24 +202.38.150.0/23 +202.38.152.0/22 +202.38.156.0/24 +202.38.158.0/23 +202.38.160.0/23 +202.38.164.0/22 +202.38.168.0/22 +202.38.176.0/23 +202.38.184.0/21 +202.38.192.0/18 +202.40.4.0/23 +202.40.7.0/24 +202.40.15.0/24 +202.40.135.0/24 +202.40.136.0/24 +202.40.140.0/24 +202.40.143.0/24 +202.40.144.0/23 +202.40.150.0/24 +202.40.155.0/24 +202.40.156.0/24 +202.40.158.0/23 +202.40.162.0/24 +202.41.8.0/23 +202.41.11.0/24 +202.41.12.0/23 +202.41.128.0/24 +202.41.130.0/23 +202.41.142.0/24 +202.41.152.0/21 +202.41.192.0/24 +202.41.196.0/22 +202.41.200.0/22 +202.41.240.0/20 +202.43.76.0/22 +202.43.144.0/20 +202.44.16.0/20 +202.44.48.0/22 +202.44.67.0/24 +202.44.74.0/24 +202.44.97.0/24 +202.44.129.0/24 +202.44.132.0/23 +202.44.146.0/23 +202.45.0.0/23 +202.45.2.0/24 +202.45.15.0/24 +202.45.16.0/20 +202.46.16.0/23 +202.46.18.0/24 +202.46.20.0/23 +202.46.128.0/24 +202.46.224.0/20 +202.47.82.0/23 +202.47.96.0/20 +202.47.126.0/24 +202.47.128.0/24 +202.47.130.0/23 +202.52.34.0/24 +202.52.143.0/24 +202.53.140.0/24 +202.53.143.0/24 +202.53.202.0/24 +202.57.212.0/22 +202.57.216.0/22 +202.57.240.0/20 +202.58.0.0/24 +202.58.112.0/22 +202.59.0.0/23 +202.59.212.0/22 +202.59.236.0/24 +202.59.240.0/24 +202.60.48.0/21 +202.60.96.0/21 +202.60.112.0/20 +202.60.132.0/22 +202.60.136.0/21 +202.60.144.0/20 +202.61.68.0/22 +202.61.76.0/22 +202.61.88.0/22 +202.61.123.0/24 +202.61.127.0/24 +202.62.112.0/22 +202.62.248.0/22 +202.62.252.0/24 +202.62.255.0/24 +202.63.80.0/20 +202.63.160.0/19 +202.63.248.0/22 +202.63.253.0/24 +202.65.0.0/21 +202.65.8.0/23 +202.67.0.0/22 +202.69.4.0/23 +202.69.16.0/20 +202.70.0.0/19 +202.70.96.0/20 +202.70.192.0/20 +202.71.32.0/20 +202.72.40.0/21 +202.72.80.0/20 +202.72.112.0/20 +202.73.128.0/22 +202.73.240.0/20 +202.74.8.0/21 +202.74.36.0/24 +202.74.42.0/24 +202.74.52.0/24 +202.74.80.0/20 +202.74.254.0/23 +202.75.208.0/20 +202.75.252.0/22 +202.76.247.0/24 +202.76.252.0/22 +202.77.80.0/21 +202.77.92.0/22 +202.78.8.0/21 +202.79.224.0/21 +202.79.248.0/22 +202.80.192.0/20 +202.81.0.0/22 +202.81.176.0/20 +202.83.252.0/22 +202.84.4.0/22 +202.84.8.0/21 +202.84.16.0/23 +202.84.22.0/24 +202.84.24.0/21 +202.85.208.0/20 +202.86.249.0/24 +202.87.80.0/20 +202.88.32.0/22 +202.89.8.0/21 +202.89.96.0/22 +202.89.108.0/22 +202.89.119.0/24 +202.89.232.0/21 +202.90.0.0/22 +202.90.16.0/20 +202.90.37.0/24 +202.90.96.0/19 +202.90.193.0/24 +202.90.196.0/24 +202.90.205.0/24 +202.90.224.0/20 +202.91.0.0/22 +202.91.96.0/20 +202.91.128.0/22 +202.91.176.0/20 +202.91.224.0/19 +202.92.0.0/22 +202.92.8.0/21 +202.92.48.0/20 +202.92.252.0/22 +202.93.0.0/22 +202.93.252.0/22 +202.94.0.0/19 +202.94.74.0/24 +202.94.81.0/24 +202.94.92.0/22 +202.95.240.0/21 
+202.95.252.0/22 +202.96.0.0/12 +202.112.0.0/13 +202.120.0.0/15 +202.122.0.0/21 +202.122.32.0/21 +202.122.64.0/19 +202.122.112.0/20 +202.122.128.0/24 +202.122.132.0/24 +202.123.96.0/20 +202.123.116.0/22 +202.123.120.0/22 +202.124.16.0/21 +202.124.24.0/22 +202.125.107.0/24 +202.125.109.0/24 +202.125.112.0/20 +202.125.176.0/20 +202.127.0.0/21 +202.127.12.0/22 +202.127.16.0/20 +202.127.40.0/21 +202.127.48.0/20 +202.127.112.0/20 +202.127.128.0/19 +202.127.160.0/21 +202.127.192.0/20 +202.127.208.0/23 +202.127.212.0/22 +202.127.216.0/21 +202.127.224.0/19 +202.129.208.0/24 +202.130.0.0/19 +202.130.39.0/24 +202.130.224.0/19 +202.131.16.0/21 +202.131.59.0/24 +202.131.208.0/20 +202.133.32.0/20 +202.134.58.0/24 +202.134.128.0/20 +202.134.208.0/20 +202.136.48.0/20 +202.136.208.0/20 +202.136.224.0/20 +202.136.248.0/22 +202.136.254.0/23 +202.137.231.0/24 +202.140.140.0/22 +202.140.144.0/20 +202.141.160.0/19 +202.142.16.0/20 +202.143.4.0/22 +202.143.16.0/20 +202.143.32.0/20 +202.143.56.0/21 +202.143.100.0/22 +202.143.104.0/22 +202.146.160.0/20 +202.146.186.0/24 +202.146.188.0/22 +202.146.196.0/22 +202.146.200.0/21 +202.147.12.0/26 +202.147.12.64/27 +202.147.12.96/28 +202.147.12.128/29 +202.147.12.136/30 +202.147.12.140/31 +202.147.12.142/32 +202.147.12.144/28 +202.147.12.160/27 +202.147.12.192/26 +202.147.144.0/20 +202.148.32.0/20 +202.148.64.0/18 +202.149.32.0/19 +202.149.160.0/19 +202.149.224.0/19 +202.150.16.0/20 +202.150.32.0/20 +202.150.56.0/22 +202.150.192.0/20 +202.150.224.0/19 +202.151.0.0/22 +202.151.128.0/19 +202.152.176.0/20 +202.153.0.0/22 +202.153.7.0/24 +202.153.48.0/20 +202.157.192.0/19 +202.158.160.0/19 +202.158.242.0/24 +202.160.140.0/22 +202.160.156.0/22 +202.160.176.0/20 +202.162.67.0/24 +202.162.75.0/24 +202.164.0.0/20 +202.164.96.0/19 +202.165.176.0/20 +202.165.208.0/20 +202.165.239.0/24 +202.165.240.0/23 +202.165.243.0/24 +202.165.245.0/24 +202.165.251.0/24 +202.165.252.0/22 +202.166.224.0/19 +202.168.80.0/22 +202.168.128.0/20 +202.168.160.0/19 +202.170.128.0/19 +202.170.216.0/21 +202.170.224.0/19 +202.171.216.0/21 +202.171.232.0/24 +202.171.235.0/24 +202.172.0.0/22 +202.172.7.0/24 +202.173.0.0/22 +202.173.6.0/24 +202.173.8.0/21 +202.173.112.0/22 +202.173.224.0/19 +202.174.64.0/20 +202.174.124.0/22 +202.176.224.0/19 +202.179.160.0/20 +202.179.240.0/20 +202.180.128.0/19 +202.180.208.0/21 +202.181.8.0/22 +202.181.28.0/22 +202.181.112.0/20 +202.182.32.0/20 +202.182.192.0/19 +202.189.0.0/18 +202.189.80.0/20 +202.189.184.0/21 +202.191.0.0/24 +202.191.68.0/22 +202.191.72.0/21 +202.191.80.0/20 +202.192.0.0/12 +203.0.4.0/22 +203.0.10.0/23 +203.0.18.0/24 +203.0.24.0/24 +203.0.42.0/23 +203.0.45.0/24 +203.0.46.0/23 +203.0.81.0/24 +203.0.82.0/23 +203.0.90.0/23 +203.0.96.0/23 +203.0.104.0/21 +203.0.114.0/23 +203.0.122.0/24 +203.0.128.0/24 +203.0.130.0/23 +203.0.132.0/22 +203.0.137.0/24 +203.0.142.0/24 +203.0.144.0/24 +203.0.146.0/24 +203.0.148.0/24 +203.0.150.0/23 +203.0.152.0/24 +203.0.177.0/24 +203.0.224.0/24 +203.1.4.0/22 +203.1.18.0/24 +203.1.26.0/23 +203.1.65.0/24 +203.1.66.0/23 +203.1.70.0/23 +203.1.76.0/23 +203.1.90.0/24 +203.1.97.0/24 +203.1.98.0/23 +203.1.100.0/22 +203.1.108.0/24 +203.1.253.0/24 +203.1.254.0/24 +203.2.64.0/21 +203.2.73.0/24 +203.2.112.0/21 +203.2.126.0/23 +203.2.140.0/24 +203.2.150.0/24 +203.2.152.0/22 +203.2.156.0/23 +203.2.160.0/21 +203.2.180.0/23 +203.2.196.0/23 +203.2.209.0/24 +203.2.214.0/23 +203.2.226.0/23 +203.2.229.0/24 +203.2.236.0/23 +203.3.68.0/24 +203.3.72.0/23 +203.3.75.0/24 +203.3.80.0/21 +203.3.96.0/22 +203.3.105.0/24 +203.3.112.0/21 +203.3.120.0/24 
+203.3.123.0/24 +203.3.135.0/24 +203.3.139.0/24 +203.3.143.0/24 +203.4.132.0/23 +203.4.134.0/24 +203.4.151.0/24 +203.4.152.0/22 +203.4.174.0/23 +203.4.180.0/24 +203.4.186.0/24 +203.4.205.0/24 +203.4.208.0/22 +203.4.227.0/24 +203.4.230.0/23 +203.5.4.0/23 +203.5.7.0/24 +203.5.8.0/23 +203.5.11.0/24 +203.5.21.0/24 +203.5.22.0/24 +203.5.44.0/24 +203.5.46.0/23 +203.5.52.0/22 +203.5.56.0/23 +203.5.60.0/23 +203.5.114.0/23 +203.5.118.0/24 +203.5.120.0/24 +203.5.172.0/24 +203.5.180.0/23 +203.5.182.0/24 +203.5.185.0/24 +203.5.186.0/24 +203.5.188.0/23 +203.5.190.0/24 +203.5.195.0/24 +203.5.214.0/23 +203.5.218.0/23 +203.6.131.0/24 +203.6.136.0/24 +203.6.138.0/23 +203.6.142.0/24 +203.6.150.0/23 +203.6.157.0/24 +203.6.159.0/24 +203.6.224.0/20 +203.6.248.0/23 +203.7.129.0/24 +203.7.138.0/23 +203.7.147.0/24 +203.7.150.0/23 +203.7.158.0/24 +203.7.192.0/23 +203.7.200.0/24 +203.8.0.0/24 +203.8.8.0/24 +203.8.23.0/24 +203.8.70.0/24 +203.8.82.0/24 +203.8.86.0/23 +203.8.91.0/24 +203.8.110.0/23 +203.8.115.0/24 +203.8.166.0/23 +203.8.169.0/24 +203.8.173.0/24 +203.8.184.0/24 +203.8.186.0/23 +203.8.190.0/23 +203.8.192.0/24 +203.8.197.0/24 +203.8.198.0/23 +203.8.203.0/24 +203.8.209.0/24 +203.8.210.0/23 +203.8.212.0/22 +203.8.217.0/24 +203.8.220.0/24 +203.9.32.0/24 +203.9.36.0/23 +203.9.57.0/24 +203.9.63.0/24 +203.9.65.0/24 +203.9.70.0/23 +203.9.72.0/24 +203.9.75.0/24 +203.9.76.0/23 +203.9.96.0/22 +203.9.100.0/23 +203.9.108.0/24 +203.9.158.0/24 +203.10.34.0/24 +203.10.56.0/24 +203.10.74.0/23 +203.10.84.0/22 +203.10.88.0/24 +203.10.95.0/24 +203.10.125.0/24 +203.11.70.0/24 +203.11.76.0/22 +203.11.82.0/24 +203.11.84.0/22 +203.11.100.0/22 +203.11.109.0/24 +203.11.117.0/24 +203.11.122.0/24 +203.11.126.0/24 +203.11.136.0/22 +203.11.141.0/24 +203.11.142.0/23 +203.11.180.0/22 +203.11.208.0/22 +203.12.16.0/24 +203.12.19.0/24 +203.12.24.0/24 +203.12.57.0/24 +203.12.65.0/24 +203.12.66.0/24 +203.12.70.0/23 +203.12.87.0/24 +203.12.100.0/23 +203.12.103.0/24 +203.12.114.0/24 +203.12.118.0/24 +203.12.130.0/24 +203.12.137.0/24 +203.12.196.0/22 +203.12.211.0/24 +203.12.219.0/24 +203.12.226.0/24 +203.12.240.0/22 +203.13.18.0/24 +203.13.24.0/24 +203.13.44.0/23 +203.13.88.0/23 +203.13.92.0/22 +203.13.173.0/24 +203.13.224.0/23 +203.13.227.0/24 +203.13.233.0/24 +203.14.24.0/22 +203.14.33.0/24 +203.14.56.0/24 +203.14.61.0/24 +203.14.62.0/24 +203.14.104.0/24 +203.14.114.0/23 +203.14.118.0/24 +203.14.162.0/24 +203.14.184.0/21 +203.14.192.0/24 +203.14.194.0/23 +203.14.214.0/24 +203.14.231.0/24 +203.14.246.0/24 +203.15.0.0/20 +203.15.20.0/23 +203.15.22.0/24 +203.15.87.0/24 +203.15.88.0/23 +203.15.105.0/24 +203.15.112.0/21 +203.15.130.0/23 +203.15.149.0/24 +203.15.151.0/24 +203.15.156.0/22 +203.15.174.0/24 +203.15.227.0/24 +203.15.232.0/22 +203.15.238.0/23 +203.15.240.0/23 +203.15.246.0/24 +203.16.10.0/24 +203.16.12.0/23 +203.16.16.0/21 +203.16.27.0/24 +203.16.38.0/24 +203.16.49.0/24 +203.16.50.0/23 +203.16.58.0/24 +203.16.63.0/24 +203.16.133.0/24 +203.16.161.0/24 +203.16.162.0/24 +203.16.186.0/23 +203.16.228.0/24 +203.16.238.0/24 +203.16.240.0/24 +203.16.245.0/24 +203.17.2.0/24 +203.17.18.0/24 +203.17.28.0/24 +203.17.39.0/24 +203.17.56.0/24 +203.17.74.0/23 +203.17.88.0/23 +203.17.136.0/24 +203.17.164.0/24 +203.17.187.0/24 +203.17.190.0/23 +203.17.231.0/24 +203.17.233.0/24 +203.17.248.0/23 +203.17.255.0/24 +203.18.2.0/23 +203.18.4.0/24 +203.18.7.0/24 +203.18.31.0/24 +203.18.37.0/24 +203.18.48.0/23 +203.18.52.0/24 +203.18.72.0/22 +203.18.80.0/23 +203.18.87.0/24 +203.18.100.0/23 +203.18.105.0/24 +203.18.107.0/24 +203.18.110.0/24 +203.18.129.0/24 
+203.18.131.0/24 +203.18.132.0/23 +203.18.144.0/24 +203.18.153.0/24 +203.18.199.0/24 +203.18.208.0/24 +203.18.211.0/24 +203.18.215.0/24 +203.19.1.0/24 +203.19.18.0/24 +203.19.24.0/24 +203.19.30.0/24 +203.19.41.0/24 +203.19.44.0/23 +203.19.46.0/24 +203.19.58.0/24 +203.19.60.0/23 +203.19.64.0/24 +203.19.68.0/24 +203.19.72.0/24 +203.19.101.0/24 +203.19.111.0/24 +203.19.131.0/24 +203.19.133.0/24 +203.19.144.0/24 +203.19.147.0/24 +203.19.149.0/24 +203.19.156.0/24 +203.19.176.0/24 +203.19.178.0/23 +203.19.208.0/24 +203.19.228.0/22 +203.19.233.0/24 +203.19.242.0/24 +203.19.248.0/23 +203.19.255.0/24 +203.20.17.0/24 +203.20.40.0/23 +203.20.44.0/24 +203.20.48.0/24 +203.20.61.0/24 +203.20.65.0/24 +203.20.84.0/23 +203.20.89.0/24 +203.20.106.0/23 +203.20.115.0/24 +203.20.117.0/24 +203.20.118.0/23 +203.20.122.0/24 +203.20.126.0/23 +203.20.135.0/24 +203.20.140.0/22 +203.20.150.0/24 +203.20.230.0/24 +203.20.232.0/24 +203.20.236.0/24 +203.21.0.0/23 +203.21.2.0/24 +203.21.8.0/24 +203.21.10.0/24 +203.21.18.0/24 +203.21.33.0/24 +203.21.34.0/24 +203.21.41.0/24 +203.21.44.0/24 +203.21.68.0/24 +203.21.82.0/24 +203.21.96.0/22 +203.21.124.0/24 +203.21.136.0/23 +203.21.145.0/24 +203.21.206.0/24 +203.22.24.0/24 +203.22.28.0/23 +203.22.31.0/24 +203.22.68.0/24 +203.22.76.0/24 +203.22.84.0/24 +203.22.87.0/24 +203.22.92.0/22 +203.22.99.0/24 +203.22.106.0/24 +203.22.122.0/23 +203.22.131.0/24 +203.22.163.0/24 +203.22.166.0/24 +203.22.170.0/24 +203.22.182.0/30 +203.22.182.6/31 +203.22.182.8/29 +203.22.182.18/31 +203.22.182.20/30 +203.22.182.24/29 +203.22.182.32/27 +203.22.182.64/26 +203.22.182.128/25 +203.22.194.0/24 +203.22.242.0/23 +203.22.245.0/24 +203.22.246.0/24 +203.22.252.0/23 +203.23.0.0/24 +203.23.47.0/24 +203.23.61.0/24 +203.23.62.0/23 +203.23.73.0/24 +203.23.85.0/24 +203.23.92.0/22 +203.23.98.0/24 +203.23.107.0/24 +203.23.112.0/24 +203.23.130.0/24 +203.23.140.0/23 +203.23.172.0/24 +203.23.182.0/24 +203.23.186.0/23 +203.23.192.0/24 +203.23.197.0/24 +203.23.198.0/24 +203.23.204.0/22 +203.23.224.0/24 +203.23.226.0/23 +203.23.228.0/22 +203.23.249.0/24 +203.23.251.0/24 +203.24.13.0/24 +203.24.18.0/24 +203.24.27.0/24 +203.24.43.0/24 +203.24.56.0/24 +203.24.58.0/24 +203.24.67.0/24 +203.24.74.0/24 +203.24.79.0/24 +203.24.80.0/23 +203.24.84.0/23 +203.24.86.0/24 +203.24.90.0/24 +203.24.111.0/24 +203.24.112.0/24 +203.24.116.0/24 +203.24.122.0/23 +203.24.145.0/24 +203.24.152.0/23 +203.24.157.0/24 +203.24.161.0/24 +203.24.167.0/24 +203.24.186.0/23 +203.24.199.0/24 +203.24.202.0/24 +203.24.212.0/23 +203.24.217.0/24 +203.24.219.0/24 +203.24.244.0/24 +203.25.19.0/24 +203.25.20.0/23 +203.25.46.0/24 +203.25.64.0/23 +203.25.91.0/24 +203.25.99.0/24 +203.25.100.0/24 +203.25.106.0/24 +203.25.131.0/24 +203.25.135.0/24 +203.25.138.0/24 +203.25.147.0/24 +203.25.153.0/24 +203.25.154.0/23 +203.25.164.0/24 +203.25.166.0/24 +203.25.174.0/23 +203.25.180.0/24 +203.25.182.0/24 +203.25.191.0/24 +203.25.199.0/24 +203.25.200.0/24 +203.25.202.0/23 +203.25.208.0/20 +203.25.229.0/24 +203.25.235.0/24 +203.25.236.0/24 +203.25.242.0/24 +203.26.12.0/24 +203.26.34.0/24 +203.26.49.0/24 +203.26.50.0/24 +203.26.55.0/24 +203.26.56.0/23 +203.26.60.0/24 +203.26.65.0/24 +203.26.68.0/24 +203.26.76.0/24 +203.26.80.0/24 +203.26.84.0/24 +203.26.97.0/24 +203.26.102.0/23 +203.26.115.0/24 +203.26.116.0/24 +203.26.129.0/24 +203.26.143.0/24 +203.26.144.0/24 +203.26.148.0/23 +203.26.154.0/24 +203.26.158.0/23 +203.26.170.0/24 +203.26.173.0/24 +203.26.176.0/24 +203.26.185.0/24 +203.26.202.0/23 +203.26.210.0/24 +203.26.214.0/24 +203.26.222.0/24 +203.26.224.0/24 
+203.26.228.0/24 +203.26.232.0/24 +203.27.0.0/24 +203.27.10.0/24 +203.27.15.0/24 +203.27.16.0/24 +203.27.20.0/24 +203.27.22.0/23 +203.27.40.0/24 +203.27.45.0/24 +203.27.53.0/24 +203.27.65.0/24 +203.27.66.0/24 +203.27.81.0/24 +203.27.88.0/24 +203.27.102.0/24 +203.27.109.0/24 +203.27.117.0/24 +203.27.121.0/24 +203.27.122.0/23 +203.27.125.0/24 +203.27.200.0/24 +203.27.202.0/24 +203.27.233.0/24 +203.27.241.0/24 +203.27.250.0/24 +203.28.10.0/24 +203.28.12.0/24 +203.28.33.0/24 +203.28.34.0/23 +203.28.43.0/24 +203.28.44.0/24 +203.28.54.0/24 +203.28.56.0/24 +203.28.73.0/24 +203.28.74.0/24 +203.28.76.0/24 +203.28.86.0/24 +203.28.88.0/24 +203.28.112.0/24 +203.28.131.0/24 +203.28.136.0/24 +203.28.140.0/24 +203.28.145.0/24 +203.28.165.0/24 +203.28.169.0/24 +203.28.170.0/24 +203.28.178.0/23 +203.28.185.0/24 +203.28.187.0/24 +203.28.196.0/24 +203.28.226.0/23 +203.28.239.0/24 +203.29.2.0/24 +203.29.8.0/23 +203.29.13.0/24 +203.29.14.0/24 +203.29.28.0/24 +203.29.46.0/24 +203.29.57.0/24 +203.29.61.0/24 +203.29.63.0/24 +203.29.69.0/24 +203.29.73.0/24 +203.29.81.0/24 +203.29.90.0/24 +203.29.95.0/24 +203.29.100.0/24 +203.29.103.0/24 +203.29.112.0/24 +203.29.120.0/22 +203.29.182.0/23 +203.29.187.0/24 +203.29.189.0/24 +203.29.190.0/24 +203.29.205.0/24 +203.29.210.0/24 +203.29.217.0/24 +203.29.227.0/24 +203.29.231.0/24 +203.29.233.0/24 +203.29.234.0/24 +203.29.248.0/24 +203.29.254.0/23 +203.30.16.0/23 +203.30.25.0/24 +203.30.27.0/24 +203.30.29.0/24 +203.30.66.0/24 +203.30.81.0/24 +203.30.87.0/24 +203.30.111.0/24 +203.30.121.0/24 +203.30.123.0/24 +203.30.152.0/24 +203.30.156.0/24 +203.30.162.0/24 +203.30.173.0/24 +203.30.175.0/24 +203.30.187.0/24 +203.30.194.0/24 +203.30.217.0/24 +203.30.220.0/24 +203.30.222.0/24 +203.30.232.0/23 +203.30.235.0/24 +203.30.240.0/23 +203.30.246.0/24 +203.30.250.0/23 +203.31.45.0/24 +203.31.46.0/24 +203.31.49.0/24 +203.31.51.0/24 +203.31.54.0/23 +203.31.69.0/24 +203.31.72.0/24 +203.31.80.0/24 +203.31.85.0/24 +203.31.97.0/24 +203.31.105.0/24 +203.31.106.0/24 +203.31.108.0/23 +203.31.124.0/24 +203.31.162.0/24 +203.31.174.0/24 +203.31.177.0/24 +203.31.181.0/24 +203.31.187.0/24 +203.31.189.0/24 +203.31.204.0/24 +203.31.220.0/24 +203.31.222.0/23 +203.31.225.0/24 +203.31.229.0/24 +203.31.248.0/23 +203.31.253.0/24 +203.32.20.0/24 +203.32.48.0/23 +203.32.56.0/24 +203.32.60.0/24 +203.32.62.0/24 +203.32.68.0/23 +203.32.76.0/24 +203.32.81.0/24 +203.32.84.0/23 +203.32.95.0/24 +203.32.102.0/24 +203.32.105.0/24 +203.32.130.0/24 +203.32.133.0/24 +203.32.140.0/24 +203.32.152.0/24 +203.32.186.0/23 +203.32.192.0/24 +203.32.196.0/24 +203.32.203.0/24 +203.32.204.0/23 +203.32.212.0/24 +203.33.4.0/24 +203.33.7.0/24 +203.33.12.0/23 +203.33.21.0/24 +203.33.26.0/24 +203.33.32.0/24 +203.33.63.0/24 +203.33.64.0/24 +203.33.67.0/24 +203.33.68.0/24 +203.33.73.0/24 +203.33.79.0/24 +203.33.100.0/24 +203.33.122.0/24 +203.33.129.0/24 +203.33.131.0/24 +203.33.145.0/24 +203.33.156.0/24 +203.33.158.0/23 +203.33.174.0/24 +203.33.185.0/24 +203.33.200.0/24 +203.33.202.0/23 +203.33.204.0/24 +203.33.206.0/23 +203.33.214.0/23 +203.33.224.0/23 +203.33.226.0/24 +203.33.233.0/24 +203.33.243.0/24 +203.33.250.0/24 +203.34.4.0/24 +203.34.21.0/24 +203.34.27.0/24 +203.34.39.0/24 +203.34.48.0/23 +203.34.54.0/24 +203.34.56.0/23 +203.34.67.0/24 +203.34.69.0/24 +203.34.76.0/24 +203.34.92.0/24 +203.34.106.0/24 +203.34.113.0/24 +203.34.147.0/24 +203.34.150.0/24 +203.34.152.0/23 +203.34.161.0/24 +203.34.162.0/24 +203.34.187.0/24 +203.34.197.0/24 +203.34.198.0/24 +203.34.204.0/22 +203.34.232.0/24 +203.34.240.0/24 +203.34.242.0/24 
+203.34.245.0/24 +203.34.251.0/24 +203.55.2.0/23 +203.55.4.0/24 +203.55.10.0/24 +203.55.13.0/24 +203.55.22.0/24 +203.55.30.0/24 +203.55.93.0/24 +203.55.101.0/24 +203.55.109.0/24 +203.55.110.0/24 +203.55.116.0/23 +203.55.119.0/24 +203.55.128.0/23 +203.55.146.0/23 +203.55.192.0/24 +203.55.196.0/24 +203.55.218.0/23 +203.55.221.0/24 +203.55.224.0/24 +203.56.1.0/24 +203.56.4.0/24 +203.56.12.0/24 +203.56.24.0/24 +203.56.38.0/24 +203.56.40.0/24 +203.56.46.0/24 +203.56.68.0/23 +203.56.82.0/23 +203.56.84.0/23 +203.56.95.0/24 +203.56.110.0/24 +203.56.121.0/24 +203.56.161.0/24 +203.56.169.0/24 +203.56.172.0/23 +203.56.175.0/24 +203.56.183.0/24 +203.56.185.0/24 +203.56.187.0/24 +203.56.192.0/24 +203.56.198.0/24 +203.56.201.0/24 +203.56.208.0/23 +203.56.210.0/24 +203.56.214.0/24 +203.56.216.0/24 +203.56.227.0/24 +203.56.228.0/24 +203.56.232.0/24 +203.56.240.0/24 +203.56.252.0/24 +203.56.254.0/24 +203.57.5.0/24 +203.57.6.0/24 +203.57.12.0/23 +203.57.28.0/24 +203.57.39.0/24 +203.57.46.0/24 +203.57.58.0/24 +203.57.61.0/24 +203.57.66.0/24 +203.57.69.0/24 +203.57.70.0/23 +203.57.73.0/24 +203.57.90.0/24 +203.57.101.0/24 +203.57.109.0/24 +203.57.123.0/24 +203.57.157.0/24 +203.57.200.0/24 +203.57.202.0/24 +203.57.206.0/24 +203.57.222.0/24 +203.57.224.0/20 +203.57.246.0/23 +203.57.249.0/24 +203.57.253.0/24 +203.57.254.0/23 +203.62.2.0/24 +203.62.131.0/24 +203.62.139.0/24 +203.62.161.0/24 +203.62.197.0/24 +203.62.228.0/22 +203.62.234.0/24 +203.62.246.0/24 +203.65.240.0/22 +203.76.160.0/22 +203.76.168.0/22 +203.76.208.0/21 +203.76.216.0/22 +203.76.240.0/22 +203.77.180.0/22 +203.78.48.0/20 +203.78.156.0/22 +203.79.0.0/20 +203.80.4.0/23 +203.80.32.0/20 +203.80.57.0/24 +203.80.129.0/24 +203.80.132.0/22 +203.80.144.0/20 +203.81.16.0/20 +203.81.244.0/22 +203.82.0.0/23 +203.82.112.0/20 +203.82.224.0/20 +203.83.0.0/22 +203.83.12.0/22 +203.83.56.0/21 +203.83.224.0/20 +203.86.0.0/17 +203.86.250.0/24 +203.86.254.0/23 +203.88.32.0/19 +203.88.100.0/22 +203.88.192.0/19 +203.89.0.0/22 +203.89.136.0/22 +203.89.144.0/24 +203.90.0.0/22 +203.90.8.0/21 +203.90.128.0/18 +203.90.192.0/19 +203.91.32.0/19 +203.91.96.0/20 +203.91.120.0/21 +203.92.0.0/22 +203.92.6.0/24 +203.92.160.0/19 +203.93.0.0/16 +203.94.0.0/19 +203.95.0.0/21 +203.95.96.0/19 +203.95.128.0/18 +203.95.200.0/21 +203.95.208.0/22 +203.95.224.0/19 +203.99.16.0/22 +203.99.30.0/23 +203.99.80.0/20 +203.100.32.0/20 +203.100.58.0/24 +203.100.60.0/24 +203.100.63.0/24 +203.100.80.0/20 +203.100.96.0/19 +203.100.192.0/20 +203.104.32.0/20 +203.105.96.0/19 +203.105.128.0/19 +203.107.0.0/19 +203.107.32.0/20 +203.107.52.0/22 +203.107.56.0/21 +203.107.69.0/24 +203.107.70.0/23 +203.107.72.0/21 +203.107.80.0/20 +203.107.96.0/19 +203.110.160.0/19 +203.110.208.0/20 +203.110.232.0/23 +203.110.234.0/24 +203.114.80.0/20 +203.114.244.0/22 +203.118.192.0/19 +203.118.241.0/24 +203.118.248.0/22 +203.119.24.0/22 +203.119.28.0/23 +203.119.30.0/24 +203.119.32.0/24 +203.119.34.0/23 +203.119.80.0/22 +203.119.85.0/24 +203.119.113.0/24 +203.119.114.0/23 +203.119.116.0/22 +203.119.128.0/17 +203.123.58.0/24 +203.128.32.0/19 +203.128.96.0/19 +203.128.128.0/24 +203.130.32.0/20 +203.130.49.0/24 +203.130.51.0/24 +203.130.53.0/24 +203.130.54.0/23 +203.130.56.0/22 +203.130.60.0/23 +203.132.32.0/19 +203.134.240.0/22 +203.134.246.0/23 +203.135.96.0/19 +203.135.160.0/20 +203.142.12.0/23 +203.142.219.0/24 +203.142.224.0/19 +203.144.96.0/19 +203.145.0.0/19 +203.148.0.0/18 +203.148.64.0/20 +203.148.80.0/22 +203.148.86.0/23 +203.149.92.0/22 +203.152.64.0/19 +203.152.128.0/19 +203.153.0.0/22 +203.156.192.0/18 
+203.158.16.0/21 +203.160.129.0/24 +203.160.192.0/19 +203.161.0.0/22 +203.161.180.0/24 +203.161.183.0/24 +203.161.192.0/19 +203.166.160.0/19 +203.167.28.0/22 +203.168.0.0/19 +203.170.58.0/23 +203.171.0.0/22 +203.171.208.0/24 +203.171.224.0/20 +203.174.4.0/24 +203.174.6.0/24 +203.174.96.0/20 +203.175.128.0/19 +203.175.192.0/18 +203.176.0.0/18 +203.176.64.0/19 +203.176.168.0/21 +203.184.80.0/20 +203.187.160.0/19 +203.189.0.0/23 +203.189.6.0/23 +203.189.112.0/22 +203.189.192.0/19 +203.189.240.0/22 +203.190.96.0/20 +203.190.249.0/24 +203.191.0.0/23 +203.191.2.0/24 +203.191.5.0/24 +203.191.7.0/24 +203.191.29.0/24 +203.191.31.0/24 +203.191.64.0/18 +203.191.133.0/24 +203.191.144.0/20 +203.192.0.0/19 +203.193.224.0/19 +203.195.64.0/19 +203.195.128.0/17 +203.196.0.0/21 +203.196.28.0/22 +203.201.181.0/24 +203.201.182.0/24 +203.202.236.0/22 +203.205.64.0/19 +203.207.64.0/18 +203.207.128.0/17 +203.208.0.0/20 +203.208.16.0/22 +203.208.32.0/19 +203.209.224.0/19 +203.212.0.0/20 +203.212.80.0/20 +203.217.164.0/22 +203.223.0.0/20 +204.55.160.0/24 +204.74.96.0/24 +204.114.176.0/23 +208.48.251.185/32 +208.48.251.186/32 +210.2.0.0/23 +210.2.2.0/24 +210.2.5.0/24 +210.2.6.0/23 +210.2.8.0/21 +210.2.24.0/21 +210.5.0.0/19 +210.5.60.0/24 +210.5.128.0/19 +210.7.56.0/21 +210.12.0.0/15 +210.14.64.0/19 +210.14.112.0/20 +210.14.128.0/17 +210.15.0.0/17 +210.15.128.0/18 +210.16.128.0/21 +210.16.136.0/22 +210.16.156.0/22 +210.16.160.0/19 +210.21.0.0/16 +210.22.0.0/16 +210.23.32.0/19 +210.25.0.0/17 +210.25.128.0/19 +210.25.160.0/20 +210.25.176.0/21 +210.25.184.0/23 +210.25.186.0/26 +210.25.186.128/25 +210.25.187.0/24 +210.25.188.0/22 +210.25.192.0/18 +210.26.0.0/15 +210.28.0.0/14 +210.32.0.0/12 +210.51.0.0/16 +210.52.0.0/18 +210.52.64.0/23 +210.52.66.0/24 +210.52.69.0/24 +210.52.70.0/23 +210.52.72.0/21 +210.52.80.0/20 +210.52.96.0/21 +210.52.104.0/22 +210.52.108.0/24 +210.52.110.0/23 +210.52.112.0/20 +210.52.128.0/17 +210.53.0.0/16 +210.56.192.0/19 +210.72.0.0/14 +210.76.0.0/15 +210.78.0.0/16 +210.79.64.0/18 +210.79.224.0/19 +210.82.0.0/15 +210.87.128.0/18 +210.185.192.0/18 +210.192.96.0/19 +211.64.0.0/13 +211.80.0.0/12 +211.96.0.0/14 +211.100.0.0/17 +211.100.128.0/19 +211.100.160.0/20 +211.100.184.0/21 +211.100.192.0/18 +211.101.0.0/16 +211.102.0.0/15 +211.136.0.0/13 +211.144.0.0/13 +211.152.0.0/17 +211.152.134.0/23 +211.152.140.0/22 +211.152.150.0/23 +211.152.157.0/24 +211.152.160.0/19 +211.152.192.0/18 +211.153.0.0/16 +211.154.0.0/19 +211.154.32.0/20 +211.154.48.0/21 +211.154.64.0/18 +211.154.128.0/17 +211.155.0.0/18 +211.155.67.0/24 +211.155.68.0/24 +211.155.72.0/21 +211.155.80.0/20 +211.155.97.0/24 +211.155.98.0/23 +211.155.100.0/22 +211.155.104.0/21 +211.155.113.0/24 +211.155.116.0/22 +211.155.120.0/21 +211.155.128.0/17 +211.156.0.0/18 +211.156.64.0/19 +211.156.96.0/21 +211.156.104.0/22 +211.156.108.0/23 +211.156.112.0/20 +211.156.128.0/17 +211.157.0.0/16 +211.158.0.0/15 +211.160.0.0/13 +212.64.0.0/17 +212.129.128.0/17 +213.199.169.0/24 +213.255.230.0/23 +218.0.0.0/12 +218.16.0.0/13 +218.24.0.0/14 +218.28.0.0/15 +218.30.0.0/19 +218.30.64.0/18 +218.30.128.0/18 +218.30.192.0/19 +218.30.224.0/20 +218.30.240.0/21 +218.30.248.0/22 +218.30.252.0/25 +218.30.252.128/26 +218.30.252.194/31 +218.30.252.196/30 +218.30.252.200/29 +218.30.252.208/28 +218.30.252.224/27 +218.30.253.0/24 +218.30.254.0/23 +218.31.0.0/16 +218.56.0.0/13 +218.64.0.0/11 +218.96.0.0/15 +218.98.0.0/18 +218.98.96.0/19 +218.98.128.0/19 +218.98.192.0/18 +218.99.0.0/16 +218.100.96.0/19 +218.100.128.0/17 +218.104.0.0/14 +218.108.0.0/15 +218.185.192.0/19 
+218.192.0.0/12 +218.240.0.0/14 +218.244.0.0/15 +218.246.0.0/19 +218.246.32.0/20 +218.246.48.0/21 +218.246.56.0/23 +218.246.58.0/24 +218.246.60.0/22 +218.246.64.0/18 +218.246.129.0/24 +218.246.131.0/24 +218.246.132.0/23 +218.246.134.0/24 +218.246.139.0/24 +218.246.144.0/20 +218.246.160.0/19 +218.246.192.0/18 +218.247.0.0/18 +218.247.96.0/19 +218.247.128.0/17 +218.249.0.0/16 +219.72.0.0/16 +219.82.0.0/16 +219.83.128.0/17 +219.90.68.0/22 +219.90.72.0/21 +219.128.0.0/11 +219.216.0.0/13 +219.224.0.0/13 +219.232.0.0/15 +219.234.0.0/21 +219.234.10.0/23 +219.234.12.0/22 +219.234.32.0/19 +219.234.64.0/18 +219.234.128.0/17 +219.235.0.0/16 +219.236.0.0/14 +219.242.0.0/15 +219.244.0.0/14 +220.101.192.0/18 +220.112.0.0/14 +220.152.128.0/17 +220.154.0.0/16 +220.155.0.0/21 +220.155.9.0/24 +220.155.10.0/23 +220.155.12.0/22 +220.155.16.0/21 +220.155.24.0/22 +220.155.28.0/23 +220.155.31.0/24 +220.155.32.0/19 +220.155.64.0/18 +220.155.128.0/17 +220.158.241.0/24 +220.158.243.0/24 +220.160.0.0/11 +220.192.0.0/12 +220.231.0.0/18 +220.231.128.0/17 +220.232.64.0/18 +220.234.0.0/16 +220.242.0.0/23 +220.242.6.0/24 +220.242.8.0/24 +220.242.12.0/23 +220.242.14.0/24 +220.242.17.0/24 +220.242.18.0/23 +220.242.20.0/24 +220.242.32.0/20 +220.242.48.0/23 +220.242.53.0/24 +220.242.55.0/24 +220.242.56.0/22 +220.242.60.0/23 +220.242.62.0/24 +220.242.64.0/19 +220.242.96.0/20 +220.242.112.0/21 +220.242.120.0/22 +220.242.124.0/23 +220.242.126.0/24 +220.242.134.0/23 +220.242.173.0/24 +220.242.183.0/24 +220.242.197.0/24 +220.242.205.0/24 +220.242.207.0/24 +220.242.217.0/24 +220.242.218.0/23 +220.242.220.0/22 +220.242.224.0/19 +220.243.0.0/17 +220.243.128.0/18 +220.243.192.0/23 +220.243.196.0/24 +220.243.198.0/23 +220.243.201.0/24 +220.243.204.0/24 +220.243.214.0/24 +220.243.216.0/23 +220.243.218.0/24 +220.243.220.0/23 +220.243.223.0/24 +220.243.225.0/24 +220.243.226.0/23 +220.243.229.0/24 +220.243.230.0/24 +220.243.234.0/23 +220.243.237.0/24 +220.243.238.0/23 +220.243.243.0/24 +220.243.244.0/24 +220.243.246.0/24 +220.243.249.0/24 +220.243.250.0/24 +220.243.252.0/24 +220.243.254.0/23 +220.247.136.0/21 +220.248.0.0/14 +220.252.0.0/16 +221.0.0.0/13 +221.8.0.0/14 +221.12.0.0/17 +221.12.128.0/18 +221.13.0.0/16 +221.14.0.0/15 +221.122.0.0/15 +221.128.128.0/17 +221.129.0.0/16 +221.130.0.0/15 +221.133.224.0/19 +221.136.0.0/15 +221.172.0.0/14 +221.176.0.0/19 +221.176.32.0/20 +221.176.48.0/21 +221.176.56.0/24 +221.176.58.0/23 +221.176.60.0/22 +221.176.64.0/18 +221.176.128.0/17 +221.177.0.0/16 +221.178.0.0/15 +221.180.0.0/14 +221.192.0.0/14 +221.196.0.0/15 +221.198.0.0/16 +221.199.0.0/17 +221.199.128.0/18 +221.199.192.0/20 +221.199.224.0/19 +221.200.0.0/13 +221.208.0.0/12 +221.224.0.0/12 +222.16.0.0/12 +222.32.0.0/11 +222.64.0.0/11 +222.125.0.0/16 +222.126.128.0/19 +222.126.160.0/21 +222.126.168.0/22 +222.126.172.0/23 +222.126.174.40/29 +222.126.174.76/30 +222.126.174.88/29 +222.126.174.144/28 +222.126.178.0/23 +222.126.180.0/22 +222.126.184.0/21 +222.126.192.0/21 +222.126.200.104/29 +222.126.206.0/23 +222.126.208.0/22 +222.126.212.0/26 +222.126.212.64/27 +222.126.212.96/28 +222.126.212.112/29 +222.126.212.128/25 +222.126.213.0/24 +222.126.214.0/23 +222.126.216.0/21 +222.126.224.0/19 +222.128.0.0/12 +222.160.0.0/14 +222.168.0.0/13 +222.176.0.0/12 +222.192.0.0/11 +222.240.0.0/13 +222.248.0.0/15 +223.0.0.0/12 +223.20.0.0/15 +223.27.184.0/22 +223.29.208.0/22 +223.64.0.0/11 +223.96.0.0/12 +223.112.0.0/14 +223.116.0.0/15 +223.120.0.0/13 +223.128.0.0/15 +223.144.0.0/12 +223.160.0.0/14 +223.166.0.0/15 +223.192.0.0/15 +223.198.0.0/15 
+223.201.8.0/21 +223.201.16.0/20 +223.201.32.0/19 +223.201.64.0/18 +223.201.128.0/17 +223.202.0.0/15 +223.208.0.0/13 +223.220.0.0/15 +223.223.176.0/20 +223.223.192.0/20 +223.240.0.0/13 +223.248.0.0/14 +223.252.128.0/19 +223.252.192.0/18 +223.254.0.0/16 +223.255.0.0/17 +223.255.236.0/22 +223.255.252.0/23 \ No newline at end of file diff --git a/misc/learn/china/rule.sh b/misc/learn/china/rule.sh new file mode 100755 index 0000000..89778b6 --- /dev/null +++ b/misc/learn/china/rule.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -ex + +ips="china.list" + +origin="3398" +outside="3396" + +originGw="192.168.7.1" +outsideGw="192.168.10.11" + +clean_rule() { + local table=$1 + local tmp="${table}.rules" + + ip rule show | grep "lookup ${table}" | awk -F ':' '{print $2}' > $tmp + while read -r line; do ip rule del ${line}; done < $tmp +} + +ip route flush table ${outside} +ip route add default via ${outsideGw} table ${outside} + +ip route flush table ${origin} +ip route add default via ${originGw} table ${origin} + +clean_rule ${outside} +ip rule add from 172.33.196.0/24 lookup ${outside} + +clean_rule ${origin} +for i in $(cat ${ips}); do + ip rule add from 172.33.196.0/24 to $i lookup ${origin}; +done diff --git a/misc/learn/ebtables b/misc/learn/ebtables new file mode 100755 index 0000000..b43883c --- /dev/null +++ b/misc/learn/ebtables @@ -0,0 +1,36 @@ +# new chain + +ebtables -N acl-1 +ebtables -P acl-1 DROP + +# enable tap3 + +ebtables -A INPUT -i tap3 -j acl-1 + + +# ipv4 + +ebtables -A acl-1 -p ipv4 --ip-dst 192.168.70.0/24 -j ACCEPT +ebtables -A acl-1 -p ipv4 --ip-dst 192.168.10.0/24 -j ACCEPT +ebtables -A acl-1 -p ipv4 --ip-src 172.16.100.0/24 -j ACCEPT + +# icmp 1 + +ebtavles -A acl-1 -p ipv4 --ip-proto 1 --ip-dst 192.168.7.1 -j ACCEPT + + +# udp 17 + +ebtavles -A acl-1 -p ipv4 --ip-proto 17 --ip-dst 192.168.7.1 --ip-dport 53 -j ACCEPT +ebtavles -A acl-1 -p ipv4 --ip-proto 17 --ip-src 192.168.7.2 --ip-sport 68 -j ACCEPT + + +# tcp 6 + +ebtavles -A acl-1 -p ipv4 --ip-proto 6 --ip-dst 192.168.7.1 --ip-dport 80 -j ACCEPT +ebtavles -A acl-1 -p ipv4 --ip-proto 6 --ip-dst 192.168.7.2 --ip-dport 443 -j ACCEPT + + +# remove chain + +ebtables -X acl-1 diff --git a/misc/learn/flags.go b/misc/learn/flags.go new file mode 100755 index 0000000..c96f864 --- /dev/null +++ b/misc/learn/flags.go @@ -0,0 +1,17 @@ +package main + +import ( + "flag" + "os" +) + +func main() { + var alias string + + var cl0 = flag.NewFlagSet(os.Args[0], flag.ExitOnError) + flag.StringVar(&alias, "alias", "", "the alias for this point") + cl0.Var(&alias, name, usage) + cl0.Parse(os.Args[1:]) + + print(alias) +} diff --git a/misc/learn/json.go b/misc/learn/json.go new file mode 100755 index 0000000..8df747d --- /dev/null +++ b/misc/learn/json.go @@ -0,0 +1,69 @@ +package main + +import ( + "encoding/json" + "fmt" + "net" +) + +// +type Hi struct { + Name string +} + +// +type HardwareAddr struct { + net.HardwareAddr +} + +// +func (h HardwareAddr) MarshalText() ([]byte, error) { + if len([]byte(h.HardwareAddr)) == 0 { + return []byte(""), nil + } + + return []byte(h.String()), nil +} + +// +func (h *HardwareAddr) UnmarshalText(text []byte) error { + if len(text) == 0 { + *h = HardwareAddr{nil} + return nil + } + + s := string(text) + x, err := net.ParseMAC(s) + if err != nil { + return &net.ParseError{Type: "Hardware address", Text: s} + } + + *h = HardwareAddr{x} + return nil +} + +type Test struct { + Username string `json:"Password,omitempty"` + Password string `json:"Password,omit"` + HwAddr HardwareAddr `json:"HwAddr"` + Hi int 
`json:"Hi,string"` +} + +func main() { + t := Test{ + Username: "hi", + Password: "daniel", + Hi: 0x21, + } + + hw, _ := net.ParseMAC("2a:60:84:bd:fe:50") + t.HwAddr = HardwareAddr{hw} + + str, err := json.Marshal(t) + fmt.Println(string(str), err) + + o := &Test{} + + err = json.Unmarshal([]byte(str), o) + fmt.Println(o, err) +} diff --git a/misc/learn/kcp/client.go b/misc/learn/kcp/client.go new file mode 100755 index 0000000..d07886a --- /dev/null +++ b/misc/learn/kcp/client.go @@ -0,0 +1,19 @@ +package main + +import ( + "github.com/xtaci/kcp-go/v5" + "time" +) + +func main() { + conn, err := kcp.DialWithOptions("192.168.7.30:9999", nil, 10, 3) + if err != nil { + panic(err) + } + + for { + data := make([]byte, 4096) + _, _ = conn.Write(data) + time.Sleep(time.Second) + } +} diff --git a/misc/learn/kcp/server.go b/misc/learn/kcp/server.go new file mode 100755 index 0000000..f7125b3 --- /dev/null +++ b/misc/learn/kcp/server.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + "github.com/xtaci/kcp-go/v5" + "io" + "net" +) + +func main() { + fmt.Println("kcp listens on 10000") + lis, err := kcp.ListenWithOptions(":10000", nil, 10, 3) + if err != nil { + panic(err) + } + for { + conn, e := lis.AcceptKCP() + if e != nil { + panic(e) + } + go func(conn net.Conn) { + var buffer = make([]byte, 4096) + for { + n, e := conn.Read(buffer) + if e != nil { + if e == io.EOF { + fmt.Println("receive EOF") + break + } + fmt.Println(e) + break + } + fmt.Println("receive from client:", buffer[:n]) + } + }(conn) + } +} diff --git a/misc/learn/ldap.go b/misc/learn/ldap.go new file mode 100755 index 0000000..d1a320b --- /dev/null +++ b/misc/learn/ldap.go @@ -0,0 +1,30 @@ +package main + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "os" +) + +func main() { + cfg := libol.LDAPConfig{} + cfg.Server = os.Getenv("LDAPServer") + cfg.Password = os.Getenv("LDAPPassword") + cfg.BaseDN = os.Getenv("LDAPBaseDN") + cfg.BindDN = os.Getenv("LDAPBindDN") + cfg.Filter = os.Getenv("LDAPFilter") + cfg.Attr = os.Getenv("LDAPAttr") + + if l, err := libol.NewLDAPService(cfg); err != nil { + panic(err) + } else { + username := os.Getenv("username") + password := os.Getenv("password") + if ok, err := l.Login(username, password); !ok { + panic(err) + } else { + fmt.Println("success") + } + } + fmt.Println(cfg) +} diff --git a/misc/learn/map.go b/misc/learn/map.go new file mode 100755 index 0000000..a400b4f --- /dev/null +++ b/misc/learn/map.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" +) + +func main() { + c := make(map[int]map[int]string, 32) + + for i := 0; i < 64; i++ { + c[i] = make(map[int]string, 2) + } + + fmt.Printf("%d,%s\n", len(c), c) +} diff --git a/misc/learn/ns.go b/misc/learn/ns.go new file mode 100755 index 0000000..3d4659b --- /dev/null +++ b/misc/learn/ns.go @@ -0,0 +1,13 @@ +package main + +import ( + "fmt" + "github.com/vishvananda/netns" +) + +func main() { + ns, err := netns.GetFromName("hi") + fmt.Println(ns, err) + ns, err = netns.GetFromName("dan") + fmt.Println(ns, err) +} diff --git a/misc/learn/openvpn/client.ovpn b/misc/learn/openvpn/client.ovpn new file mode 100644 index 0000000..86b069c --- /dev/null +++ b/misc/learn/openvpn/client.ovpn @@ -0,0 +1,135 @@ +# Generate by OpenLAN +client +dev tun +route-metric 300 +proto tcp +remote 19.68.0.26 1194 +reneg-sec 0 +resolv-retry infinite +nobind +persist-key +persist-tun + +-----BEGIN CERTIFICATE----- +MIIFKjCCBBKgAwIBAgIJANfUK13p8Z+CMA0GCSqGSIb3DQEBCwUAMIG+MQswCQYD 
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEVMBMGA1UEBxMMU2FuRnJhbmNp +c2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xHTAbBgNVBAsTFE15T3JnYW5pemF0 +aW9uYWxVbml0MRgwFgYDVQQDEw9Gb3J0LUZ1bnN0b24gQ0ExEDAOBgNVBCkTB0Vh +c3lSU0ExITAfBgkqhkiG9w0BCQEWEm1lQG15aG9zdC5teWRvbWFpbjAeFw0yMTA4 +MDgyMDEyMTVaFw0zMTA4MDYyMDEyMTVaMIG+MQswCQYDVQQGEwJVUzETMBEGA1UE +CBMKQ2FsaWZvcm5pYTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xHTAbBgNVBAsTFE15T3JnYW5pemF0aW9uYWxVbml0MRgwFgYD +VQQDEw9Gb3J0LUZ1bnN0b24gQ0ExEDAOBgNVBCkTB0Vhc3lSU0ExITAfBgkqhkiG +9w0BCQEWEm1lQG15aG9zdC5teWRvbWFpbjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAKwKj0xC7K5ZudeiZ/0fN00t0Hc7qhq/fGcdr/xpuueJANjfYmXX +caoUxjbvf4XuuqhBAHIoFnqQI18hPhoGH+PrEFvdolwjF0Dx2//33GzMlpkel+UY +V390Pg2TTsVs1m5uSN6CQkyoCQvAqFUwphf8iwBMqw1HEyh6SxgiS4MV2UQxXHRq +J/cVH+8+VxLXBEa55jo7zhYcLZLGHJFOfg4c8L4F5HYqUDxqHJY6XP1h81gtHSTy +Yd9iWCVuAQK9kz/cRyy9KQJvOhZKlXhAQelCVFhIDJe8hyB3WG0orJPN2KPOsHit +GsEfwtYYz6DX2lW5QamxoNXb1lOgI63HOvsCAwEAAaOCAScwggEjMB0GA1UdDgQW +BBTzxft5067dONyt5xPCjfYLy9swpjCB8wYDVR0jBIHrMIHogBTzxft5067dONyt +5xPCjfYLy9swpqGBxKSBwTCBvjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UEChMMRm9ydC1GdW5z +dG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5pdDEYMBYGA1UEAxMPRm9y +dC1GdW5zdG9uIENBMRAwDgYDVQQpEwdFYXN5UlNBMSEwHwYJKoZIhvcNAQkBFhJt +ZUBteWhvc3QubXlkb21haW6CCQDX1Ctd6fGfgjAMBgNVHRMEBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQAA6V1dYzIe+eeCL2mR3mxsmlRa2M4qJ0ZNUt1nuTOKL9ky +q0u1jEKoOnXLR39a48s+XkI3D19gMH/I/1ZXlOzkoLai6xH2HuuYp52QfXFtniIc +n8hbghOZzN1+9l3QybsnLfT95kfaCzglFOjJgj93Zmm6eKrS2LRuoBHO0j/KjtUR +MQ/B0GRpBMvQ783ubFsJfaeroYQK7HPo8BkHvRxUZcfOJu18rIvFd+/7D7HXBXah +zBkaog+42DKatEgQTVpb+DzQyA25VFPLMlE/RrCKThDiG4mh8TZ51ypbiLlaEzSf +m8hzzWj/H2HUgCY7c0voDy/hX4B7CBAEh2FywdCm +-----END CERTIFICATE----- + + +-----BEGIN CERTIFICATE----- +MIIFZzCCBE+gAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMx +EzARBgNVBAgTCkNhbGlmb3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5p +dDEYMBYGA1UEAxMPRm9ydC1GdW5zdG9uIENBMRAwDgYDVQQpEwdFYXN5UlNBMSEw +HwYJKoZIhvcNAQkBFhJtZUBteWhvc3QubXlkb21haW4wHhcNMjEwODA4MjAxMzA1 +WhcNMzEwODA2MjAxMzA1WjCBtTELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UEChMMRm9ydC1GdW5z +dG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5pdDEPMA0GA1UEAxMGZGFu +aWVsMRAwDgYDVQQpEwdFYXN5UlNBMSEwHwYJKoZIhvcNAQkBFhJtZUBteWhvc3Qu +bXlkb21haW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb7+9Ykthm +/YHo+zWm1/XYH/8l1TNSQvzElpYArrxrPYHF2+OKRsASerUAQoBWla8x+lQqWD0F +F8D2FDRK4c7jkN0tLJOlQknOeSqEV9rpiJD30T0AvLfXrNdQFoO9AGck3YTg9flp +P6YY2yVOXLi+LFj1l20EQk3N0nst13q3YqpCfI5FXosjjL7RBFMiXUp83y44kutZ +/x22ocytTzoixe8bkAk3SWYVJMVTQGyj79ZsPRd1yaRq0vD4QJv/zMOg+b6Bng6x ++/gQHo/obDBvXQeOPgExgCH78+BP1dNJuYVa+4Nj2PMzNT+6rwcHUGyMJQdrnbI5 +ojwdDBpOLhezAgMBAAGjggF1MIIBcTAJBgNVHRMEAjAAMC0GCWCGSAGG+EIBDQQg +Fh5FYXN5LVJTQSBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFDU2Caej +DI9yYch2Xp/xNYJdEnRfMIHzBgNVHSMEgeswgeiAFPPF+3nTrt043K3nE8KN9gvL +2zCmoYHEpIHBMIG+MQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEV +MBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xHTAb +BgNVBAsTFE15T3JnYW5pemF0aW9uYWxVbml0MRgwFgYDVQQDEw9Gb3J0LUZ1bnN0 +b24gQ0ExEDAOBgNVBCkTB0Vhc3lSU0ExITAfBgkqhkiG9w0BCQEWEm1lQG15aG9z +dC5teWRvbWFpboIJANfUK13p8Z+CMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAsGA1Ud +DwQEAwIHgDANBgkqhkiG9w0BAQsFAAOCAQEAceXlLb/1hVmCfDecwuCEG+WQ3NUC +mVRV+fkLRghYqj2vxqxjU3d97ykeW9o2ksWujDIzHl65HPIj4+FRHH7ohDgkJxT8 +oER/JHuTGWTGVntfWCs7TbbnrNhW00574cri+VDgfhnT4SwKZNZXKKyQt4qMY8qO 
+2yr14o28A2aNb692SaqrDax/zcl3d0MR3SgPjxJ/suctFm7JuAPF+87JVogVFeoM +bTFQJDLirSrORpzHluAJ7zDfKsYsrSsCb3jUy2cuxETM+AnbAfOe4xVHzRdlqdfD +HVdTTPpUl+G2gHRLbZmdfZ2t5uaMM+lDOnr9sUfu7G+Zl4sKw9y3VAGTTg== +-----END CERTIFICATE----- + + +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb7+9Ykthm/YHo ++zWm1/XYH/8l1TNSQvzElpYArrxrPYHF2+OKRsASerUAQoBWla8x+lQqWD0FF8D2 +FDRK4c7jkN0tLJOlQknOeSqEV9rpiJD30T0AvLfXrNdQFoO9AGck3YTg9flpP6YY +2yVOXLi+LFj1l20EQk3N0nst13q3YqpCfI5FXosjjL7RBFMiXUp83y44kutZ/x22 +ocytTzoixe8bkAk3SWYVJMVTQGyj79ZsPRd1yaRq0vD4QJv/zMOg+b6Bng6x+/gQ +Ho/obDBvXQeOPgExgCH78+BP1dNJuYVa+4Nj2PMzNT+6rwcHUGyMJQdrnbI5ojwd +DBpOLhezAgMBAAECggEBAJn5/r0qAWsJLDwqM0OdijZHIGAiRQ3gbApNyrKroqoJ +o6Mb8M9L/Qhy8+k8pZD4tnDs6qPCpuW73ZHqTznloOxc7RvFS5C1OerdlVCWGWLK +A4Qb7vNnK+ZI0Wz2dkCJ2axLkc3VJ0nyD5zzP8j+zTAG6Dj4TJBgwwvHwgk0BGwB +0FfXQpRJBndCn5ypzP9AH6CrfWRbVuCmqvQ5V5SaCbZ76R2WLeyGd5g1ttB+B2A0 +z7UFCtJaRcdx2ynEaVeHv5pjpp0UCtWEeQJ5FvXmSeUsGJTiyXOrOUT8ek7Jws6N +LU+9reUutwoarJ30Q8Xvkg5d7qnrQ2McgZwadkcHOEECgYEA/mKmIpaPWpEhkBBQ +w+ZYvVCuZtZEEalu8QUwc/VHDR5aesW9s/EF0nzXHJogr2ppi/J0e0ptavcq/z8I +96QUDImmR8R60Aj6iTeiTWNEVcMMJMlhBkyELch6dKQ1WOTZb96VTZU5z08Y7iMB +TODUZPtGkvxu84MMKIC3WsVbC1ECgYEA3VVPrDInV5UaGJVrLwmYSuIk2XZpnTJN +Czu5Y/qKxMFYQH44qhcuhMtTkVJJ3YdkNi2m//UCcBlqLOnLjJb+DSBvnM85DNCr +NDjJm0BgG7BVUTz7nBtbyF37Bp45db0tU+S4Q8jcM4rq+CYsxkpPCUjkoZWuh/+O +USkf39qcqcMCgYAH1T3MXtp3LXTdHMN/5L3ixnM4U2yxJoh1/3TofSr86mxaz2/x +9H/7ZUCybsSG4j3xwzt2+Gm6G3ZFeclq5rB/WlW2bARkF4KvavkibOYTxfe/wjus +l/qR9lRe9xd1b0aKvU3Xn7qx+XuJCabmmR2xJlOb/w3jjat4K855iw4soQKBgGaV +TssJp9BhC+pO/pGbidlTPS8JQeQY1zqYoNFk4wJUdnnVxp+XBPmJX58gG5owUVMH +D7rcLRgVv283oR71MKSW3wQ3y0JUSLV4iICLcsACl1iCwj9VMtIINPuZRBp85tBW +Vs0cAGtnPIZs8x2ofT9ZaJY30N+5o5WcUOio4ymPAoGBAMzSxeaIx5fexNjrQOHm +c2nJDNIVaFmPQOZiDdMSSsc7eciRo7JE9uD8pb4EJNW5PTOl2C5I4YUmeIiiFmmx +UW9kdY4lnF/aSMX5f5hc8f4Gr7gMmXMybql2U/FZNmeTWrHzgKxwTF3JdhRWdPnB +m9V7OVznMx7oy5lnk8oKYJ+F +-----END PRIVATE KEY----- + +remote-cert-tls server + +# +# 2048 bit OpenVPN static key +# +-----BEGIN OpenVPN Static key V1----- +16d05ff3aa529220207d9cde7607e882 +200a15fcc3e5a2dc7d6d35b201f49916 +a0c5d575f6bc320591ff61adeb53b45c +334792fc6bc676b6c19cf32755fa1409 +7abbc61c1bd8afb0cc74cf4bf1d3767a +4b49d624fe6f071f5080bdf286b65ccf +17782d9958442f820dfd0881712e51c8 +ec5aada1d4fde6c799e5ccc054e94717 +41df7968bba18ec69e44d49a7ab07515 +6da55ef36316e29b2ba0ce60ecccc1e1 +d01d6f2c949a6f85a4c10f6ffd6b7dd3 +d43f4dbdae69864024ba3b1967bc6ab1 +fb12a691c1d7733687e06580ea7eab78 +d96fd4599e474b23b2adfecefc9ec36f +a2ae116cfae64c7b9b1d37938554579a +d058d04e8179eac0714b7c53310bc42f +-----END OpenVPN Static key V1----- + +key-direction 1 +cipher AES-256-CBC +auth-nocache +verb 4 + diff --git a/misc/learn/openvpn/server.conf b/misc/learn/openvpn/server.conf new file mode 100644 index 0000000..1fd4700 --- /dev/null +++ b/misc/learn/openvpn/server.conf @@ -0,0 +1,143 @@ +# Generate by OpenLAN +local 0.0.0.0 +port 1194 +proto tcp +dev tun +reneg-sec 0 +keepalive 10 120 +persist-key +persist-tun +server 100.100.0.0 255.255.0.0 +push "route 192.168.122.0 255.255.255.0" + +-----BEGIN CERTIFICATE----- +MIIFKjCCBBKgAwIBAgIJANfUK13p8Z+CMA0GCSqGSIb3DQEBCwUAMIG+MQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEVMBMGA1UEBxMMU2FuRnJhbmNp +c2NvMRUwEwYDVQQKEwxGb3J0LUZ1bnN0b24xHTAbBgNVBAsTFE15T3JnYW5pemF0 +aW9uYWxVbml0MRgwFgYDVQQDEw9Gb3J0LUZ1bnN0b24gQ0ExEDAOBgNVBCkTB0Vh +c3lSU0ExITAfBgkqhkiG9w0BCQEWEm1lQG15aG9zdC5teWRvbWFpbjAeFw0yMTA4 +MDgyMDEyMTVaFw0zMTA4MDYyMDEyMTVaMIG+MQswCQYDVQQGEwJVUzETMBEGA1UE 
+CBMKQ2FsaWZvcm5pYTEVMBMGA1UEBxMMU2FuRnJhbmNpc2NvMRUwEwYDVQQKEwxG +b3J0LUZ1bnN0b24xHTAbBgNVBAsTFE15T3JnYW5pemF0aW9uYWxVbml0MRgwFgYD +VQQDEw9Gb3J0LUZ1bnN0b24gQ0ExEDAOBgNVBCkTB0Vhc3lSU0ExITAfBgkqhkiG +9w0BCQEWEm1lQG15aG9zdC5teWRvbWFpbjCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAKwKj0xC7K5ZudeiZ/0fN00t0Hc7qhq/fGcdr/xpuueJANjfYmXX +caoUxjbvf4XuuqhBAHIoFnqQI18hPhoGH+PrEFvdolwjF0Dx2//33GzMlpkel+UY +V390Pg2TTsVs1m5uSN6CQkyoCQvAqFUwphf8iwBMqw1HEyh6SxgiS4MV2UQxXHRq +J/cVH+8+VxLXBEa55jo7zhYcLZLGHJFOfg4c8L4F5HYqUDxqHJY6XP1h81gtHSTy +Yd9iWCVuAQK9kz/cRyy9KQJvOhZKlXhAQelCVFhIDJe8hyB3WG0orJPN2KPOsHit +GsEfwtYYz6DX2lW5QamxoNXb1lOgI63HOvsCAwEAAaOCAScwggEjMB0GA1UdDgQW +BBTzxft5067dONyt5xPCjfYLy9swpjCB8wYDVR0jBIHrMIHogBTzxft5067dONyt +5xPCjfYLy9swpqGBxKSBwTCBvjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UEChMMRm9ydC1GdW5z +dG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5pdDEYMBYGA1UEAxMPRm9y +dC1GdW5zdG9uIENBMRAwDgYDVQQpEwdFYXN5UlNBMSEwHwYJKoZIhvcNAQkBFhJt +ZUBteWhvc3QubXlkb21haW6CCQDX1Ctd6fGfgjAMBgNVHRMEBTADAQH/MA0GCSqG +SIb3DQEBCwUAA4IBAQAA6V1dYzIe+eeCL2mR3mxsmlRa2M4qJ0ZNUt1nuTOKL9ky +q0u1jEKoOnXLR39a48s+XkI3D19gMH/I/1ZXlOzkoLai6xH2HuuYp52QfXFtniIc +n8hbghOZzN1+9l3QybsnLfT95kfaCzglFOjJgj93Zmm6eKrS2LRuoBHO0j/KjtUR +MQ/B0GRpBMvQ783ubFsJfaeroYQK7HPo8BkHvRxUZcfOJu18rIvFd+/7D7HXBXah +zBkaog+42DKatEgQTVpb+DzQyA25VFPLMlE/RrCKThDiG4mh8TZ51ypbiLlaEzSf +m8hzzWj/H2HUgCY7c0voDy/hX4B7CBAEh2FywdCm +-----END CERTIFICATE----- + + +-----BEGIN CERTIFICATE----- +MIIFhDCCBGygAwIBAgIBATANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMx +EzARBgNVBAgTCkNhbGlmb3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMG +A1UEChMMRm9ydC1GdW5zdG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5p +dDEYMBYGA1UEAxMPRm9ydC1GdW5zdG9uIENBMRAwDgYDVQQpEwdFYXN5UlNBMSEw +HwYJKoZIhvcNAQkBFhJtZUBteWhvc3QubXlkb21haW4wHhcNMjEwODA4MjAxMjM0 +WhcNMzEwODA2MjAxMjM0WjCBuDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExFTATBgNVBAcTDFNhbkZyYW5jaXNjbzEVMBMGA1UEChMMRm9ydC1GdW5z +dG9uMR0wGwYDVQQLExRNeU9yZ2FuaXphdGlvbmFsVW5pdDESMBAGA1UEAxMJZWFz +eXN0YWNrMRAwDgYDVQQpEwdFYXN5UlNBMSEwHwYJKoZIhvcNAQkBFhJtZUBteWhv +c3QubXlkb21haW4wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCeI6Cp +lQrmhPRBzwBgeYQlg87PhcwxX6c2D3knlNy1jQqlAsRLVK/wE3YJcMD5ikYo4wNj +H4p7vBRIjv+h9iauPTP7BiO4OBcBfdwwWzRwuaaVO5hu4AMcPXldk7yDn1qeJSyR +RRXsU0VVo/QeLSJgAa9dEdFVyIMw/Sqayi9FGmscDvnBup9sVghT3gc87KTq/fEv +gVi3YDUFiZTcumlFG4S5GW876XL8lAP6zP8nfCor7KDAU4FROLNP7EXOtDg16dMv +0eYEzHoj52Xv05sLy8hrt0I+TOnMX5eFC9Qf7HhWfHMkSo4yfvLl0RD6f6Xyc2rR +iT9Rb7Q9dmymYOQhAgMBAAGjggGPMIIBizAJBgNVHRMEAjAAMBEGCWCGSAGG+EIB +AQQEAwIGQDA0BglghkgBhvhCAQ0EJxYlRWFzeS1SU0EgR2VuZXJhdGVkIFNlcnZl +ciBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU3S7WHlhAeJZ2KQKCQQxgZc5OEK0wgfMG +A1UdIwSB6zCB6IAU88X7edOu3TjcrecTwo32C8vbMKahgcSkgcEwgb4xCzAJBgNV +BAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRUwEwYDVQQHEwxTYW5GcmFuY2lz +Y28xFTATBgNVBAoTDEZvcnQtRnVuc3RvbjEdMBsGA1UECxMUTXlPcmdhbml6YXRp +b25hbFVuaXQxGDAWBgNVBAMTD0ZvcnQtRnVuc3RvbiBDQTEQMA4GA1UEKRMHRWFz +eVJTQTEhMB8GCSqGSIb3DQEJARYSbWVAbXlob3N0Lm15ZG9tYWluggkA19QrXenx +n4IwEwYDVR0lBAwwCgYIKwYBBQUHAwEwCwYDVR0PBAQDAgWgMA0GCSqGSIb3DQEB +CwUAA4IBAQAvtZgEmALJ5ZQBX0yxE7NCvZrQ6Pe6bbABrPCgRv1dnehg10PKBXDc +YfLrdJlL0KZ2d3xfTT2WajR6VUTWXeGP0UeL1sNJUn5oGx8ek3g/nE88JbJeAjAX +uyqCNaRXSYZ30KqmymARarEABpBEizIjwYEjCjTFL3LdQYsE7V21FzkgwaAuNoK8 +egR3cCaSPzezauW7iKpwAEuJ5zjIzNaL2Cwlox0/IMUpUjvOKZ4wcyCp+E51gTUd +2TFShQpAEV5SfYNa2ym4VY4ozg4jCzO4py16EVfRXn1SecPfULelLCYyO4licOKa +W52YxuMH+A1BsfouZMbX/ulV1e7OchvV +-----END CERTIFICATE----- + + +-----BEGIN PRIVATE KEY----- 
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCeI6CplQrmhPRB +zwBgeYQlg87PhcwxX6c2D3knlNy1jQqlAsRLVK/wE3YJcMD5ikYo4wNjH4p7vBRI +jv+h9iauPTP7BiO4OBcBfdwwWzRwuaaVO5hu4AMcPXldk7yDn1qeJSyRRRXsU0VV +o/QeLSJgAa9dEdFVyIMw/Sqayi9FGmscDvnBup9sVghT3gc87KTq/fEvgVi3YDUF +iZTcumlFG4S5GW876XL8lAP6zP8nfCor7KDAU4FROLNP7EXOtDg16dMv0eYEzHoj +52Xv05sLy8hrt0I+TOnMX5eFC9Qf7HhWfHMkSo4yfvLl0RD6f6Xyc2rRiT9Rb7Q9 +dmymYOQhAgMBAAECggEBAJK/+ysD3X64KLcdMntUqNXrcDLSNgAnhNM9HrAli3+K +6DLi02gYqcA/Pw1ZriqkL6FnsU/+CBP1IFmJIh6Zf6G3uBIdy2371xWSR5A0QwDM +t5LLIBTcGubNAWBeZdU7Put9EdOpacPKLpBfun16Yuj3B1Z1324cyenfNfc44w7b +FMr7lCVDRuf7y7tA7s1qpytlamXJA2svqjnDi1RP7jqnQ6zv9ImbxT1c7LAU9goj +yD/h4EAamqIaqPQtxQ4TGY+NVd8yz5eUZEjqCZx3XUs/FIhp4a5y0gOOTb3MSKt5 +ZxKJ52es1BpJfH9CsP4fcFIeqQCcK9LlJTeHuaoDu8UCgYEAzC1ViZoOQI7+Zkxp +SfijXV8qdkY5vqJHaJ5nIzghF9aXshWxYs59z37AmdVvDQTizN+S2apiLjnkBj3S +FnPSuNNyp9JiFSVQd0fUO0hXpwgW/9j4OJYLHUuHmBz7BMZ6/xx4TCRa9RGWRzvF +Lhmt7xEV69MnEAZ+fUBWx4EMO6cCgYEAxkbr2lpy7MPQWY+N2iD3x6DJQ9BxW5bu +ABEN5/bT02WpHK8inyrMPETvj/Vhwk3L0f3B5TdgzlvkM7Yt/1Xc1uGyntZ7b0Gr ++Rr2EK9EcyA267CTfu2nioQVZwTrJZ3nYAea6zl8NDi254J3EnHTRAjIGf5QwZah +dKCshdVkuvcCgYBMmK1AQ+UY3wwdQIk3UOmoe60oVcwP8j0ryFEU61eu47hg/LZh +ROn5z6ldjK5uieizxyQGOF9AzEL4/HQ2LBlfcSPQRaK6IBeByBacbtVDOku2i6UD +RXbB9LXXoPeioPs/fWS729+rlH92FbwxQTz5NMWuhLZg0q7dOifUDFuUfQKBgQDA +CRyRhRzd9+EpfQIUi+2v6ShFh/LilFG6usnPKp6KlmGuKGnN4vOgQ9wKf8zJFfQc +VDuhN3uJIjLpar8uGAkmDb6kXHN8lAxt3hEIo9gV8NOAfqq8Oo0UeU0JVBgzfLYF +qhf/TtPe+DceDpwovxmxQRgdJNUuSifh/2RY0w8WjQKBgQCde1MEkZECPjWwn8u9 +v5C0yaA3WrJB1fI3TcKDMF7/3T5fgRzbKj8qB8R+oBmrlme2Tly8C3cAZUOmOII6 +5agxljDGxQClgnUiRswmRjBoFy8IyNkDYsuWkJubp42vMm6p6lvgzrjMLcpSPkWK +fvwoR2fqSWaBunrYXue21Jf4Ew== +-----END PRIVATE KEY----- + + +-----BEGIN DH PARAMETERS----- +MIGHAoGBAIx6EPzOa2XaBAWoDiTwex1REDfNYvgB3RC3qaTjxTPpYerfmAHiOx9k +CLGARNAKtMxwgpR0kdaEbl/XDPJ7OVkcXimo2xhGSyD6giwnWrfcuAPcY6vihJhT +KBa5SyZ7M4rZirbsJ8oYWTB8SyrSUU5cSHrLgkUBzu+6qL8UG1XjAgEC +-----END DH PARAMETERS----- + + +# +# 2048 bit OpenVPN static key +# +-----BEGIN OpenVPN Static key V1----- +16d05ff3aa529220207d9cde7607e882 +200a15fcc3e5a2dc7d6d35b201f49916 +a0c5d575f6bc320591ff61adeb53b45c +334792fc6bc676b6c19cf32755fa1409 +7abbc61c1bd8afb0cc74cf4bf1d3767a +4b49d624fe6f071f5080bdf286b65ccf +17782d9958442f820dfd0881712e51c8 +ec5aada1d4fde6c799e5ccc054e94717 +41df7968bba18ec69e44d49a7ab07515 +6da55ef36316e29b2ba0ce60ecccc1e1 +d01d6f2c949a6f85a4c10f6ffd6b7dd3 +d43f4dbdae69864024ba3b1967bc6ab1 +fb12a691c1d7733687e06580ea7eab78 +d96fd4599e474b23b2adfecefc9ec36f +a2ae116cfae64c7b9b1d37938554579a +d058d04e8179eac0714b7c53310bc42f +-----END OpenVPN Static key V1----- + +key-direction 0 +cipher AES-256-CBC +status es.status 5 +ifconfig-pool-persist es.ipp +script-security 3 +verb 3 diff --git a/misc/learn/packet.go b/misc/learn/packet.go new file mode 100755 index 0000000..2bd2e28 --- /dev/null +++ b/misc/learn/packet.go @@ -0,0 +1,128 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "log" + "os" + "time" +) + +type Package struct { + Version [2]byte // 协议版本 + Length int16 // 数据部分长度 + Timestamp int64 // 时间戳 + HostnameLength int16 // 主机名长度 + Hostname []byte // 主机名 + TagLength int16 // Tag长度 + Tag []byte // Tag + Msg []byte // 数据部分长度 +} + +func (p *Package) Pack(writer io.Writer) error { + var err error + err = binary.Write(writer, binary.BigEndian, &p.Version) + err = binary.Write(writer, binary.BigEndian, &p.Length) + err = binary.Write(writer, binary.BigEndian, &p.Timestamp) + err = binary.Write(writer, binary.BigEndian, &p.HostnameLength) + err 
= binary.Write(writer, binary.BigEndian, &p.Hostname)
+    err = binary.Write(writer, binary.BigEndian, &p.TagLength)
+    err = binary.Write(writer, binary.BigEndian, &p.Tag)
+    err = binary.Write(writer, binary.BigEndian, &p.Msg)
+    return err
+}
+func (p *Package) Unpack(reader io.Reader) error {
+    var err error
+    err = binary.Read(reader, binary.BigEndian, &p.Version)
+    err = binary.Read(reader, binary.BigEndian, &p.Length)
+    err = binary.Read(reader, binary.BigEndian, &p.Timestamp)
+    err = binary.Read(reader, binary.BigEndian, &p.HostnameLength)
+    p.Hostname = make([]byte, p.HostnameLength)
+    err = binary.Read(reader, binary.BigEndian, &p.Hostname)
+    err = binary.Read(reader, binary.BigEndian, &p.TagLength)
+    p.Tag = make([]byte, p.TagLength)
+    err = binary.Read(reader, binary.BigEndian, &p.Tag)
+    p.Msg = make([]byte, p.Length-8-2-p.HostnameLength-2-p.TagLength)
+    err = binary.Read(reader, binary.BigEndian, &p.Msg)
+    return err
+}
+
+func (p *Package) String() string {
+    return fmt.Sprintf("version:%s length:%d timestamp:%d hostname:%s tag:%s msg:%s",
+        p.Version,
+        p.Length,
+        p.Timestamp,
+        p.Hostname,
+        p.Tag,
+        p.Msg,
+    )
+}
+
+// Split is a bufio.SplitFunc: it looks for a 'V' header and a big-endian length at bytes [2:4],
+// and advances one byte at a time until a complete package is available.
+func Split(data []byte, atEOF bool) (advance int, token []byte, err error) {
+    if atEOF {
+        return
+    }
+
+    log.Printf("INDEX: 0x%02x\n", data[0])
+    if data[0] == 'V' {
+        if len(data) > 4 {
+            length := int16(0)
+            binary.Read(bytes.NewReader(data[2:4]), binary.BigEndian, &length)
+            if int(length)+4 <= len(data) {
+                return int(length) + 4, data[:int(length)+4], nil
+            }
+        }
+    }
+
+    //scroll to next package.
+    return 1, data[:1], nil
+}
+
+func main() {
+    hostname, err := os.Hostname()
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    pack := &Package{
+        Version:        [2]byte{'V', '1'},
+        Timestamp:      time.Now().Unix(),
+        HostnameLength: int16(len(hostname)),
+        Hostname:       []byte(hostname),
+        TagLength:      4,
+        Tag:            []byte("demo"),
+        Msg:            []byte(("The current time is: " + time.Now().Format("2006-01-02 15:04:05"))),
+    }
+    pack.Length = 8 + 2 + pack.HostnameLength + 2 + pack.TagLength + int16(len(pack.Msg))
+
+    buf := new(bytes.Buffer)
+    // Write four times to simulate the TCP sticky-packet (coalescing) effect.
+    pack.Pack(buf)
+    pack.Pack(buf)
+    pack.Pack(buf)
+    pack.Pack(buf)
+    buf.Write([]byte{0x00, 0x01, 0x02})
+    pack.Pack(buf)
+
+    buf.Write([]byte{'V', 0x01, 0x02, 0x11, 0x12})
+    pack.Pack(buf)
+
+    // scanner
+    scanner := bufio.NewScanner(buf)
+    scanner.Split(Split)
+    for scanner.Scan() {
+        scannedPack := new(Package)
+        data := scanner.Bytes()
+        if len(data) <= 1 {
+            continue
+        }
+        scannedPack.Unpack(bytes.NewReader(data))
+        log.Println(scannedPack)
+    }
+    if err := scanner.Err(); err != nil {
+        log.Printf("invalid package %s", err)
+    }
+}
diff --git a/misc/learn/perf/README.md b/misc/learn/perf/README.md
new file mode 100755
index 0000000..f974f49
--- /dev/null
+++ b/misc/learn/perf/README.md
@@ -0,0 +1,17 @@
+# TODO
+To implement an OpenLAN prototype in C.
+To implement an OpenLAN prototype in C++.
+ +# Golang +v5.2.10: 2 vcpu/ 1G memory +* prototype: 54MiB / 57MiB +* openlan-no-crypt: 32MiB / 57MiB +* openlan-xor-crypt: 21MiB / 57MiB + +v5.2.12: 2 vcpu / 1G memory +* openlan-no-trace-no-crypt: 42MiB / 57MiB +* openlan-no-trace-xor-crypt: 41MiB / 57MiB +* openlan-with-trace-xor-crypt: 30MiB / 57MiB + +# Protocol +tcp > ws > tls > wss > udp > kcp diff --git a/misc/learn/perf/tcpserver-0906.go b/misc/learn/perf/tcpserver-0906.go new file mode 100755 index 0000000..0a4dcff --- /dev/null +++ b/misc/learn/perf/tcpserver-0906.go @@ -0,0 +1,220 @@ +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "github.com/luscis/openlan/pkg/libol" + "github.com/songgao/water" + "net" +) + +// 409Mib on 1000Mb + +type socket struct { + conn net.Conn + frames int + buffer []byte +} + +func (s *socket) ReadFull() (error, []byte) { + size := len(s.buffer) + if size > 0 { + buf := s.buffer + if size > 4 { + ps := binary.BigEndian.Uint16(buf[2:4]) + fs := int(ps) + 4 + //fmt.Printf("fs %d, size %d, % x\n", fs, size, buf) + if size >= fs { + s.buffer = buf[fs:] + return nil, buf[:fs] + } + } + } + tmp := make([]byte, 1518*s.frames) + if size > 0 { + copy(tmp[:size], s.buffer[:size]) + } + n, err := s.conn.Read(tmp[size:]) + if err != nil { + return err, nil + } + //fmt.Printf("n %d, size %d, % x\n", n, size, s.buffer) + rs := size + n + hs := binary.BigEndian.Uint16(tmp[2:4]) + fs := int(hs) + 4 + //fmt.Printf("rs %d, fs %d, % x\n", rs, fs, tmp[:rs]) + if rs >= fs { + s.buffer = tmp[fs:rs] + return nil, tmp[:fs] + } else { + s.buffer = tmp[:rs] + } + return nil, nil +} + +func (s *socket) WriteFull(buffer []byte) error { + offset := 0 + size := len(buffer) + left := size - offset + + for left > 0 { + tmp := buffer[offset:] + n, err := s.conn.Write(tmp) + if err != nil { + return err + } + offset += n + left = size - offset + } + return nil +} + +func xClient(addr string, frames int) { + srcAddr := &net.TCPAddr{IP: net.IPv4zero, Port: 0} + dstAddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + fmt.Println(err) + return + } + conn, err := net.DialTCP("tcp", srcAddr, dstAddr) + if err != nil { + fmt.Println(err) + return + } + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + return + } + + sock := &socket{ + conn: conn, + frames: frames, + } + fmt.Printf("Local: <%s> \n", device.Name()) + + go func() { + frameData := make([]byte, 1600+4) + + for { + n, err := device.Read(frameData[4:]) + if err != nil { + break + } + if n == 0 || conn == nil { + continue + } + + binary.BigEndian.PutUint16(frameData[2:4], uint16(n)) + //fmt.Printf("<%s> %d\n", device.Name(), n) + //fmt.Printf("<%s> % x\n", device.Name(), frameData[:20]) + err = sock.WriteFull(frameData[:n+4]) + if err != nil { + fmt.Println(err) + } + } + }() + + for { + err, data := sock.ReadFull() + if err != nil { + fmt.Printf("error during read: %s", err) + break + } + if data == nil { + continue + } + _, err = device.Write(data[4:]) + if err != nil { + fmt.Println(err) + break + } + } + + _ = conn.Close() + _ = device.Close() +} + +func xServer(addr string, frames int) { + laddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + fmt.Println(err) + return + } + listener, err := net.ListenTCP("tcp", laddr) + if err != nil { + fmt.Println(err) + return + } + conn, err := listener.Accept() + if err != nil { + fmt.Println(err) + } + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("Local : <%s> \n", 
device.Name()) + fmt.Printf("Remote: <%s> \n", conn.LocalAddr().String()) + + sock := &socket{ + conn: conn, + frames: frames, + } + go func() { + for { + err, data := sock.ReadFull() + if err != nil { + fmt.Printf("error during read: %s", err) + break + } + if data == nil { + continue + } + _, err = device.Write(data[4:]) + if err != nil { + fmt.Println(err) + } + } + }() + + for { + frameData := make([]byte, 1600+4) + + n, err := device.Read(frameData[4:]) + if err != nil { + break + } + + binary.BigEndian.PutUint16(frameData[2:4], uint16(n)) + if n == 0 { + continue + } + + //fmt.Printf("<%s> %d %x\n", device.Name(), n, frameData[:20]) + err = sock.WriteFull(frameData[:n+4]) + if err != nil { + fmt.Println(err) + } + } +} + +func main() { + address := "127.0.0.1:9981" + mode := "server" + frames := 16 + flag.StringVar(&address, "addr", address, "the address listen.") + flag.StringVar(&mode, "mode", mode, "client or server.") + flag.IntVar(&frames, "frames", frames, "frames of buffer.") + flag.Parse() + + if mode == "server" { + go xServer(address, frames) + } else if mode == "client" { + go xClient(address, frames) + } + libol.Wait() +} diff --git a/misc/learn/perf/tcpserver.go b/misc/learn/perf/tcpserver.go new file mode 100755 index 0000000..684f5a8 --- /dev/null +++ b/misc/learn/perf/tcpserver.go @@ -0,0 +1,206 @@ +package main + +import ( + "encoding/binary" + "flag" + "fmt" + "github.com/luscis/openlan/pkg/libol" + "github.com/songgao/water" + "net" +) + +// 40MB on 1000Mb + +func ReadFull(conn net.Conn, buffer []byte) error { + offset := 0 + left := len(buffer) + + for left > 0 { + tmp := make([]byte, left) + n, err := conn.Read(tmp) + if err != nil { + return err + } + copy(buffer[offset:], tmp) + offset += n + left -= n + } + return nil +} + +func WriteFull(conn net.Conn, buffer []byte) error { + offset := 0 + size := len(buffer) + left := size - offset + + for left > 0 { + tmp := buffer[offset:] + n, err := conn.Write(tmp) + if err != nil { + return err + } + offset += n + left = size - offset + } + return nil +} + +func Client(addr string) { + srcAddr := &net.TCPAddr{IP: net.IPv4zero, Port: 0} + dstAddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + fmt.Println(err) + return + } + conn, err := net.DialTCP("tcp", srcAddr, dstAddr) + if err != nil { + fmt.Println(err) + return + } + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("Local: <%s> \n", device.Name()) + + go func() { + frameData := make([]byte, 1600+4) + + for { + n, err := device.Read(frameData[4:]) + if err != nil { + break + } + if n == 0 || conn == nil { + continue + } + + binary.BigEndian.PutUint16(frameData[2:4], uint16(n)) + //fmt.Printf("<%s> %d\n", device.Name(), n) + //fmt.Printf("<%s> % x\n", device.Name(), frameData[:20]) + err = WriteFull(conn, frameData[:n+4]) + if err != nil { + fmt.Println(err) + } + } + }() + + for { + data := make([]byte, 1600+4) + + err := ReadFull(conn, data[:4]) + if err != nil { + fmt.Printf("error during read: %s", err) + break + } + + size := binary.BigEndian.Uint16(data[2:4]) + if size == 0 || size > 1600 { + continue + } + + err = ReadFull(conn, data[4:size+4]) + if err != nil { + fmt.Printf("error during read: %s", err) + break + } + + _, err = device.Write(data[4 : size+4]) + if err != nil { + fmt.Println(err) + break + } + } + + _ = conn.Close() + _ = device.Close() +} + +func Server(addr string) { + laddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + fmt.Println(err) 
+ return + } + listener, err := net.ListenTCP("tcp", laddr) + if err != nil { + fmt.Println(err) + return + } + conn, err := listener.Accept() + if err != nil { + fmt.Println(err) + } + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("Local : <%s> \n", device.Name()) + fmt.Printf("Remote: <%s> \n", conn.LocalAddr().String()) + + go func() { + data := make([]byte, 1600+4) //MTU:1500, 1500+14+4 + + for { + err := ReadFull(conn, data[:4]) + if err != nil { + fmt.Printf("error during read: %s", err) + } + + size := binary.BigEndian.Uint16(data[2:4]) + if size == 0 || size > 1600 { + continue + } + + //fmt.Printf("%d %x\n", size, data[:20]) + err = ReadFull(conn, data[4:size+4]) + if err != nil { + fmt.Printf("error during read: %s", err) + } + + _, err = device.Write(data[4 : size+4]) + if err != nil { + fmt.Println(err) + } + } + }() + + for { + frameData := make([]byte, 1600+4) + + n, err := device.Read(frameData[4:]) + if err != nil { + break + } + + binary.BigEndian.PutUint16(frameData[2:4], uint16(n)) + if n == 0 { + continue + } + + //fmt.Printf("<%s> %d %x\n", device.Name(), n, frameData[:20]) + err = WriteFull(conn, frameData[:n+4]) + if err != nil { + fmt.Println(err) + } + } +} + +func main() { + address := "127.0.0.1:9981" + mode := "server" + flag.StringVar(&address, "addr", address, "the address listen.") + flag.StringVar(&mode, "mode", mode, "client or server.") + flag.Parse() + + if mode == "server" { + go Server(address) + } else if mode == "client" { + go Client(address) + } + libol.Wait() +} diff --git a/misc/learn/perf/udpclient.go b/misc/learn/perf/udpclient.go new file mode 100755 index 0000000..153d527 --- /dev/null +++ b/misc/learn/perf/udpclient.go @@ -0,0 +1,66 @@ +package main + +import ( + "fmt" + "github.com/songgao/water" + "net" +) + +func main() { + sip := net.ParseIP("192.168.4.151") + srcAddr := &net.UDPAddr{IP: net.IPv4zero, Port: 0} + dstAddr := &net.UDPAddr{IP: sip, Port: 9981} + + conn, err := net.DialUDP("udp", srcAddr, dstAddr) + if err != nil { + fmt.Println(err) + } + + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + return + } + + fmt.Printf("Local: <%s> \n", device.Name()) + + frameData := make([]byte, 1448+16+8) //1472 + go func() { + for { + n, err := device.Read(frameData[8:]) + if err != nil { + break + } + if n == 0 || conn == nil { + continue + } + + fmt.Printf("<%s> %d\n", device.Name(), n) + fmt.Printf("<%s> % x\n", device.Name(), frameData[:20]) + + _, err = conn.Write(frameData[:n+8]) + if err != nil { + fmt.Println(err) + } + } + }() + + data := make([]byte, 1448+16+8) + for { + n, _, err := conn.ReadFromUDP(data) + if err != nil { + fmt.Printf("error during read: %s", err) + } + if n == 0 { + continue + } + fmt.Printf("<%s> %x\n", dstAddr.String(), data[:n]) + _, err = device.Write(data[8:n]) + if err != nil { + fmt.Println(err) + } + } + + conn.Close() + device.Close() +} diff --git a/misc/learn/perf/udpsever.go b/misc/learn/perf/udpsever.go new file mode 100755 index 0000000..145c0b1 --- /dev/null +++ b/misc/learn/perf/udpsever.go @@ -0,0 +1,67 @@ +package main + +import ( + "fmt" + "github.com/songgao/water" + "net" +) + +func main() { + var remote *net.UDPAddr + + listener, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9981}) + if err != nil { + fmt.Println(err) + return + } + device, err := water.New(water.Config{DeviceType: water.TAP}) + if err != nil { + fmt.Println(err) + 
return + } + + fmt.Printf("Local: <%s> \n", device.Name()) + fmt.Printf("Local: <%s> \n", listener.LocalAddr().String()) + + //1500-20-8-8, 16 = 1448 + data := make([]byte, 1448+16+8) + go func() { + for { + n, remoteAddr, err := listener.ReadFromUDP(data) + if err != nil { + fmt.Printf("error during read: %s", err) + } + + if n == 0 { + continue + } + fmt.Printf("<%s> %d\n", remoteAddr, n) + remote = remoteAddr + //fmt.Printf("<%s> %s\n", remoteAddr, data[:n]) + _, err = device.Write(data[8:n]) + if err != nil { + fmt.Println(err) + } + } + }() + + //udpMtu := 1500-20-8 //1472 + frameData := make([]byte, 1448+16+8) + //header := make([]byte, 8) + for { + n, err := device.Read(frameData[8:]) + if err != nil { + break + } + + fmt.Printf("<%s> %d %x\n", device.Name(), n, frameData[:20]) + if n == 0 || remote == nil { + continue + } + + _, err = listener.WriteToUDP(frameData[:n+8], remote) + if err != nil { + fmt.Println(err) + } + } +} diff --git a/misc/learn/rate.go b/misc/learn/rate.go new file mode 100755 index 0000000..926b196 --- /dev/null +++ b/misc/learn/rate.go @@ -0,0 +1,55 @@ +package main + +import ( + "fmt" + "golang.org/x/time/rate" + "sync" + "sync/atomic" + "time" +) + +func test(limit rate.Limit, burst int, size uint32, wg *sync.WaitGroup) { + var ( + numOK = uint32(0) + numFail = uint32(0) + ) + + // Very slow replenishing bucket. + lim := rate.NewLimiter(limit, burst) + + now := time.Now().Unix() + at := time.Now().Add(15 * time.Second) + // Tries to take a token, atomically updates the counter and decreases the wait + // group counter. + + f := func() { + if ok := lim.AllowN(time.Now(), int(size)); ok { + //fmt.Printf("%d\n", time.Now().Unix()) + atomic.AddUint32(&numOK, size) + } else { + atomic.AddUint32(&numFail, size) + } + } + + for at.After(time.Now()) { + go f() + } + dt := time.Now().Unix() - now + fmt.Printf("size = %d rate: %d\n", size, numOK/uint32(dt)) + wg.Done() +} + +func main() { + const ( + limit = 10 * 1024 + burst = 10 * 1024 * 2 + numRequests = uint32(50) + ) + + wg := &sync.WaitGroup{} + wg.Add(int(numRequests)) + for i := uint32(0); i < numRequests; i++ { + go test(limit, burst, 64+(i*64), wg) + } + wg.Wait() +} diff --git a/misc/learn/readline.go b/misc/learn/readline.go new file mode 100755 index 0000000..3610a38 --- /dev/null +++ b/misc/learn/readline.go @@ -0,0 +1,166 @@ +package main + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "strconv" + "strings" + "time" + + "github.com/chzyer/readline" +) + +func usage(w io.Writer) { + io.WriteString(w, "commands:\n") + io.WriteString(w, completer.Tree(" ")) +} + +// Function constructor - constructs new function for listing given directory +func listFiles(path string) func(string) []string { + return func(line string) []string { + names := make([]string, 0) + files, _ := ioutil.ReadDir(path) + for _, f := range files { + names = append(names, f.Name()) + } + return names + } +} + +var completer = readline.NewPrefixCompleter( + readline.PcItem("mode", + readline.PcItem("vi"), + readline.PcItem("emacs"), + ), + readline.PcItem("login"), + readline.PcItem("say", + readline.PcItemDynamic(listFiles("./"), + readline.PcItem("with", + readline.PcItem("following"), + readline.PcItem("items"), + ), + ), + readline.PcItem("hello"), + readline.PcItem("bye"), + ), + readline.PcItem("setprompt"), + readline.PcItem("setpassword"), + readline.PcItem("bye"), + readline.PcItem("help"), + readline.PcItem("go", + readline.PcItem("build", readline.PcItem("-o"), readline.PcItem("-v")), + readline.PcItem("install", + 
readline.PcItem("-v"), + readline.PcItem("-vv"), + readline.PcItem("-vvv"), + ), + readline.PcItem("test"), + ), + readline.PcItem("sleep"), +) + +func filterInput(r rune) (rune, bool) { + switch r { + // block CtrlZ feature + case readline.CharCtrlZ: + return r, false + } + return r, true +} + +func main() { + l, err := readline.NewEx(&readline.Config{ + Prompt: "\033[31m»\033[0m ", + HistoryFile: "/tmp/readline.tmp", + AutoComplete: completer, + InterruptPrompt: "^C", + EOFPrompt: "exit", + HistorySearchFold: true, + FuncFilterInputRune: filterInput, + }) + if err != nil { + panic(err) + } + defer l.Close() + + setPasswordCfg := l.GenPasswordConfig() + setPasswordCfg.SetListener(func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + l.SetPrompt(fmt.Sprintf("Enter password(%v): ", len(line))) + l.Refresh() + return nil, 0, false + }) + + log.SetOutput(l.Stderr()) + for { + line, err := l.Readline() + if err == readline.ErrInterrupt { + if len(line) == 0 { + break + } else { + continue + } + } else if err == io.EOF { + break + } + + line = strings.TrimSpace(line) + switch { + case strings.HasPrefix(line, "mode "): + switch line[5:] { + case "vi": + l.SetVimMode(true) + case "emacs": + l.SetVimMode(false) + default: + println("invalid mode:", line[5:]) + } + case line == "mode": + if l.IsVimMode() { + println("current mode: vim") + } else { + println("current mode: emacs") + } + case line == "login": + pswd, err := l.ReadPassword("please enter your password: ") + if err != nil { + break + } + println("you enter:", strconv.Quote(string(pswd))) + case line == "help": + usage(l.Stderr()) + case line == "setpassword": + pswd, err := l.ReadPasswordWithConfig(setPasswordCfg) + if err == nil { + println("you set:", strconv.Quote(string(pswd))) + } + case strings.HasPrefix(line, "setprompt"): + if len(line) <= 10 { + log.Println("setprompt ") + break + } + l.SetPrompt(line[10:]) + case strings.HasPrefix(line, "say"): + line := strings.TrimSpace(line[3:]) + if len(line) == 0 { + log.Println("say what?") + break + } + go func() { + for range time.Tick(time.Second) { + log.Println(line) + } + }() + case line == "bye": + goto exit + case line == "sleep": + log.Println("sleep 4 second") + time.Sleep(4 * time.Second) + case line == "": + default: + log.Println("you said:", strconv.Quote(line)) + } + } +exit: +} diff --git a/misc/learn/route.go b/misc/learn/route.go new file mode 100755 index 0000000..23c4f97 --- /dev/null +++ b/misc/learn/route.go @@ -0,0 +1,48 @@ +package main + +import ( + "fmt" + "github.com/vishvananda/netlink" + "net" + "os" +) + +func main() { + dest_str := os.Getenv("DEST") + dest := net.ParseIP(dest_str) + routes, err := netlink.RouteList(nil, netlink.FAMILY_V4) + if err != nil { + panic(err) + } + var hit *net.IPNet + for _, rte := range routes { + fmt.Println(rte) + if rte.Dst != nil && !rte.Dst.Contains(dest) { + continue + } + if hit != nil { + rts, _ := rte.Dst.Mask.Size() + ths, _ := hit.Mask.Size() + if rts < ths { + continue + } + } + hit = rte.Dst + ifIndex := rte.LinkIndex + gateway := rte.Gw + if gateway == nil { + gateway = rte.Src + } + fmt.Println("gw", rte.Gw) + link, _ := netlink.LinkByIndex(ifIndex) + addrs, err := netlink.AddrList(link, netlink.FAMILY_V4) + if err != nil { + panic(err) + } + for _, addr := range addrs { + if addr.Contains(gateway) { + fmt.Println("hit ", addr.IP) + } + } + } +} diff --git a/misc/learn/rule.go b/misc/learn/rule.go new file mode 100755 index 0000000..d601af4 --- /dev/null +++ b/misc/learn/rule.go @@ -0,0 
+1,24 @@ +package main + +import ( + "fmt" + "github.com/vishvananda/netlink" + //"net" +) + +func main() { + rules, err := netlink.RuleList(netlink.FAMILY_V4) + if err != nil { + panic(err) + } + for _, ru := range rules { + fmt.Println(ru) + } + ru := netlink.NewRule() + //src := &net.IPNet{IP: net.IPv4(0, 0, 0, 0), Mask: net.CIDRMask(0, 32)} + ru.Table = 100 + ru.Priority = 16383 + if err := netlink.RuleAdd(ru); err != nil { + fmt.Printf("%s %s\n", ru, err) + } +} diff --git a/misc/learn/safemap.go b/misc/learn/safemap.go new file mode 100755 index 0000000..cfd57d9 --- /dev/null +++ b/misc/learn/safemap.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "sync" +) + +type SMap struct { + Data map[interface{}]interface{} + Lock sync.RWMutex +} + +func NewSMap(size int) *SMap { + this := &SMap{ + Data: make(map[interface{}]interface{}, size), + } + return this +} + +func (sm *SMap) Set(k interface{}, v interface{}) { + sm.Lock.Lock() + defer sm.Lock.Unlock() + sm.Data[k] = v +} + +func (sm *SMap) Get(k interface{}) interface{} { + sm.Lock.RLock() + defer sm.Lock.RUnlock() + return sm.Data[k] +} + +func (sm *SMap) GetEx(k string) (interface{}, bool) { + sm.Lock.RLock() + defer sm.Lock.RUnlock() + v, ok := sm.Data[k] + return v, ok +} + +func main() { + m := NewSMap(1024) + m.Set("hi", 1) + fmt.Println(m) + m.Set("hello", &m) + + fmt.Println(m) + a := m.Get("hi").(int) + a = 2 + fmt.Println(a) + m.Set("hip", &a) + fmt.Println(m) + + b := m.Get("hip").(*int) + *b = 3 + fmt.Println(*b) + c := m.Get("hip").(*int) + fmt.Println(m) + fmt.Println(*c) +} diff --git a/misc/learn/tcpmss.sh b/misc/learn/tcpmss.sh new file mode 100755 index 0000000..38deb23 --- /dev/null +++ b/misc/learn/tcpmss.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# modprobe ipt_LOG +# iptables -A OUTPUT -t raw -p icmp -j LOG +# iptables -A PREROUTING -t raw -p icmp -j LOG + +BR="br-vxlan" + +iptables -t mangle -A FORWARD -i $BR -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --set-mss 1332 + + diff --git a/misc/learn/tls/client.go b/misc/learn/tls/client.go new file mode 100755 index 0000000..c8cc8d7 --- /dev/null +++ b/misc/learn/tls/client.go @@ -0,0 +1,36 @@ +package main + +import ( + "crypto/tls" + "log" +) + +func main() { + log.SetFlags(log.Lshortfile) + + conf := &tls.Config{ + InsecureSkipVerify: true, + } + + conn, err := tls.Dial("tcp", "127.0.0.1:443", conf) + if err != nil { + log.Println(err) + return + } + defer conn.Close() + + n, err := conn.Write([]byte("hello\n")) + if err != nil { + log.Println(n, err) + return + } + + buf := make([]byte, 100) + n, err = conn.Read(buf) + if err != nil { + log.Println(n, err) + return + } + + println(string(buf[:n])) +} diff --git a/misc/learn/tls/server.go b/misc/learn/tls/server.go new file mode 100755 index 0000000..9302c7a --- /dev/null +++ b/misc/learn/tls/server.go @@ -0,0 +1,57 @@ +package main + +import ( + "bufio" + "crypto/tls" + "log" + "net" +) + +func main() { + log.SetFlags(log.Lshortfile) + + cer, err := tls.LoadX509KeyPair( + "./resource/ca/crt.pem", + "./resource/ca/private.key") + if err != nil { + log.Println(err) + return + } + + config := &tls.Config{Certificates: []tls.Certificate{cer}} + ln, err := tls.Listen("tcp", ":443", config) + if err != nil { + log.Println(err) + return + } + defer ln.Close() + + for { + conn, err := ln.Accept() + if err != nil { + log.Println(err) + continue + } + go handleConnection(conn) + } +} + +func handleConnection(conn net.Conn) { + defer conn.Close() + r := bufio.NewReader(conn) + for { + msg, err := r.ReadString('\n') + if err != 
nil { + log.Println(err) + return + } + + println(msg) + + n, err := conn.Write([]byte("world\n")) + if err != nil { + log.Println(n, err) + return + } + } +} diff --git a/misc/learn/udp/client.go b/misc/learn/udp/client.go new file mode 100755 index 0000000..5d39835 --- /dev/null +++ b/misc/learn/udp/client.go @@ -0,0 +1,31 @@ +package main + +import ( + "fmt" + "net" + "time" +) + +func main() { + dip := net.ParseIP("192.168.7.30") + srcAddr := &net.UDPAddr{IP: net.IPv4zero, Port: 8888} + dstAddr := &net.UDPAddr{IP: dip, Port: 9999} + + conn, err := net.DialUDP("udp", srcAddr, dstAddr) + if err != nil { + fmt.Println(err) + } + data := make([]byte, 4096) + for i := 0; i < len(data); i++ { + data[i] = byte(i) + } + + for { + fmt.Printf("% x ... % x\n", data[:16], data[4080:4096]) + _, err = conn.Write(data) + if err != nil { + fmt.Println(err) + } + time.Sleep(time.Second) + } +} diff --git a/misc/learn/udp/server.go b/misc/learn/udp/server.go new file mode 100755 index 0000000..af56660 --- /dev/null +++ b/misc/learn/udp/server.go @@ -0,0 +1,24 @@ +package main + +import ( + "fmt" + "net" +) + +func main() { + listener, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("0.0.0.0"), Port: 9999}) + if err != nil { + fmt.Println(err) + return + } + + for { + data := make([]byte, 4096*2) + n, remoteAddr, err := listener.ReadFromUDP(data) + if err != nil { + fmt.Printf("error during read: %s", err) + } + fmt.Printf("from %s and %d\n", remoteAddr, n) + fmt.Printf("% x ... % x\n", data[:16], data[4080:4096]) + } +} diff --git a/misc/learn/vxlan/vxlan.sh b/misc/learn/vxlan/vxlan.sh new file mode 100755 index 0000000..392785b --- /dev/null +++ b/misc/learn/vxlan/vxlan.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -ex + +vni="100" +local="192.168.7.41" +remote="192.168.7.42" +port="4789" + +ssh ${local} /bin/bash < 192.168.209.130 +# + +auth_key=$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) +enc_key=$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) + +sun_spi=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) +moon_spi=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) + +reqid=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) + +sun="$1"; shift +sun_net="$1"; shift +moon="$1"; shift +moon_net="$1"; shift + +if [ -z "${sun}${sun_net}${moon}${moon_net}" ]; then + echo "$0 moon moon-net sun sun-net" + exit 0 +fi + +sun_port="$1"; +moon_port="$2"; + +if [ -z "${sun_port}" ]; then + sun_port="22" +fi +if [ -z "${moon_port}" ]; then + moon_port="22" +fi + +ssh -p ${sun_port} ${sun} /bin/bash << EOF + # -- + ip xfrm state flush + + ip xfrm state add src ${moon} dst ${sun} proto esp spi 0x${moon_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} + ip xfrm state add src ${sun} dst ${moon} proto esp spi 0x${sun_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} + ip xfrm state ls + + # -- + ip xfrm policy flush + + ip xfrm policy add src ${moon_net} dst ${sun_net} dir in ptype main tmpl src ${moon} dst ${sun} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${moon_net} dst ${sun_net} dir fwd ptype main tmpl src ${moon} dst ${sun} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${sun_net} dst ${moon_net} dir out ptype main tmpl src ${sun} dst ${moon} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy ls + ip link show dummy0 || ip link add type dummy + ip link set dummy0 up + ip addr replace ${sun_net} dev dummy0 + ip route replace ${moon_net} 
via ${sun_net} +EOF + +ssh -p ${moon_port} ${moon} /bin/bash << EOF + # -- + ip xfrm state flush + + ip xfrm state add src ${sun} dst ${moon} proto esp spi 0x${sun_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} + ip xfrm state add src ${moon} dst ${sun} proto esp spi 0x${moon_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} + ip xfrm state ls + + # -- + ip xfrm policy flush + + ip xfrm policy add src ${sun_net} dst ${moon_net} dir in ptype main tmpl src ${sun} dst ${moon} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${sun_net} dst ${moon_net} dir fwd ptype main tmpl src ${sun} dst ${moon} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${moon_net} dst ${sun_net} dir out ptype main tmpl src ${moon} dst ${sun} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy ls + ip link show dummy0 || ip link add type dummy + ip link set dummy0 up + ip addr replace ${moon_net} dev dummy0 + ip route replace ${sun_net} via ${moon_net} +EOF diff --git a/misc/learn/xfrm/setup-udp.sh b/misc/learn/xfrm/setup-udp.sh new file mode 100755 index 0000000..df0d6aa --- /dev/null +++ b/misc/learn/xfrm/setup-udp.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +set -ex + +# Topo. +# +# 100.141 -- 200.130 +# | | +# 192.168.209.141 <=====> 192.168.209.130 +# + +auth_key=$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) +enc_key=$(dd if=/dev/urandom count=32 bs=1 2> /dev/null| xxd -p -c 64) + +sun_spi=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) +moon_spi=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) + +reqid=$(dd if=/dev/urandom count=4 bs=1 2> /dev/null| xxd -p -c 8) + +sun="$1"; shift +sun_net="$1"; shift +moon="$1"; shift +moon_net="$1"; shift + +if [ -z "${sun}${sun_net}${moon}${moon_net}" ]; then + echo "$0 moon moon-net sun sun-net" + exit 0 +fi + +sun_port="$1"; +moon_port="$2"; + +if [ -z "${sun_port}" ]; then + sun_port="22" +fi +if [ -z "${moon_port}" ]; then + moon_port="22" +fi + +if [ -z "${sun_addr}" ]; then + sun_addr=${sun} +fi +if [ -z "${moon_addr}" ]; then + moon_addr=${moon} +fi + +ssh -p ${sun_port} ${sun} /bin/bash << EOF + # -- + ip xfrm state flush + + ip xfrm state add src ${moon} dst ${sun_addr} proto esp spi 0x${moon_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} encap espinudp 4500 4500 0.0.0.0 + ip xfrm state add src ${sun_addr} dst ${moon} proto esp spi 0x${sun_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} encap espinudp 4500 4500 0.0.0.0 + ip xfrm state ls + + # -- + ip xfrm policy flush + + ip xfrm policy add src ${moon_net} dst ${sun_net} dir in ptype main tmpl src ${moon} dst ${sun_addr} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${moon_net} dst ${sun_net} dir fwd ptype main tmpl src ${moon} dst ${sun_addr} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${sun_net} dst ${moon_net} dir out ptype main tmpl src ${sun_addr} dst ${moon} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy ls + ip link show dummy0 || ip link add type dummy + ip link set dummy0 up + ip addr replace ${sun_net} dev dummy0 + ip route replace ${moon_net} via ${sun_net} +EOF + +ssh -p ${moon_port} ${moon} /bin/bash << EOF + # -- + ip xfrm state flush + + ip xfrm state add src ${sun} dst ${moon_addr} proto esp spi 0x${sun_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} encap espinudp 4500 4500 0.0.0.0 + ip xfrm state add src ${moon_addr} 
dst ${sun} proto esp spi 0x${moon_spi} reqid 0x${reqid} mode tunnel auth sha256 0x${auth_key} enc aes 0x${enc_key} encap espinudp 4500 4500 0.0.0.0 + ip xfrm state ls + + # -- + ip xfrm policy flush + + ip xfrm policy add src ${sun_net} dst ${moon_net} dir in ptype main tmpl src ${sun} dst ${moon_addr} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${sun_net} dst ${moon_net} dir fwd ptype main tmpl src ${sun} dst ${moon_addr} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy add src ${moon_net} dst ${sun_net} dir out ptype main tmpl src ${moon_addr} dst ${sun} proto esp reqid 0x${reqid} mode tunnel + ip xfrm policy ls + ip link show dummy0 || ip link add type dummy + ip link set dummy0 up + ip addr replace ${moon_net} dev dummy0 + ip route replace ${sun_net} via ${moon_net} +EOF diff --git a/misc/playbook/readme.md b/misc/playbook/readme.md new file mode 100644 index 0000000..5fd344f --- /dev/null +++ b/misc/playbook/readme.md @@ -0,0 +1,5 @@ +# ping +ansible openlan -m ping + +# upgrade +ansible-playbook upgrade.yaml -e "version=0.8.22" diff --git a/misc/playbook/upgrade.yaml b/misc/playbook/upgrade.yaml new file mode 100644 index 0000000..9159711 --- /dev/null +++ b/misc/playbook/upgrade.yaml @@ -0,0 +1,21 @@ +--- +- hosts: openlan + remote_user: root + vars: + version: 0.8.20 + + tasks: + - name: download openlan-{{ version }} + copy: src=/root/rpmbuild/RPMS/x86_64/openlan-{{ version }}-1.el7.x86_64.rpm dest=/tmp + + - name: upgrade openlan + yum: state=present name=/tmp/openlan-{{ version }}-1.el7.x86_64.rpm + notify: + - restart openlan-confd + - restart openlan-switch + + handlers: + - name: restart openlan-confd + service: name=openlan-confd enabled=yes state=restarted + - name: restart openlan-switch + service: name=openlan-switch enabled=yes state=restarted diff --git a/pkg/access/http/http.go b/pkg/access/http/http.go new file mode 100755 index 0000000..0658be7 --- /dev/null +++ b/pkg/access/http/http.go @@ -0,0 +1,115 @@ +package http + +import ( + "context" + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/libol" + "net/http" +) + +type Http struct { + pointer Pointer + listen string + server *http.Server + crtFile string + keyFile string + pubDir string + router *mux.Router + token string +} + +func NewHttp(pointer Pointer) (h *Http) { + h = &Http{ + pointer: pointer, + } + if config := pointer.Config(); config != nil { + if config.Http != nil { + h.listen = config.Http.Listen + h.pubDir = config.Http.Public + } + } + return h +} + +func (h *Http) Initialize() { + r := h.Router() + if h.server == nil { + h.server = &http.Server{ + Addr: h.listen, + Handler: r, + } + } + h.token = libol.GenRandom(32) + libol.Info("Http.Initialize: AdminToken: %s", h.token) + h.LoadRouter() +} + +func (h *Http) IsAuth(w http.ResponseWriter, r *http.Request) bool { + token, pass, ok := r.BasicAuth() + libol.Debug("Http.IsAuth token: %s, pass: %s", token, pass) + if !ok || token != h.token { + w.Header().Set("WWW-Authenticate", "Basic") + http.Error(w, "Authorization Required.", http.StatusUnauthorized) + return false + } + return true +} + +func (h *Http) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if h.IsAuth(w, r) { + next.ServeHTTP(w, r) + } else { + w.Header().Set("WWW-Authenticate", "Basic") + http.Error(w, "Authorization Required.", http.StatusUnauthorized) + } + }) +} + +func (h *Http) Router() *mux.Router { + if h.router == nil { + h.router = mux.NewRouter() + 
h.router.Use(h.Middleware) + } + return h.router +} + +func (h *Http) LoadRouter() { + router := h.Router() + + router.HandleFunc("/current/uuid", func(w http.ResponseWriter, r *http.Request) { + format := GetQueryOne(r, "format") + if format == "yaml" { + ResponseYaml(w, h.pointer.UUID()) + } else { + ResponseJson(w, h.pointer.UUID()) + } + }) + router.HandleFunc("/current/config", func(w http.ResponseWriter, r *http.Request) { + format := GetQueryOne(r, "format") + if format == "yaml" { + ResponseYaml(w, h.pointer.Config()) + } else { + ResponseJson(w, h.pointer.Config()) + } + }) +} + +func (h *Http) Start() { + h.Initialize() + libol.Info("Http.Start %s", h.listen) + if h.keyFile == "" || h.crtFile == "" { + if err := h.server.ListenAndServe(); err != nil { + libol.Error("Http.Start on %s: %s", h.listen, err) + return + } + } +} + +func (h *Http) Shutdown() { + libol.Info("Http.Shutdown %s", h.listen) + if err := h.server.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + libol.Error("Http.Shutdown: %v", err) + } +} diff --git a/pkg/access/http/interface.go b/pkg/access/http/interface.go new file mode 100755 index 0000000..3e6b92e --- /dev/null +++ b/pkg/access/http/interface.go @@ -0,0 +1,8 @@ +package http + +import "github.com/luscis/openlan/pkg/config" + +type Pointer interface { + UUID() string + Config() *config.Point +} diff --git a/pkg/access/http/utils.go b/pkg/access/http/utils.go new file mode 100644 index 0000000..71576ab --- /dev/null +++ b/pkg/access/http/utils.go @@ -0,0 +1,35 @@ +package http + +import ( + "encoding/json" + "gopkg.in/yaml.v2" + "net/http" +) + +func ResponseJson(w http.ResponseWriter, v interface{}) { + str, err := json.MarshalIndent(v, "", " ") + if err == nil { + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(str) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func ResponseYaml(w http.ResponseWriter, v interface{}) { + str, err := yaml.Marshal(v) + if err == nil { + w.Header().Set("Content-Type", "application/yaml") + _, _ = w.Write(str) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func GetQueryOne(req *http.Request, name string) string { + query := req.URL.Query() + if values, ok := query[name]; ok { + return values[0] + } + return "" +} diff --git a/pkg/access/neighbor.go b/pkg/access/neighbor.go new file mode 100755 index 0000000..2316adc --- /dev/null +++ b/pkg/access/neighbor.go @@ -0,0 +1,149 @@ +package access + +import ( + "encoding/binary" + "github.com/luscis/openlan/pkg/libol" + "sync" + "time" +) + +type NeighborListener struct { + Interval func(dest []byte) + Expire func(dest []byte) +} + +type Neighbor struct { + HwAddr []byte + IpAddr []byte + Uptime int64 + NewTime int64 +} + +type Neighbors struct { + lock sync.RWMutex + neighbors map[uint32]*Neighbor + done chan bool + ticker *time.Ticker + timeout int64 + interval int64 + listener NeighborListener +} + +func (n *Neighbors) Expire() { + n.lock.Lock() + defer n.lock.Unlock() + deletes := make([]uint32, 0, 1024) + //collect need deleted. + for index, learn := range n.neighbors { + now := time.Now().Unix() + if now-learn.Uptime >= n.timeout { + deletes = append(deletes, index) + } + } + libol.Debug("Neighbors.Expire delete %d", len(deletes)) + //execute delete. 
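+ // drop each expired neighbor collected above; the debug log records its hardware address.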
+ for _, d := range deletes { + if l, ok := n.neighbors[d]; ok { + delete(n.neighbors, d) + libol.Debug("Neighbors.Expire: delete %x", l.HwAddr) + } + } +} + +func (n *Neighbors) Interval() { + n.lock.Lock() + defer n.lock.Unlock() + intervals := make([]uint32, 0, 1024) + //collect need keepalive. + for index, learn := range n.neighbors { + now := time.Now().Unix() + if now-learn.Uptime >= n.interval { + intervals = append(intervals, index) + } + } + libol.Debug("Neighbors.Interval keepalive %d", len(intervals)) + //execute delete. + for _, d := range intervals { + if l, ok := n.neighbors[d]; ok { + if n.listener.Interval != nil { + n.listener.Interval(l.IpAddr) + } + } + } +} + +func (n *Neighbors) Start() { + for { + select { + case <-n.done: + return + case t := <-n.ticker.C: + libol.Log("Neighbors.Ticker: at %s", t) + n.Interval() + n.Expire() + } + } +} + +func (n *Neighbors) Stop() { + n.ticker.Stop() + n.done <- true +} + +func (n *Neighbors) Add(h *Neighbor) { + if h == nil { + return + } + n.lock.Lock() + defer n.lock.Unlock() + k := binary.BigEndian.Uint32(h.IpAddr) + if l, ok := n.neighbors[k]; ok { + l.Uptime = h.Uptime + copy(l.HwAddr[:6], h.HwAddr[:6]) + } else { + l := &Neighbor{ + Uptime: h.Uptime, + NewTime: h.NewTime, + HwAddr: make([]byte, 6), + IpAddr: make([]byte, 4), + } + copy(l.IpAddr[:4], h.IpAddr[:4]) + copy(l.HwAddr[:6], h.HwAddr[:6]) + n.neighbors[k] = l + } +} + +func (n *Neighbors) Get(d uint32) *Neighbor { + n.lock.RLock() + defer n.lock.RUnlock() + if l, ok := n.neighbors[d]; ok { + return l + } + return nil +} + +func (n *Neighbors) Clear() { + libol.Debug("Neighbor.Clear") + n.lock.Lock() + defer n.lock.Unlock() + deletes := make([]uint32, 0, 1024) + for index := range n.neighbors { + deletes = append(deletes, index) + } + //execute delete. + for _, d := range deletes { + if _, ok := n.neighbors[d]; ok { + delete(n.neighbors, d) + } + } +} + +func (n *Neighbors) GetByBytes(d []byte) *Neighbor { + n.lock.RLock() + defer n.lock.RUnlock() + k := binary.BigEndian.Uint32(d) + if l, ok := n.neighbors[k]; ok { + return l + } + return nil +} diff --git a/pkg/access/point_darwin.go b/pkg/access/point_darwin.go new file mode 100755 index 0000000..df26c43 --- /dev/null +++ b/pkg/access/point_darwin.go @@ -0,0 +1,102 @@ +package access + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "strings" +) + +type Point struct { + MixPoint + // private + brName string + addr string + routes []*models.Route +} + +func NewPoint(config *config.Point) *Point { + p := Point{ + brName: config.Interface.Bridge, + MixPoint: NewMixPoint(config), + } + return &p +} + +func (p *Point) Initialize() { + p.worker.listener.AddAddr = p.AddAddr + p.worker.listener.DelAddr = p.DelAddr + p.worker.listener.AddRoutes = p.AddRoutes + p.worker.listener.DelRoutes = p.DelRoutes + p.MixPoint.Initialize() +} + +func (p *Point) AddAddr(ipStr string) error { + if ipStr == "" { + return nil + } + // add point-to-point + ips := strings.SplitN(ipStr, "/", 2) + out, err := libol.IpAddrAdd(p.IfName(), ips[0], ips[0]) + if err != nil { + p.out.Warn("Point.AddAddr: %s, %s", err, out) + return err + } + p.out.Info("Point.AddAddr: %s", ipStr) + // add directly route. 
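+ // route the full prefix via this interface (the third argument is the next hop, left empty here).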
+ out, err = libol.IpRouteAdd(p.IfName(), ipStr, "") + if err != nil { + p.out.Warn("Point.AddAddr: %s, %s", err, out) + } + p.out.Info("Point.AddAddr: route %s via %s", ipStr, p.IfName()) + p.addr = ipStr + return nil +} + +func (p *Point) DelAddr(ipStr string) error { + // delete directly route. + out, err := libol.IpRouteDel(p.IfName(), ipStr, "") + if err != nil { + p.out.Warn("Point.DelAddr: %s, %s", err, out) + } + p.out.Info("Point.DelAddr: route %s via %s", ipStr, p.IfName()) + // delete point-to-point + ip4 := strings.SplitN(ipStr, "/", 2)[0] + out, err = libol.IpAddrDel(p.IfName(), ip4) + if err != nil { + p.out.Warn("Point.DelAddr: %s, %s", err, out) + return err + } + p.out.Info("Point.DelAddr: %s", ip4) + p.addr = "" + return nil +} + +func (p *Point) AddRoutes(routes []*models.Route) error { + if routes == nil { + return nil + } + for _, route := range routes { + out, err := libol.IpRouteAdd(p.IfName(), route.Prefix, "") + if err != nil { + p.out.Warn("Point.AddRoutes: %s %s", route.Prefix, out) + continue + } + p.out.Info("Point.AddRoutes: route %s via %s", route.Prefix, p.IfName()) + } + p.routes = routes + return nil +} + +func (p *Point) DelRoutes(routes []*models.Route) error { + for _, route := range routes { + out, err := libol.IpRouteDel(p.IfName(), route.Prefix, "") + if err != nil { + p.out.Warn("Point.DelRoutes: %s %s", route.Prefix, out) + continue + } + p.out.Info("Point.DelRoutes: route %s via %s", route.Prefix, p.IfName()) + } + p.routes = nil + return nil +} diff --git a/pkg/access/point_linux.go b/pkg/access/point_linux.go new file mode 100755 index 0000000..ff53460 --- /dev/null +++ b/pkg/access/point_linux.go @@ -0,0 +1,310 @@ +package access + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/network" + "github.com/vishvananda/netlink" + "net" + "strings" +) + +type Point struct { + MixPoint + // private + brName string + ipMtu int + addr string + bypass *netlink.Route + routes []*models.Route + link netlink.Link + uuid string +} + +func NewPoint(config *config.Point) *Point { + ipMtu := config.Interface.IPMtu + if ipMtu == 0 { + ipMtu = 1500 + } + p := Point{ + ipMtu: ipMtu, + brName: config.Interface.Bridge, + MixPoint: NewMixPoint(config), + } + return &p +} + +func (p *Point) Initialize() { + p.worker.listener.AddAddr = p.AddAddr + p.worker.listener.DelAddr = p.DelAddr + p.worker.listener.AddRoutes = p.AddRoutes + p.worker.listener.DelRoutes = p.DelRoutes + p.worker.listener.OnTap = p.OnTap + p.MixPoint.Initialize() +} + +func (p *Point) DelAddr(ipStr string) error { + if p.link == nil || ipStr == "" { + return nil + } + ipAddr, err := netlink.ParseAddr(ipStr) + if err != nil { + p.out.Error("Point.AddAddr.ParseCIDR %s: %s", ipStr, err) + return err + } + if err := netlink.AddrDel(p.link, ipAddr); err != nil { + p.out.Warn("Point.DelAddr.UnsetLinkIp: %s", err) + } + p.out.Info("Point.DelAddr: %s", ipStr) + p.addr = "" + return nil +} + +func (p *Point) AddAddr(ipStr string) error { + if ipStr == "" || p.link == nil { + return nil + } + ipAddr, err := netlink.ParseAddr(ipStr) + if err != nil { + p.out.Error("Point.AddAddr.ParseCIDR %s: %s", ipStr, err) + return err + } + if err := netlink.AddrAdd(p.link, ipAddr); err != nil { + p.out.Warn("Point.AddAddr.SetLinkIp: %s", err) + return err + } + p.out.Info("Point.AddAddr: %s", ipStr) + p.addr = ipStr + return nil +} + +func (p *Point) UpBr(name string) *netlink.Bridge { + if name == "" { + return nil + } + la := 
netlink.LinkAttrs{TxQLen: -1, Name: name} + br := &netlink.Bridge{LinkAttrs: la} + if link, err := netlink.LinkByName(name); link == nil { + p.out.Warn("Point.UpBr: %s %s", name, err) + err := netlink.LinkAdd(br) + if err != nil { + p.out.Warn("Point.UpBr.newBr: %s %s", name, err) + } + } + link, err := netlink.LinkByName(name) + if link == nil { + p.out.Error("Point.UpBr: %s %s", name, err) + return nil + } + if err := netlink.LinkSetUp(link); err != nil { + p.out.Error("Point.UpBr.LinkUp: %s", err) + } + return br +} + +func (p *Point) OnTap(w *TapWorker) error { + p.out.Info("Point.OnTap") + tap := w.device + name := tap.Name() + if tap.Type() == network.ProviderVir { // virtual device + p.out.Error("Point.OnTap: not support %s", tap.Type()) + return nil + } + // kernel device + link, err := netlink.LinkByName(name) + if err != nil { + p.out.Error("Point.OnTap: Get %s: %s", name, err) + return err + } + if err := netlink.LinkSetMTU(link, p.ipMtu); err != nil { + p.out.Error("Point.OnTap.SetMTU: %s", err) + } + if br := p.UpBr(p.brName); br != nil { + if err := netlink.LinkSetMaster(link, br); err != nil { + p.out.Error("Point.OnTap.AddSlave: Switch dev %s: %s", name, err) + } + link, err = netlink.LinkByName(p.brName) + if err != nil { + p.out.Error("Point.OnTap: Get %s: %s", p.brName, err) + } + } + if p.config.Interface.Cost > 0 { + port := network.NewBrPort(name) + if err := port.Cost(p.config.Interface.Cost); err != nil { + p.out.Error("Point.OnTap: Cost %s: %s", err) + } + } + p.link = link + return nil +} + +func (p *Point) GetRemote() string { + conn := p.worker.conWorker + if conn == nil { + return "" + } + remote := conn.client.RemoteAddr() + remote = strings.SplitN(remote, ":", 2)[0] + return remote +} + +func (p *Point) AddBypass(routes []*models.Route) { + remote := p.GetRemote() + if !p.config.ByPass { + return + } + addr, dest, _ := net.ParseCIDR(remote + "/32") + gws, err := netlink.RouteGet(addr) + if err != nil || len(gws) == 0 { + p.out.Error("Point.AddBypass: RouteGet %s: %s", addr, err) + return + } + rt := &netlink.Route{ + LinkIndex: gws[0].LinkIndex, + Dst: dest, + Gw: gws[0].Gw, + Table: 100, + } + p.out.Debug("Point.AddBypass: %s") + if err := netlink.RouteReplace(rt); err != nil { + p.out.Warn("Point.AddBypass: %s %s", rt.Dst, err) + return + } + p.out.Info("Point.AddBypass: route %s via %s", rt.Dst, rt.Gw) + ru := netlink.NewRule() + ru.Table = 100 + ru.Priority = 16383 + if err := netlink.RuleAdd(ru); err != nil { + p.out.Warn("Point.AddBypass: %s %s", ru.Dst, err) + } + p.out.Info("Point.AddBypass: %s", ru) + p.bypass = rt + for _, rt := range routes { + if rt.Prefix != "0.0.0.0/0" { + continue + } + gw := net.ParseIP(rt.NextHop) + _, dst0, _ := net.ParseCIDR("0.0.0.0/1") + rt0 := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst0, + Gw: gw, + Priority: rt.Metric, + } + p.out.Debug("Point.AddBypass: %s", rt0) + if err := netlink.RouteAdd(&rt0); err != nil { + p.out.Warn("Point.AddBypass: %s %s", rt0.Dst, err) + } + p.out.Info("Point.AddBypass: route %s via %s", rt0.Dst, rt0.Gw) + _, dst1, _ := net.ParseCIDR("128.0.0.0/1") + rt1 := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst1, + Gw: gw, + Priority: rt.Metric, + } + p.out.Debug("Point.AddBypass: %s", rt1) + if err := netlink.RouteAdd(&rt1); err != nil { + p.out.Warn("Point.AddBypass: %s %s", rt1.Dst, err) + } + p.out.Info("Point.AddBypass: route %s via %s", rt1.Dst, rt1.Gw) + } +} + +func (p *Point) AddRoutes(routes []*models.Route) error { + if routes == nil || p.link == nil { + 
return nil + } + p.AddBypass(routes) + for _, rt := range routes { + _, dst, err := net.ParseCIDR(rt.Prefix) + if err != nil { + continue + } + nxt := net.ParseIP(rt.NextHop) + rte := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst, + Gw: nxt, + Priority: rt.Metric, + } + p.out.Debug("Point.AddRoute: %s", rte) + if err := netlink.RouteAdd(&rte); err != nil { + p.out.Warn("Point.AddRoute: %s %s", rt.Prefix, err) + continue + } + p.out.Info("Point.AddRoutes: route %s via %s", rt.Prefix, rt.NextHop) + } + p.routes = routes + return nil +} + +func (p *Point) DelBypass(routes []*models.Route) { + if !p.config.ByPass || p.bypass == nil { + return + } + p.out.Debug("Point.DelRoute: %s") + rt := p.bypass + if err := netlink.RouteAdd(rt); err != nil { + p.out.Warn("Point.DelRoute: %s %s", rt.Dst, err) + } + p.out.Info("Point.DelBypass: route %s via %s", rt.Dst, rt.Gw) + p.bypass = nil + for _, rt := range routes { + if rt.Prefix != "0.0.0.0/0" { + continue + } + gw := net.ParseIP(rt.NextHop) + _, dst0, _ := net.ParseCIDR("0.0.0.0/1") + rt0 := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst0, + Gw: gw, + Priority: rt.Metric, + } + p.out.Debug("Point.DelBypass: %s", rt0) + if err := netlink.RouteDel(&rt0); err != nil { + p.out.Warn("Point.DelBypass: %s %s", rt0.Dst, err) + } + p.out.Info("Point.DelBypass: route %s via %s", rt0.Dst, rt0.Gw) + _, dst1, _ := net.ParseCIDR("128.0.0.0/1") + rt1 := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst1, + Gw: gw, + Priority: rt.Metric, + } + p.out.Debug("Point.DelBypass: %s", rt1) + if err := netlink.RouteDel(&rt1); err != nil { + p.out.Warn("Point.DelBypass: %s %s", rt1.Dst, err) + } + p.out.Info("Point.DelBypass: route %s via %s", rt1.Dst, rt1.Gw) + } +} + +func (p *Point) DelRoutes(routes []*models.Route) error { + if routes == nil || p.link == nil { + return nil + } + p.DelBypass(routes) + for _, rt := range routes { + _, dst, err := net.ParseCIDR(rt.Prefix) + if err != nil { + continue + } + nxt := net.ParseIP(rt.NextHop) + rte := netlink.Route{ + LinkIndex: p.link.Attrs().Index, + Dst: dst, + Gw: nxt, + Priority: rt.Metric, + } + if err := netlink.RouteDel(&rte); err != nil { + p.out.Warn("Point.DelRoute: %s %s", rt.Prefix, err) + continue + } + p.out.Info("Point.DelRoutes: route %s via %s", rt.Prefix, rt.NextHop) + } + p.routes = nil + return nil +} diff --git a/pkg/access/point_windows.go b/pkg/access/point_windows.go new file mode 100755 index 0000000..5663c90 --- /dev/null +++ b/pkg/access/point_windows.go @@ -0,0 +1,115 @@ +package access + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "strings" +) + +type Point struct { + MixPoint + // private + brName string + addr string + routes []*models.Route + config *config.Point +} + +func NewPoint(config *config.Point) *Point { + p := Point{ + brName: config.Interface.Bridge, + MixPoint: NewMixPoint(config), + } + return &p +} + +func (p *Point) Initialize() { + p.worker.listener.AddAddr = p.AddAddr + p.worker.listener.DelAddr = p.DelAddr + p.worker.listener.AddRoutes = p.AddRoutes + p.worker.listener.DelRoutes = p.DelRoutes + p.worker.listener.OnTap = p.OnTap + p.MixPoint.Initialize() +} + +func (p *Point) OnTap(w *TapWorker) error { + // clean routes previous + routes := make([]*models.Route, 0, 32) + if err := libol.UnmarshalLoad(&routes, ".routes.json"); err == nil { + for _, route := range routes { + _, _ = libol.IpRouteDel(p.IfName(), route.Prefix, route.NextHop) + 
p.out.Debug("Point.OnTap: clear %s via %s", route.Prefix, route.NextHop) + } + } + if out, err := libol.IpMetricSet(p.IfName(), "235"); err != nil { + p.out.Warn("Point.OnTap: metricSet %s: %s", err, out) + } + return nil +} + +func (p *Point) Trim(out []byte) string { + return strings.TrimSpace(string(out)) +} + +func (p *Point) AddAddr(ipStr string) error { + if ipStr == "" { + return nil + } + addrExisted := libol.IpAddrShow(p.IfName()) + if len(addrExisted) > 0 { + for _, addr := range addrExisted { + _, _ = libol.IpAddrDel(p.IfName(), addr) + } + } + out, err := libol.IpAddrAdd(p.IfName(), ipStr) + if err != nil { + p.out.Warn("Point.AddAddr: %s, %s", err, p.Trim(out)) + return err + } + p.out.Info("Point.AddAddr: %s", ipStr) + p.addr = ipStr + return nil +} + +func (p *Point) DelAddr(ipStr string) error { + ipv4 := strings.Split(ipStr, "/")[0] + out, err := libol.IpAddrDel(p.IfName(), ipv4) + if err != nil { + p.out.Warn("Point.DelAddr: %s, %s", err, p.Trim(out)) + return err + } + p.out.Info("Point.DelAddr: %s", ipv4) + p.addr = "" + return nil +} + +func (p *Point) AddRoutes(routes []*models.Route) error { + if routes == nil { + return nil + } + _ = libol.MarshalSave(routes, ".routes.json", true) + for _, route := range routes { + out, err := libol.IpRouteAdd(p.IfName(), route.Prefix, route.NextHop) + if err != nil { + p.out.Warn("Point.AddRoutes: %s %s", route.Prefix, p.Trim(out)) + continue + } + p.out.Info("Point.AddRoutes: route %s via %s", route.Prefix, route.NextHop) + } + p.routes = routes + return nil +} + +func (p *Point) DelRoutes(routes []*models.Route) error { + for _, route := range routes { + out, err := libol.IpRouteDel(p.IfName(), route.Prefix, route.NextHop) + if err != nil { + p.out.Warn("Point.DelRoutes: %s %s", route.Prefix, p.Trim(out)) + continue + } + p.out.Info("Point.DelRoutes: route %s via %s", route.Prefix, route.NextHop) + } + p.routes = nil + return nil +} diff --git a/pkg/access/pointer.go b/pkg/access/pointer.go new file mode 100755 index 0000000..a098e36 --- /dev/null +++ b/pkg/access/pointer.go @@ -0,0 +1,149 @@ +package access + +import ( + "github.com/luscis/openlan/pkg/access/http" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/network" + "runtime" +) + +type Pointer interface { + Addr() string + IfName() string + IfAddr() string + Client() libol.SocketClient + Device() network.Taper + Status() libol.SocketStatus + UpTime() int64 + UUID() string + Protocol() string + User() string + Record() map[string]int64 + Tenant() string + Alias() string + Config() *config.Point + Network() *models.Network +} + +type MixPoint struct { + uuid string + worker *Worker + config *config.Point + out *libol.SubLogger + http *http.Http +} + +func NewMixPoint(config *config.Point) MixPoint { + return MixPoint{ + worker: NewWorker(config), + config: config, + out: libol.NewSubLogger(config.Id()), + } +} + +func (p *MixPoint) Initialize() { + libol.Info("MixPoint.Initialize") + p.worker.SetUUID(p.UUID()) + p.worker.Initialize() + if p.config.Http != nil { + p.http = http.NewHttp(p) + } +} + +func (p *MixPoint) Start() { + p.out.Info("MixPoint.Start %s", runtime.GOOS) + if p.config.PProf != "" { + f := libol.PProf{Listen: p.config.PProf} + f.Start() + } + p.worker.Start() +} + +func (p *MixPoint) Stop() { + defer libol.Catch("MixPoint.Stop") + if p.http != nil { + p.http.Shutdown() + } + p.worker.Stop() +} + +func (p *MixPoint) UUID() string { + if p.uuid == "" { 
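+ // generate a random identifier on first use and cache it for later calls.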
+ p.uuid = libol.GenRandom(13) + } + return p.uuid +} + +func (p *MixPoint) Status() libol.SocketStatus { + client := p.Client() + if client == nil { + return 0 + } + return client.Status() +} + +func (p *MixPoint) Addr() string { + return p.config.Connection +} + +func (p *MixPoint) IfName() string { + device := p.Device() + if device == nil { + return "" + } + return device.Name() +} + +func (p *MixPoint) Client() libol.SocketClient { + if p.worker.conWorker == nil { + return nil + } + return p.worker.conWorker.client +} + +func (p *MixPoint) Device() network.Taper { + if p.worker.tapWorker == nil { + return nil + } + return p.worker.tapWorker.device +} + +func (p *MixPoint) UpTime() int64 { + return p.worker.UpTime() +} + +func (p *MixPoint) IfAddr() string { + return p.worker.ifAddr +} + +func (p *MixPoint) Tenant() string { + return p.config.Network +} + +func (p *MixPoint) User() string { + return p.config.Username +} + +func (p *MixPoint) Alias() string { + return p.config.Alias +} + +func (p *MixPoint) Record() map[string]int64 { + rt := p.worker.conWorker.record + // TODO padding data from tapWorker + return rt.Data() +} + +func (p *MixPoint) Config() *config.Point { + return p.config +} + +func (p *MixPoint) Network() *models.Network { + return p.worker.network +} + +func (p *MixPoint) Protocol() string { + return p.config.Protocol +} diff --git a/pkg/access/socket.go b/pkg/access/socket.go new file mode 100755 index 0000000..ad96634 --- /dev/null +++ b/pkg/access/socket.go @@ -0,0 +1,585 @@ +package access + +import ( + "encoding/json" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "runtime" + "strings" + "sync" + "time" +) + +type SocketWorkerListener struct { + OnClose func(w *SocketWorker) error + OnSuccess func(w *SocketWorker) error + OnIpAddr func(w *SocketWorker, n *models.Network) error + ReadAt func(frame *libol.FrameMessage) error +} + +const ( + rtLast = "lastAt" // record time last frame received or connected. + rtConnected = "connAt" // record last connected time. + rtReConnect = "reconnAt" // record time when triggered reconnected. + rtSuccess = "succAt" // record success time when login. + rtSleeps = "sleeps" // record times to control connecting delay. + rtClosed = "clsAt" // close time + rtLive = "liveAt" // record received pong frame time. + rtIpAddr = "addrAt" // record last receive ipAddr message after success. + rtConnects = "conns" // record times of reconnecting + rtLatency = "latency" // latency by ping. +) + +type SocketWorker struct { + // private + listener SocketWorkerListener + client libol.SocketClient + lock sync.Mutex + user *models.User + network *models.Network + routes map[string]*models.Route + keepalive KeepAlive + done chan bool + ticker *time.Ticker + pinCfg *config.Point + eventQueue chan *WorkerEvent + writeQueue chan *libol.FrameMessage + jobber []jobTimer + record *libol.SafeStrInt64 + out *libol.SubLogger + wlFrame *libol.FrameMessage // Last frame from write. 
+} + +func NewSocketWorker(client libol.SocketClient, c *config.Point) *SocketWorker { + t := &SocketWorker{ + client: client, + network: models.NewNetwork(c.Network, c.Interface.Address), + routes: make(map[string]*models.Route, 64), + record: libol.NewSafeStrInt64(), + done: make(chan bool, 2), + ticker: time.NewTicker(2 * time.Second), + pinCfg: c, + eventQueue: make(chan *WorkerEvent, 32), + writeQueue: make(chan *libol.FrameMessage, c.Queue.SockWr), + jobber: make([]jobTimer, 0, 32), + out: libol.NewSubLogger(c.Id()), + } + t.user = &models.User{ + Alias: c.Alias, + Name: c.Username, + Password: c.Password, + Network: c.Network, + System: runtime.GOOS, + } + t.keepalive = KeepAlive{ + Interval: 15, + LastTime: time.Now().Unix(), + } + return t +} + +func (t *SocketWorker) sleepNow() int64 { + sleeps := t.record.Get(rtSleeps) + return sleeps * 5 +} + +func (t *SocketWorker) sleepIdle() int64 { + sleeps := t.record.Get(rtSleeps) + if sleeps < 20 { + t.record.Add(rtSleeps, 1) + } + return t.sleepNow() +} + +func (t *SocketWorker) Initialize() { + t.lock.Lock() + defer t.lock.Unlock() + t.out.Info("SocketWorker.Initialize") + t.client.SetMaxSize(t.pinCfg.Interface.IPMtu) + t.client.SetListener(libol.ClientListener{ + OnConnected: func(client libol.SocketClient) error { + t.record.Set(rtConnected, time.Now().Unix()) + t.eventQueue <- NewEvent(EvSocConed, "from socket") + return nil + }, + OnClose: func(client libol.SocketClient) error { + t.record.Set(rtClosed, time.Now().Unix()) + t.eventQueue <- NewEvent(EvSocClosed, "from socket") + return nil + }, + }) + t.record.Set(rtLast, time.Now().Unix()) + t.record.Set(rtReConnect, time.Now().Unix()) +} + +func (t *SocketWorker) Start() { + t.lock.Lock() + defer t.lock.Unlock() + t.out.Info("SocketWorker.Start") + _ = t.connect() + libol.Go(t.Loop) +} + +func (t *SocketWorker) sendLeave(client libol.SocketClient) error { + if client == nil { + return libol.NewErr("client is nil") + } + data := struct { + DateTime int64 `json:"datetime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Connection string `json:"connection"` + Address string `json:"address"` + }{ + DateTime: time.Now().Unix(), + UUID: t.user.UUID, + Alias: t.user.Alias, + Address: t.client.LocalAddr(), + Connection: t.client.RemoteAddr(), + } + body, err := json.Marshal(data) + if err != nil { + return err + } + t.out.Cmd("SocketWorker.leave: left: %s", body) + m := libol.NewControlFrame(libol.LeftReq, body) + if err := client.WriteMsg(m); err != nil { + return err + } + return nil +} + +func (t *SocketWorker) leave() { + t.out.Info("SocketWorker.leave") + if err := t.sendLeave(t.client); err != nil { + t.out.Error("SocketWorker.leave: %s", err) + } +} + +func (t *SocketWorker) Stop() { + t.lock.Lock() + defer t.lock.Unlock() + t.out.Info("SocketWorker.Stop") + t.leave() + t.client.Terminal() + t.done <- true + t.client = nil + t.ticker.Stop() +} + +func (t *SocketWorker) close() { + if t.client != nil { + t.client.Close() + } +} + +func (t *SocketWorker) connect() error { + t.out.Warn("SocketWorker.connect: %s", t.client) + t.client.Close() + s := t.client.Status() + if s != libol.ClInit { + t.out.Warn("SocketWorker.connect: %s %s", t.client, s) + t.client.SetStatus(libol.ClInit) + } + t.record.Add(rtConnects, 1) + if err := t.client.Connect(); err != nil { + t.out.Error("SocketWorker.connect: %s %s", t.client, err) + return err + } + return nil +} + +func (t *SocketWorker) reconnect() { + if t.isStopped() { + return + } + t.record.Set(rtReConnect, time.Now().Unix()) + 
job := jobTimer{ + Time: time.Now().Unix() + t.sleepIdle(), + Call: func() error { + t.out.Debug("SocketWorker.reconnect: on jobber") + rtConn := t.record.Get(rtConnected) + rtReCon := t.record.Get(rtReConnect) + rtLast := t.record.Get(rtLast) + rtLive := t.record.Get(rtLive) + if rtConn >= rtReCon { // already connected after. + t.out.Cmd("SocketWorker.reconnect: dissed by connected") + return nil + } + t.out.Info("SocketWorker.reconnect: l: %d a: %d", rtLast, rtLive) + t.out.Info("SocketWorker.reconnect: c: %d r: %d", rtConn, rtReCon) + return t.connect() + }, + } + t.jobber = append(t.jobber, job) +} + +func (t *SocketWorker) sendLogin(client libol.SocketClient) error { + if client == nil { + return libol.NewErr("client is nil") + } + body, err := json.Marshal(t.user) + if err != nil { + return err + } + t.out.Cmd("SocketWorker.toLogin: %s", body) + m := libol.NewControlFrame(libol.LoginReq, body) + if err := client.WriteMsg(m); err != nil { + return err + } + return nil +} + +// toLogin request +func (t *SocketWorker) toLogin(client libol.SocketClient) error { + if err := t.sendLogin(client); err != nil { + t.out.Error("SocketWorker.toLogin: %s", err) + return err + } + return nil +} + +func (t *SocketWorker) sendIpAddr(client libol.SocketClient) error { + if client == nil { + return libol.NewErr("client is nil") + } + body, err := json.Marshal(t.network) + if err != nil { + return err + } + t.out.Cmd("SocketWorker.toNetwork: %s", body) + m := libol.NewControlFrame(libol.IpAddrReq, body) + if err := client.WriteMsg(m); err != nil { + return err + } + return nil +} + +func (t *SocketWorker) canReqAddr() bool { + if t.pinCfg.RequestAddr { + return true + } + // For link, need advise ipAddr with configured address. + if t.network.IfAddr != "" { + return true + } + return false +} + +// network request +func (t *SocketWorker) toNetwork(client libol.SocketClient) error { + if !t.canReqAddr() { + t.out.Info("SocketWorker.toNetwork: notNeed") + return nil + } + if err := t.sendIpAddr(client); err != nil { + t.out.Error("SocketWorker.toNetwork: %s", err) + return err + } + return nil +} + +func (t *SocketWorker) onLogin(resp []byte) error { + if t.client.Have(libol.ClAuth) { + t.out.Cmd("SocketWorker.onLogin: %s", resp) + return nil + } + if strings.HasPrefix(string(resp), "okay") { + t.client.SetStatus(libol.ClAuth) + if t.listener.OnSuccess != nil { + _ = t.listener.OnSuccess(t) + } + t.record.Set(rtSleeps, 0) + t.record.Set(rtIpAddr, 0) + t.record.Set(rtSuccess, time.Now().Unix()) + t.eventQueue <- NewEvent(EvSocSuccess, "from login") + t.out.Info("SocketWorker.onLogin: success") + } else { + t.client.SetStatus(libol.ClUnAuth) + t.out.Error("SocketWorker.onLogin: %s", resp) + } + return nil +} + +func (t *SocketWorker) onIpAddr(resp []byte) error { + if !t.pinCfg.RequestAddr { + t.out.Info("SocketWorker.onIpAddr: notAllowed") + return nil + } + n := &models.Network{} + if err := json.Unmarshal(resp, n); err != nil { + return libol.NewErr("SocketWorker.onIpAddr: invalid json data.") + } + t.network = n + if t.listener.OnIpAddr != nil { + _ = t.listener.OnIpAddr(t, n) + } + return nil +} + +func (t *SocketWorker) onLeft(resp []byte) error { + t.out.Info("SocketWorker.onLeft") + t.out.Cmd("SocketWorker.onLeft: %s", resp) + t.close() + return nil +} + +func (t *SocketWorker) onSignIn(resp []byte) error { + t.out.Info("SocketWorker.onSignIn") + t.out.Cmd("SocketWorker.onSignIn: %s", resp) + t.eventQueue <- NewEvent(EvSocSignIn, "request from server") + return nil +} + +func (t *SocketWorker) 
onPong(resp []byte) error { + m := &PingMsg{} + if err := json.Unmarshal(resp, m); err != nil { + return libol.NewErr("SocketWorker.onPong: invalid json data.") + } + latency := time.Now().UnixNano() - m.DateTime // ns + t.record.Set(rtLatency, latency/1e6) // ms + return nil +} + +// handle instruct from virtual switch +func (t *SocketWorker) onInstruct(frame *libol.FrameMessage) error { + if !frame.IsControl() { + return nil + } + action, resp := frame.CmdAndParams() + if libol.HasLog(libol.CMD) { + t.out.Cmd("SocketWorker.onInstruct %s %s", action, resp) + } + switch action { + case libol.LoginResp: + return t.onLogin(resp) + case libol.IpAddrResp: + t.record.Set(rtIpAddr, time.Now().Unix()) + return t.onIpAddr(resp) + case libol.PongResp: + t.record.Set(rtLive, time.Now().Unix()) + return t.onPong(resp) + case libol.SignReq: + return t.onSignIn(resp) + case libol.LeftReq: + return t.onLeft(resp) + default: + t.out.Warn("SocketWorker.onInstruct: %s %s", action, resp) + } + return nil +} + +type PingMsg struct { + DateTime int64 `json:"datetime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Connection string `json:"connection"` + Address string `json:"address"` +} + +func (t *SocketWorker) sendPing(client libol.SocketClient) error { + if client == nil { + return libol.NewErr("client is nil") + } + data := &PingMsg{ + DateTime: time.Now().UnixNano(), + UUID: t.user.UUID, + Alias: t.user.Alias, + Address: t.client.LocalAddr(), + Connection: t.client.RemoteAddr(), + } + body, err := json.Marshal(data) + if err != nil { + return err + } + t.out.Cmd("SocketWorker.sendPing: ping= %s", body) + m := libol.NewControlFrame(libol.PingReq, body) + if err := client.WriteMsg(m); err != nil { + return err + } + return nil +} + +func (t *SocketWorker) keepAlive() { + if !t.keepalive.Should() { + return + } + t.keepalive.Update() + if t.client.Have(libol.ClAuth) { + // Whether ipAddr request was already? and try ipAddr? + rtIp := t.record.Get(rtIpAddr) + rtSuc := t.record.Get(rtSuccess) + if t.canReqAddr() && rtIp < rtSuc { + _ = t.toNetwork(t.client) + } + if err := t.sendPing(t.client); err != nil { + t.out.Error("SocketWorker.keepAlive: %s", err) + } + } else { + if err := t.sendLogin(t.client); err != nil { + t.out.Error("SocketWorker.keepAlive: %s", err) + } + } +} + +func (t *SocketWorker) checkJobber() { + // travel jobber and execute it expired. + now := time.Now().Unix() + newTimer := make([]jobTimer, 0, 32) + for _, t := range t.jobber { + if now >= t.Time { + _ = t.Call() + } else { + newTimer = append(newTimer, t) + } + } + t.jobber = newTimer + t.out.Debug("SocketWorker.checkJobber: %d", len(t.jobber)) +} + +func (t *SocketWorker) checkAlive() { + out := int64(t.pinCfg.Timeout) + now := time.Now().Unix() + if now-t.record.Get(rtLast) < out || now-t.record.Get(rtLive) < out { + return + } + if now-t.record.Get(rtReConnect) < out { // timeout and avoid send reconn frequently. + t.out.Cmd("SocketWorker.checkAlive: reconn frequently") + return + } + t.eventQueue <- NewEvent(EvSocRecon, "from alive check") +} + +func (t *SocketWorker) doTicker() error { + t.checkAlive() // period to check whether alive. + t.keepAlive() // send ping and wait pong to keep alive. + t.checkJobber() // period to check job whether timeout. 
+ return nil +} + +func (t *SocketWorker) dispatch(ev *WorkerEvent) { + t.out.Event("SocketWorker.dispatch: %v", ev) + switch ev.Type { + case EvSocConed: + if t.client != nil { + _ = t.toLogin(t.client) + libol.Go(func() { + t.Read(t.client) + }) + } + case EvSocSuccess: + _ = t.toNetwork(t.client) + _ = t.sendPing(t.client) + case EvSocRecon: + t.out.Info("SocketWorker.dispatch: %v", ev) + t.reconnect() + case EvSocSignIn, EvSocLogin: + _ = t.toLogin(t.client) + } +} + +func (t *SocketWorker) Loop() { + for { + select { + case e := <-t.eventQueue: + t.lock.Lock() + t.dispatch(e) + t.lock.Unlock() + case d := <-t.writeQueue: + _ = t.DoWrite(d) + case <-t.done: + return + case c := <-t.ticker.C: + t.out.Log("SocketWorker.Ticker: at %s", c) + t.lock.Lock() + _ = t.doTicker() + t.lock.Unlock() + } + } +} + +func (t *SocketWorker) isStopped() bool { + return t.client == nil || t.client.Have(libol.ClTerminal) +} + +func (t *SocketWorker) Read(client libol.SocketClient) { + for { + data, err := client.ReadMsg() + if err != nil { + t.out.Error("SocketWorker.Read: %s", err) + client.Close() + break + } + if t.out.Has(libol.DEBUG) { + t.out.Debug("SocketWorker.Read: %x", data) + } + if data.Size() <= 0 { + continue + } + data.Decode() + if data.IsControl() { + t.lock.Lock() + _ = t.onInstruct(data) + t.lock.Unlock() + continue + } + t.record.Set(rtLast, time.Now().Unix()) + if t.listener.ReadAt != nil { + _ = t.listener.ReadAt(data) + } + } + if !t.isStopped() { + t.eventQueue <- NewEvent(EvSocRecon, "from read") + } +} + +func (t *SocketWorker) DoWrite(frame *libol.FrameMessage) error { + if t.out.Has(libol.DEBUG) { + t.out.Debug("SocketWorker.DoWrite: %x", frame) + } + t.checkAlive() // alive check immediately + t.lock.Lock() + if t.client == nil { + t.lock.Unlock() + return libol.NewErr("client is nil") + } + if !t.client.Have(libol.ClAuth) { + t.out.Debug("SocketWorker.DoWrite: dropping by unAuth") + t.lock.Unlock() + return nil + } + t.lock.Unlock() + if err := t.client.WriteMsg(frame); err != nil { + t.out.Debug("SocketWorker.DoWrite: %s", err) + return err + } + return nil +} + +func (t *SocketWorker) Write(frame *libol.FrameMessage) error { + t.writeQueue <- frame + return nil +} + +func (t *SocketWorker) Auth() (string, string) { + t.lock.Lock() + defer t.lock.Unlock() + return t.user.Name, t.user.Password +} + +func (t *SocketWorker) SetAuth(auth string) { + t.lock.Lock() + defer t.lock.Unlock() + values := strings.Split(auth, ":") + t.user.Name = values[0] + if len(values) > 1 { + t.user.Password = values[1] + } +} + +func (t *SocketWorker) SetUUID(v string) { + t.lock.Lock() + defer t.lock.Unlock() + t.user.UUID = v +} diff --git a/pkg/access/tap.go b/pkg/access/tap.go new file mode 100755 index 0000000..c4af391 --- /dev/null +++ b/pkg/access/tap.go @@ -0,0 +1,380 @@ +package access + +import ( + "bytes" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/network" + "net" + "strings" + "sync" + "time" +) + +type TapWorkerListener struct { + OnOpen func(w *TapWorker) error + OnClose func(w *TapWorker) + FindNext func(dest []byte) []byte + ReadAt func(frame *libol.FrameMessage) error +} + +type TunEther struct { + HwAddr []byte + IpAddr []byte +} + +type TapWorker struct { + // private + lock sync.Mutex + device network.Taper + listener TapWorkerListener + ether TunEther + neighbor Neighbors + devCfg network.TapConfig + pinCfg *config.Point + ifAddr string + writeQueue chan *libol.FrameMessage + done chan bool + out 
*libol.SubLogger + eventQueue chan *WorkerEvent +} + +func NewTapWorker(devCfg network.TapConfig, pinCfg *config.Point) (a *TapWorker) { + a = &TapWorker{ + devCfg: devCfg, + pinCfg: pinCfg, + done: make(chan bool, 2), + writeQueue: make(chan *libol.FrameMessage, pinCfg.Queue.TapWr), + out: libol.NewSubLogger(pinCfg.Id()), + eventQueue: make(chan *WorkerEvent, 32), + } + return +} + +func (a *TapWorker) Initialize() { + a.lock.Lock() + defer a.lock.Unlock() + + a.out.Info("TapWorker.Initialize") + a.neighbor = Neighbors{ + neighbors: make(map[uint32]*Neighbor, 1024), + done: make(chan bool), + ticker: time.NewTicker(5 * time.Second), + timeout: 3 * 60, + interval: 60, + listener: NeighborListener{ + Interval: func(dest []byte) { + a.OnArpAlive(dest) + }, + Expire: func(dest []byte) { + a.OnArpAlive(dest) + }, + }, + } + if a.IsTun() { + addr := a.pinCfg.Interface.Address + a.setEther(addr, libol.GenEthAddr(6)) + a.out.Info("TapWorker.Initialize: src %x", a.ether.HwAddr) + } + if err := a.open(); err != nil { + a.eventQueue <- NewEvent(EvTapOpenErr, err.Error()) + } +} + +func (a *TapWorker) IsTun() bool { + return a.devCfg.Type == network.TUN +} + +func (a *TapWorker) setEther(ipAddr string, hwAddr []byte) { + a.neighbor.Clear() + // format ip address. + ipAddr, err := libol.IPNetmask(ipAddr) + if err != nil { + a.out.Warn("TapWorker.setEther: %s: %s", ipAddr, err) + return + } + ifAddr := strings.SplitN(ipAddr, "/", 2)[0] + a.ether.IpAddr = net.ParseIP(ifAddr).To4() + if a.ether.IpAddr == nil { + a.ether.IpAddr = []byte{0x00, 0x00, 0x00, 0x00} + } + a.out.Info("TapWorker.setEther: srcIp % x", a.ether.IpAddr) + if hwAddr != nil { + a.ether.HwAddr = hwAddr + } + // changed address need open device again. + if a.ifAddr != "" && a.ifAddr != ipAddr { + a.out.Warn("TapWorker.setEther changed %s->%s", a.ifAddr, ipAddr) + a.eventQueue <- NewEvent(EvTapReset, "ifAddr changed") + } + a.ifAddr = ipAddr +} + +func (a *TapWorker) OnIpAddr(addr string) { + a.eventQueue <- NewEvent(EvTapIpAddr, addr) +} + +func (a *TapWorker) open() error { + a.close() + device, err := network.NewTaper(a.pinCfg.Network, a.devCfg) + if err != nil { + a.out.Error("TapWorker.open: %s", err) + return err + } + device.Up() // up device firstly + libol.Go(func() { + a.Read(device) + }) + a.out.Info("TapWorker.open: >>> %s <<<", device.Name()) + a.device = device + if a.listener.OnOpen != nil { + _ = a.listener.OnOpen(a) + } + return nil +} + +func (a *TapWorker) newEth(t uint16, dst []byte) *libol.Ether { + eth := libol.NewEther(t) + eth.Dst = dst + eth.Src = a.ether.HwAddr + return eth +} + +func (a *TapWorker) OnArpAlive(dest []byte) { + a.lock.Lock() + defer a.lock.Unlock() + a.onMiss(dest) +} + +// process if ethernet destination is missed +func (a *TapWorker) onMiss(dest []byte) { + a.out.Debug("TapWorker.onMiss: %v.", dest) + eth := a.newEth(libol.EthArp, libol.EthAll) + reply := libol.NewArp() + reply.OpCode = libol.ArpRequest + reply.SIpAddr = a.ether.IpAddr + reply.TIpAddr = dest + reply.SHwAddr = a.ether.HwAddr + reply.THwAddr = libol.EthZero + + frame := libol.NewFrameMessage(0) + frame.Append(eth.Encode()) + frame.Append(reply.Encode()) + a.out.Debug("TapWorker.onMiss: %x.", frame.Frame()[:64]) + if a.listener.ReadAt != nil { + _ = a.listener.ReadAt(frame) + } +} + +func (a *TapWorker) onFrame(frame *libol.FrameMessage, data []byte) int { + size := len(data) + if a.IsTun() { + iph, err := libol.NewIpv4FromFrame(data) + if err != nil { + a.out.Warn("TapWorker.onFrame: %s", err) + return 0 + } + dest := 
iph.Destination + if a.listener.FindNext != nil { + dest = a.listener.FindNext(dest) + } + neb := a.neighbor.GetByBytes(dest) + if neb == nil { + a.onMiss(dest) + a.out.Debug("TapWorker.onFrame: onMiss neighbor %v", dest) + return 0 + } + eth := a.newEth(libol.EthIp4, neb.HwAddr) + frame.Append(eth.Encode()) // insert ethernet header. + size += eth.Len + } + frame.SetSize(size) + return size +} + +func (a *TapWorker) Read(device network.Taper) { + for { + frame := libol.NewFrameMessage(0) + data := frame.Frame() + if a.IsTun() { + data = data[libol.EtherLen:] + } + if n, err := device.Read(data); err != nil { + a.out.Error("TapWorker.Read: %s", err) + break + } else { + if a.out.Has(libol.DEBUG) { + a.out.Debug("TapWorker.Read: %x", data[:n]) + } + if size := a.onFrame(frame, data[:n]); size == 0 { + continue + } + if a.listener.ReadAt != nil { + _ = a.listener.ReadAt(frame) + } + } + } + if !a.isStopped() { + a.eventQueue <- NewEvent(EvTapReadErr, "from read") + } +} + +func (a *TapWorker) dispatch(ev *WorkerEvent) { + a.out.Event("TapWorker.dispatch: %s", ev) + switch ev.Type { + case EvTapReadErr, EvTapOpenErr, EvTapReset: + if err := a.open(); err != nil { + time.Sleep(time.Second * 2) + a.eventQueue <- NewEvent(EvTapOpenErr, err.Error()) + } + case EvTapIpAddr: + a.setEther(ev.Reason, nil) + } +} + +func (a *TapWorker) Loop() { + for { + select { + case <-a.done: + return + case d := <-a.writeQueue: + _ = a.DoWrite(d) + case ev := <-a.eventQueue: + a.lock.Lock() + a.dispatch(ev) + a.lock.Unlock() + } + } +} + +func (a *TapWorker) DoWrite(frame *libol.FrameMessage) error { + data := frame.Frame() + if a.out.Has(libol.DEBUG) { + a.out.Debug("TapWorker.DoWrite: %x", data) + } + a.lock.Lock() + if a.device == nil { + a.lock.Unlock() + return libol.NewErr("device is nil") + } + if a.device.IsTun() { + // proxy arp request. 
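+ // toArp answers ARP requests with this point's own address and learns peers from ARP replies;
+ // when it consumes the frame there is nothing left to write to the device.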
+ if a.toArp(data) { + a.lock.Unlock() + return nil + } + eth, err := libol.NewEtherFromFrame(data) + if err != nil { + a.out.Error("TapWorker.DoWrite: %s", err) + a.lock.Unlock() + return nil + } + if eth.IsIP4() { + data = data[14:] + } else { + a.out.Debug("TapWorker.DoWrite: 0x%04x not IPv4", eth.Type) + a.lock.Unlock() + return nil + } + } + a.lock.Unlock() + if _, err := a.device.Write(data); err != nil { + a.out.Error("TapWorker.DoWrite: %s", err) + return err + } + return nil +} + +func (a *TapWorker) Write(frame *libol.FrameMessage) error { + a.writeQueue <- frame + return nil +} + +// learn source from arp +func (a *TapWorker) toArp(data []byte) bool { + a.out.Debug("TapWorker.toArp") + eth, err := libol.NewEtherFromFrame(data) + if err != nil { + a.out.Warn("TapWorker.toArp: %s", err) + return false + } + if !eth.IsArp() { + return false + } + arp, err := libol.NewArpFromFrame(data[eth.Len:]) + if err != nil { + a.out.Error("TapWorker.toArp: %s.", err) + return false + } + if arp.IsIP4() { + if !bytes.Equal(eth.Src, arp.SHwAddr) { + a.out.Error("TapWorker.toArp: eth.dst not arp.shw %x.", arp.SIpAddr) + return true + } + switch arp.OpCode { + case libol.ArpRequest: + if bytes.Equal(arp.TIpAddr, a.ether.IpAddr) { + eth := a.newEth(libol.EthArp, arp.SHwAddr) + rep := libol.NewArp() + rep.OpCode = libol.ArpReply + rep.SIpAddr = a.ether.IpAddr + rep.TIpAddr = arp.SIpAddr + rep.SHwAddr = a.ether.HwAddr + rep.THwAddr = arp.SHwAddr + frame := libol.NewFrameMessage(0) + frame.Append(eth.Encode()) + frame.Append(rep.Encode()) + a.out.Event("TapWorker.toArp: reply %v on %x.", rep.SIpAddr, rep.SHwAddr) + if a.listener.ReadAt != nil { + _ = a.listener.ReadAt(frame) + } + } + case libol.ArpReply: + // TODO learn by request. + if bytes.Equal(arp.THwAddr, a.ether.HwAddr) { + a.neighbor.Add(&Neighbor{ + HwAddr: arp.SHwAddr, + IpAddr: arp.SIpAddr, + NewTime: time.Now().Unix(), + Uptime: time.Now().Unix(), + }) + a.out.Event("TapWorker.toArp: recv %v on %x.", arp.SIpAddr, arp.SHwAddr) + } + default: + a.out.Warn("TapWorker.toArp: not op %x.", arp.OpCode) + } + } + return true +} + +func (a *TapWorker) close() { + a.out.Info("TapWorker.close") + if a.device != nil { + if a.listener.OnClose != nil { + a.listener.OnClose(a) + } + _ = a.device.Close() + } +} + +func (a *TapWorker) Start() { + a.lock.Lock() + defer a.lock.Unlock() + a.out.Info("TapWorker.Start") + libol.Go(a.Loop) + libol.Go(a.neighbor.Start) +} + +func (a *TapWorker) isStopped() bool { + return a.device == nil +} + +func (a *TapWorker) Stop() { + a.lock.Lock() + defer a.lock.Unlock() + a.out.Info("TapWorker.Stop") + a.done <- true + a.neighbor.Stop() + a.close() + a.device = nil +} diff --git a/pkg/access/tap_test.go b/pkg/access/tap_test.go new file mode 100755 index 0000000..a072a20 --- /dev/null +++ b/pkg/access/tap_test.go @@ -0,0 +1,82 @@ +package access + +import ( + "github.com/songgao/water" + "testing" +) + +func TestTapWrite(t *testing.T) { + cfg := water.Config{DeviceType: water.TAP} + dev, err := water.New(cfg) + if err != nil { + t.Errorf("Tap.open %s", err) + return + } + + //t.Logf("Tap.write: %s\n", dev.Name()) + + frame := make([]byte, 65) + for i := 0; i < 64; i++ { + frame[i] = uint8(i) + } + //t.Logf("Tap.write: %x", frame) + n, err := dev.Write(frame) + if err != nil { + t.Errorf("Tap.write: %s", err) + } + if n != len(frame) { + t.Errorf("Tap.write: %d", n) + } +} + +func BenchmarkTapWrite64(b *testing.B) { + cfg := water.Config{DeviceType: water.TAP} + dev, err := water.New(cfg) + if err != nil { + 
b.Errorf("Tap.open %s", err) + return + } + + //b.Logf("Tap.write: to %s", dev.Name()) + for i := 0; i < b.N; i++ { + frame := make([]byte, 64) + for i := 0; i < len(frame); i++ { + frame[i] = uint8(i) + } + + //b.Logf("Tap.write: frame %d", len(frame)) + n, err := dev.Write(frame) + if err != nil { + b.Errorf("Tap.write: %s", err) + } + if n != len(frame) { + b.Errorf("Tap.write: %d", n) + } + } +} + +func BenchmarkTapWrite1500(b *testing.B) { + cfg := water.Config{DeviceType: water.TAP} + dev, err := water.New(cfg) + if err != nil { + b.Errorf("Tap.open %s", err) + return + } + + //b.Logf("Tap.write: to %s", dev.Name()) + for i := 0; i < b.N; i++ { + frame := make([]byte, 1500) + for i := 0; i < len(frame); i++ { + frame[i] = uint8(i) + } + + //b.Logf("Tap.write: frame %d", len(frame)) + n, err := dev.Write(frame) + if err != nil { + b.Errorf("Tap.write: %s", err) + } + if n != len(frame) { + b.Errorf("Tap.write: %d", n) + } + } +} diff --git a/pkg/access/terminal.go b/pkg/access/terminal.go new file mode 100755 index 0000000..4a13194 --- /dev/null +++ b/pkg/access/terminal.go @@ -0,0 +1,152 @@ +package access + +import ( + "fmt" + "github.com/chzyer/readline" + "github.com/luscis/openlan/pkg/libol" + "io" + "strings" +) + +type Terminal struct { + Pointer Pointer + Console *readline.Instance +} + +func NewTerminal(pointer Pointer) *Terminal { + t := &Terminal{Pointer: pointer} + completer := readline.NewPrefixCompleter( + readline.PcItem("quit"), + readline.PcItem("help"), + readline.PcItem("mode", + readline.PcItem("vi"), + readline.PcItem("emacs"), + ), + readline.PcItem("show", + readline.PcItem("config"), + readline.PcItem("network"), + readline.PcItem("record"), + readline.PcItem("statistics"), + ), + readline.PcItem("edit", + readline.PcItem("user"), + readline.PcItem("connection"), + ), + ) + + config := &readline.Config{ + Prompt: t.Prompt(), + HistoryFile: ".history", + InterruptPrompt: "^C", + EOFPrompt: "quit", + HistorySearchFold: true, + AutoComplete: completer, + } + if l, err := readline.NewEx(config); err == nil { + t.Console = l + } + return t +} + +func (t *Terminal) Prompt() string { + user := t.Pointer.User() + alias := t.Pointer.Alias() + tenant := t.Pointer.Tenant() + return fmt.Sprintf("[%s@%s %s]# ", user, alias, tenant) +} + +func (t *Terminal) CmdEdit(args string) { +} + +func (t *Terminal) CmdShow(args string) { + switch args { + case "record": + v := t.Pointer.Record() + if out, err := libol.Marshal(v, true); err == nil { + fmt.Printf("%s\n", out) + } + case "statistics": + if c := t.Pointer.Client(); c != nil { + v := c.Statistics() + if out, err := libol.Marshal(v, true); err == nil { + fmt.Printf("%s\n", out) + } + } + case "config": + cfg := t.Pointer.Config() + if str, err := libol.Marshal(cfg, true); err == nil { + fmt.Printf("%s\n", str) + } + case "network": + cfg := t.Pointer.Network() + if str, err := libol.Marshal(cfg, true); err == nil { + fmt.Printf("%s\n", str) + } + default: + v := struct { + UUID string + UpTime int64 + Device string + Status string + }{ + UUID: t.Pointer.UUID(), + UpTime: t.Pointer.UpTime(), + Device: t.Pointer.IfName(), + Status: t.Pointer.Status().String(), + } + if str, err := libol.Marshal(v, true); err == nil { + fmt.Printf("%s\n", str) + } + } +} + +func (t *Terminal) Trim(v string) string { + return strings.TrimSpace(v) +} + +func (t *Terminal) CmdBye() { +} + +func (t *Terminal) CmdMode(args string) { + switch args { + case "vi": + t.Console.SetVimMode(true) + case "emacs": + t.Console.SetVimMode(false) + } +} + +func 
(t *Terminal) Start() { + if t.Console == nil { + return + } + defer t.Console.Close() + for { + line, err := t.Console.Readline() + if err == readline.ErrInterrupt { + if len(line) == 0 { + break + } else { + continue + } + } else if err == io.EOF { + break + } + line = t.Trim(line) + switch { + case strings.HasPrefix(line, "mode "): + t.CmdMode(t.Trim(line[5:])) + case line == "show": + t.CmdShow("") + case line == "quit" || line == "exit": + t.CmdBye() + goto quit + case strings.HasPrefix(line, "show "): + t.CmdShow(t.Trim(line[5:])) + case strings.HasPrefix(line, "edit "): + t.CmdEdit(t.Trim(line[5:])) + } + } +quit: + fmt.Printf("Terminal.Start quit") +} diff --git a/pkg/access/worker.go b/pkg/access/worker.go new file mode 100755 index 0000000..6c023ac --- /dev/null +++ b/pkg/access/worker.go @@ -0,0 +1,397 @@ +package access + +import ( + "crypto/tls" + "fmt" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/network" + "github.com/luscis/openlan/pkg/schema" + "net" + "os" + "runtime" + "strings" + "time" +) + +type jobTimer struct { + Time int64 + Call func() error +} + +type KeepAlive struct { + Interval int64 + LastTime int64 +} + +func (k *KeepAlive) Should() bool { + return time.Now().Unix()-k.LastTime >= k.Interval +} + +func (k *KeepAlive) Update() { + k.LastTime = time.Now().Unix() +} + +var ( + EvSocConed = "conned" + EvSocRecon = "reconn" + EvSocClosed = "closed" + EvSocSuccess = "success" + EvSocSignIn = "signIn" + EvSocLogin = "login" + EvTapIpAddr = "ipAddr" + EvTapReadErr = "readErr" + EvTapReset = "reset" + EvTapOpenErr = "openErr" +) + +type WorkerEvent struct { + Type string + Reason string + Time int64 + Data interface{} +} + +func (e *WorkerEvent) String() string { + return e.Type + " " + e.Reason +} + +func NewEvent(newType, reason string) *WorkerEvent { + return &WorkerEvent{ + Type: newType, + Time: time.Now().Unix(), + Reason: reason, + } +} + +type WorkerListener struct { + AddAddr func(ipStr string) error + DelAddr func(ipStr string) error + OnTap func(w *TapWorker) error + AddRoutes func(routes []*models.Route) error + DelRoutes func(routes []*models.Route) error +} + +type PrefixRule struct { + Type int + Destination net.IPNet + NextHop net.IP +} + +func GetSocketClient(p *config.Point) libol.SocketClient { + switch p.Protocol { + case "kcp": + c := libol.NewKcpConfig() + c.Block = config.GetBlock(p.Crypt) + c.RdQus = p.Queue.SockRd + c.WrQus = p.Queue.SockWr + return libol.NewKcpClient(p.Connection, c) + case "tcp": + c := &libol.TcpConfig{ + Block: config.GetBlock(p.Crypt), + RdQus: p.Queue.SockRd, + WrQus: p.Queue.SockWr, + } + return libol.NewTcpClient(p.Connection, c) + case "udp": + c := &libol.UdpConfig{ + Block: config.GetBlock(p.Crypt), + Timeout: time.Duration(p.Timeout) * time.Second, + RdQus: p.Queue.SockRd, + WrQus: p.Queue.SockWr, + } + return libol.NewUdpClient(p.Connection, c) + case "ws": + c := &libol.WebConfig{ + Block: config.GetBlock(p.Crypt), + RdQus: p.Queue.SockRd, + WrQus: p.Queue.SockWr, + } + return libol.NewWebClient(p.Connection, c) + case "wss": + c := &libol.WebConfig{ + Block: config.GetBlock(p.Crypt), + RdQus: p.Queue.SockRd, + WrQus: p.Queue.SockWr, + } + if p.Cert != nil { + c.Cert = &libol.WebCert{ + Insecure: p.Cert.Insecure, + RootCa: p.Cert.CaFile, + } + } + return libol.NewWebClient(p.Connection, c) + default: + c := &libol.TcpConfig{ + Block: config.GetBlock(p.Crypt), + RdQus: p.Queue.SockRd, + WrQus: 
p.Queue.SockWr, + } + if p.Cert != nil { + c.Tls = &tls.Config{ + InsecureSkipVerify: p.Cert.Insecure, + RootCAs: p.Cert.GetCertPool(), + } + } + return libol.NewTcpClient(p.Connection, c) + } +} + +func GetTapCfg(c *config.Point) network.TapConfig { + cfg := network.TapConfig{ + Provider: c.Interface.Provider, + Name: c.Interface.Name, + Network: c.Interface.Address, + KernBuf: c.Queue.VirSnd, + VirBuf: c.Queue.VirWrt, + } + if c.Interface.Provider == "tun" { + cfg.Type = network.TUN + } else { + cfg.Type = network.TAP + } + return cfg +} + +type Worker struct { + // private + ifAddr string + listener WorkerListener + conWorker *SocketWorker + tapWorker *TapWorker + cfg *config.Point + uuid string + network *models.Network + routes []PrefixRule + out *libol.SubLogger + done chan bool + ticker *time.Ticker +} + +func NewWorker(cfg *config.Point) *Worker { + return &Worker{ + ifAddr: cfg.Interface.Address, + cfg: cfg, + routes: make([]PrefixRule, 0, 32), + out: libol.NewSubLogger(cfg.Id()), + done: make(chan bool), + ticker: time.NewTicker(2 * time.Second), + } +} + +func (w *Worker) Initialize() { + if w.cfg == nil { + return + } + pid := os.Getpid() + if fp, err := libol.OpenWrite(w.cfg.PidFile); err == nil { + _, _ = fp.WriteString(fmt.Sprintf("%d", pid)) + } + w.out.Info("Worker.Initialize") + client := GetSocketClient(w.cfg) + w.conWorker = NewSocketWorker(client, w.cfg) + + tapCfg := GetTapCfg(w.cfg) + // register listener + w.tapWorker = NewTapWorker(tapCfg, w.cfg) + + w.conWorker.SetUUID(w.UUID()) + w.conWorker.listener = SocketWorkerListener{ + OnClose: w.OnClose, + OnSuccess: w.OnSuccess, + OnIpAddr: w.OnIpAddr, + ReadAt: w.tapWorker.Write, + } + w.conWorker.Initialize() + + w.tapWorker.listener = TapWorkerListener{ + OnOpen: func(t *TapWorker) error { + if w.listener.OnTap != nil { + if err := w.listener.OnTap(t); err != nil { + return err + } + } + if w.network != nil { + n := w.network + // remove older firstly + w.FreeIpAddr() + _ = w.OnIpAddr(w.conWorker, n) + } + return nil + }, + ReadAt: w.conWorker.Write, + FindNext: w.FindNext, + } + w.tapWorker.Initialize() +} + +func (w *Worker) FlushStatus() { + file := w.cfg.StatusFile + device := w.tapWorker.device + client := w.conWorker.client + if file == "" || device == nil || client == nil { + return + } + sts := client.Statistics() + status := &schema.Point{ + RxBytes: sts[libol.CsRecvOkay], + TxBytes: sts[libol.CsSendOkay], + ErrPkt: sts[libol.CsSendError], + Uptime: client.UpTime(), + State: client.Status().String(), + Device: device.Name(), + Network: w.cfg.Network, + Protocol: w.cfg.Protocol, + User: strings.SplitN(w.cfg.Username, "@", 2)[0], + Remote: w.cfg.Connection, + AliveTime: client.AliveTime(), + UUID: w.uuid, + Alias: w.cfg.Alias, + System: runtime.GOOS, + } + if w.network != nil { + status.Address = models.NewNetworkSchema(w.network) + } + _ = libol.MarshalSave(status, file, true) +} + +func (w *Worker) Start() { + w.out.Debug("Worker.Start linux.") + w.FlushStatus() + w.tapWorker.Start() + w.conWorker.Start() + libol.Go(func() { + for { + select { + case <-w.done: + return + case <-w.ticker.C: + w.FlushStatus() + } + } + }) +} + +func (w *Worker) Stop() { + if w.tapWorker == nil || w.conWorker == nil { + return + } + w.done <- true + w.FreeIpAddr() + w.conWorker.Stop() + w.tapWorker.Stop() + w.conWorker = nil + w.tapWorker = nil +} + +func (w *Worker) UpTime() int64 { + client := w.conWorker.client + if client != nil { + return client.AliveTime() + } + return 0 +} + +func (w *Worker) FindNext(dest []byte) []byte 
{ + for _, rt := range w.routes { + if !rt.Destination.Contains(dest) { + continue + } + if rt.Type == 0x00 { + break + } + if w.out.Has(libol.DEBUG) { + w.out.Debug("Worker.FindNext %v to %v", dest, rt.NextHop) + } + return rt.NextHop.To4() + } + return dest +} + +func (w *Worker) OnIpAddr(s *SocketWorker, n *models.Network) error { + addr := fmt.Sprintf("%s/%s", n.IfAddr, n.Netmask) + if models.NetworkEqual(w.network, n) { + w.out.Debug("Worker.OnIpAddr: %s noChanged", addr) + return nil + } + w.out.Cmd("Worker.OnIpAddr: %s", addr) + w.out.Cmd("Worker.OnIpAddr: %s", n.Routes) + prefix := libol.Netmask2Len(n.Netmask) + ipStr := fmt.Sprintf("%s/%d", n.IfAddr, prefix) + w.tapWorker.OnIpAddr(ipStr) + if w.listener.AddAddr != nil { + _ = w.listener.AddAddr(ipStr) + } + // Filter routes. + var routes []*models.Route + for _, rt := range n.Routes { + _, _, err := net.ParseCIDR(rt.Prefix) + if err != nil || rt.NextHop == n.IfAddr { + continue + } + routes = append(routes, rt) + } + if w.listener.AddRoutes != nil { + _ = w.listener.AddRoutes(routes) + } + w.network = n + // update routes + ip := net.ParseIP(w.network.IfAddr) + m := net.IPMask(net.ParseIP(w.network.Netmask).To4()) + w.routes = append(w.routes, PrefixRule{ + Type: 0x00, + Destination: net.IPNet{IP: ip.Mask(m), Mask: m}, + NextHop: libol.EthZero, + }) + for _, rt := range routes { + _, dest, _ := net.ParseCIDR(rt.Prefix) + w.routes = append(w.routes, PrefixRule{ + Type: 0x01, + Destination: *dest, + NextHop: net.ParseIP(rt.NextHop), + }) + } + return nil +} + +func (w *Worker) FreeIpAddr() { + if w.network == nil { + return + } + if w.listener.DelRoutes != nil { + _ = w.listener.DelRoutes(w.network.Routes) + } + if w.listener.DelAddr != nil { + prefix := libol.Netmask2Len(w.network.Netmask) + ipStr := fmt.Sprintf("%s/%d", w.network.IfAddr, prefix) + _ = w.listener.DelAddr(ipStr) + } + w.network = nil + w.routes = make([]PrefixRule, 0, 32) +} + +func (w *Worker) OnClose(s *SocketWorker) error { + w.out.Info("Worker.OnClose") + w.FreeIpAddr() + return nil +} + +func (w *Worker) OnSuccess(s *SocketWorker) error { + w.out.Info("Worker.OnSuccess") + if w.listener.AddAddr != nil { + _ = w.listener.AddAddr(w.ifAddr) + } + return nil +} + +func (w *Worker) UUID() string { + if w.uuid == "" { + w.uuid = libol.GenRandom(13) + } + return w.uuid +} + +func (w *Worker) SetUUID(v string) { + w.uuid = v +} diff --git a/pkg/api/config.go b/pkg/api/config.go new file mode 100755 index 0000000..73667ae --- /dev/null +++ b/pkg/api/config.go @@ -0,0 +1,35 @@ +package api + +import ( + "github.com/gorilla/mux" + "net/http" +) + +type Config struct { + Switcher Switcher +} + +func (c Config) Router(router *mux.Router) { + router.HandleFunc("/api/config", c.List).Methods("GET") + router.HandleFunc("/api/config/reload", c.Reload).Methods("PUT") + router.HandleFunc("/api/config/save", c.Save).Methods("PUT") +} + +func (c Config) List(w http.ResponseWriter, r *http.Request) { + format := GetQueryOne(r, "format") + if format == "yaml" { + ResponseYaml(w, c.Switcher.Config()) + } else { + ResponseJson(w, c.Switcher.Config()) + } +} + +func (c Config) Reload(w http.ResponseWriter, r *http.Request) { + c.Switcher.Reload() + ResponseMsg(w, 0, "success") +} + +func (c Config) Save(w http.ResponseWriter, r *http.Request) { + c.Switcher.Save() + ResponseMsg(w, 0, "success") +} diff --git a/pkg/api/device.go b/pkg/api/device.go new file mode 100755 index 0000000..0d0fcb5 --- /dev/null +++ b/pkg/api/device.go @@ -0,0 +1,91 @@ +package api + +import ( + 
"github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/network" + "github.com/luscis/openlan/pkg/schema" + "net" + "net/http" + "time" +) + +type Device struct { +} + +func (h Device) Router(router *mux.Router) { + router.HandleFunc("/api/device", h.List).Methods("GET") + router.HandleFunc("/api/device/{id}", h.Get).Methods("GET") +} + +func (h Device) List(w http.ResponseWriter, r *http.Request) { + dev := make([]schema.Device, 0, 1024) + for t := range network.Taps.List() { + if t == nil { + break + } + dev = append(dev, schema.Device{ + Name: t.Name(), + Mtu: t.Mtu(), + Provider: t.Type(), + }) + } + for t := range network.Bridges.List() { + if t == nil { + break + } + dev = append(dev, schema.Device{ + Name: t.Name(), + Mtu: t.Mtu(), + Provider: t.Type(), + }) + } + ResponseJson(w, dev) +} + +func (h Device) Get(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + name := vars["id"] + if dev := network.Taps.Get(name); dev != nil { + ResponseJson(w, schema.Device{ + Name: dev.Name(), + Mtu: dev.Mtu(), + Provider: dev.Type(), + }) + } else if br := network.Bridges.Get(name); br != nil { + now := time.Now().Unix() + macs := make([]schema.HwMacInfo, 0, 32) + for addr := range br.ListMac() { + if addr == nil { + break + } + macs = append(macs, schema.HwMacInfo{ + Address: net.HardwareAddr(addr.Address).String(), + Device: addr.Device.String(), + Uptime: now - addr.Uptime, + }) + } + slaves := make([]schema.Device, 0, 32) + for dev := range br.ListSlave() { + if dev == nil { + break + } + slaves = append(slaves, schema.Device{ + Name: dev.Name(), + Mtu: dev.Mtu(), + Provider: dev.Type(), + }) + } + ResponseJson(w, schema.Bridge{ + Device: schema.Device{ + Name: br.Name(), + Mtu: br.Mtu(), + Provider: br.Type(), + }, + Macs: macs, + Slaves: slaves, + Stats: br.Stats(), + }) + } else { + http.Error(w, vars["id"], http.StatusNotFound) + } +} diff --git a/pkg/api/esp.go b/pkg/api/esp.go new file mode 100755 index 0000000..a30a354 --- /dev/null +++ b/pkg/api/esp.go @@ -0,0 +1,74 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Esp struct { + Switcher Switcher +} + +func (l Esp) Router(router *mux.Router) { + router.HandleFunc("/api/esp", l.List).Methods("GET") + router.HandleFunc("/api/esp/{id}", l.List).Methods("GET") +} + +func (l Esp) List(w http.ResponseWriter, r *http.Request) { + data := make([]schema.Esp, 0, 1024) + for e := range cache.Esp.List() { + if e == nil { + break + } + item := models.NewEspSchema(e) + data = append(data, item) + } + ResponseJson(w, data) +} + +type EspState struct { + Switcher Switcher +} + +func (l EspState) Router(router *mux.Router) { + router.HandleFunc("/api/state", l.List).Methods("GET") + router.HandleFunc("/api/state/{id}", l.List).Methods("GET") +} + +func (l EspState) List(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + name := vars["id"] + data := make([]schema.EspState, 0, 1024) + for e := range cache.EspState.List(name) { + if e == nil { + break + } + data = append(data, models.NewEspStateSchema(e)) + } + ResponseJson(w, data) +} + +type EspPolicy struct { + Switcher Switcher +} + +func (l EspPolicy) Router(router *mux.Router) { + router.HandleFunc("/api/policy", l.List).Methods("GET") + router.HandleFunc("/api/policy/{id}", l.List).Methods("GET") +} + +func (l EspPolicy) List(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + name := vars["id"] + data 
:= make([]schema.EspPolicy, 0, 1024) + for e := range cache.EspPolicy.List(name) { + if e == nil { + break + } + data = append(data, models.NewEspPolicySchema(e)) + } + ResponseJson(w, data) +} diff --git a/pkg/api/lease.go b/pkg/api/lease.go new file mode 100755 index 0000000..1b39089 --- /dev/null +++ b/pkg/api/lease.go @@ -0,0 +1,27 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Lease struct { +} + +func (l Lease) Router(router *mux.Router) { + router.HandleFunc("/api/lease", l.List).Methods("GET") + router.HandleFunc("/api/lease/{id}", l.List).Methods("GET") +} + +func (l Lease) List(w http.ResponseWriter, r *http.Request) { + nets := make([]schema.Lease, 0, 1024) + for u := range cache.Network.ListLease() { + if u == nil { + break + } + nets = append(nets, *u) + } + ResponseJson(w, nets) +} diff --git a/pkg/api/link.go b/pkg/api/link.go new file mode 100755 index 0000000..900cc39 --- /dev/null +++ b/pkg/api/link.go @@ -0,0 +1,42 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Link struct { + Switcher Switcher +} + +func (h Link) Router(router *mux.Router) { + router.HandleFunc("/api/link", h.List).Methods("GET") + router.HandleFunc("/api/link/{id}", h.Get).Methods("GET") +} + +func (h Link) List(w http.ResponseWriter, r *http.Request) { + links := make([]schema.Link, 0, 1024) + for l := range cache.Link.List() { + if l == nil { + break + } + links = append(links, models.NewLinkSchema(l)) + } + ResponseJson(w, links) +} + +func (h Link) Get(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + libol.Info("GetPoint %s", vars["id"]) + + link := cache.Link.Get(vars["id"]) + if link != nil { + ResponseJson(w, models.NewLinkSchema(link)) + } else { + http.Error(w, vars["id"], http.StatusNotFound) + } +} diff --git a/pkg/api/neighbor.go b/pkg/api/neighbor.go new file mode 100755 index 0000000..406043c --- /dev/null +++ b/pkg/api/neighbor.go @@ -0,0 +1,27 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Neighbor struct { +} + +func (h Neighbor) Router(router *mux.Router) { + router.HandleFunc("/api/neighbor", h.List).Methods("GET") +} + +func (h Neighbor) List(w http.ResponseWriter, r *http.Request) { + neighbors := make([]schema.Neighbor, 0, 1024) + for n := range cache.Neighbor.List() { + if n == nil { + break + } + neighbors = append(neighbors, models.NewNeighborSchema(n)) + } + ResponseJson(w, neighbors) +} diff --git a/pkg/api/network.go b/pkg/api/network.go new file mode 100755 index 0000000..e149744 --- /dev/null +++ b/pkg/api/network.go @@ -0,0 +1,51 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" + "strings" +) + +type Network struct { +} + +func (h Network) Router(router *mux.Router) { + router.HandleFunc("/api/network", h.List).Methods("GET") + router.HandleFunc("/api/network/{id}", h.Get).Methods("GET") + router.HandleFunc("/get/network/{id}/{ie}.ovpn", h.Profile).Methods("GET") +} + +func (h Network) List(w http.ResponseWriter, r *http.Request) { + nets := make([]schema.Network, 
0, 1024) + for u := range cache.Network.List() { + if u == nil { + break + } + nets = append(nets, models.NewNetworkSchema(u)) + } + ResponseJson(w, nets) +} + +func (h Network) Get(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + net := cache.Network.Get(vars["id"]) + if net != nil { + ResponseJson(w, models.NewNetworkSchema(net)) + } else { + http.Error(w, vars["id"], http.StatusNotFound) + } +} + +func (h Network) Profile(w http.ResponseWriter, r *http.Request) { + server := strings.SplitN(r.Host, ":", 2)[0] + vars := mux.Vars(r) + data, err := cache.VPNClient.GetClientProfile(vars["id"], vars["ie"], server) + if err == nil { + _, _ = w.Write([]byte(data)) + } else { + http.Error(w, err.Error(), http.StatusNotFound) + } +} diff --git a/pkg/api/online.go b/pkg/api/online.go new file mode 100755 index 0000000..dd1d49a --- /dev/null +++ b/pkg/api/online.go @@ -0,0 +1,27 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type OnLine struct { +} + +func (h OnLine) Router(router *mux.Router) { + router.HandleFunc("/api/online", h.List).Methods("GET") +} + +func (h OnLine) List(w http.ResponseWriter, r *http.Request) { + nets := make([]schema.OnLine, 0, 1024) + for u := range cache.Online.List() { + if u == nil { + break + } + nets = append(nets, models.NewOnLineSchema(u)) + } + ResponseJson(w, nets) +} diff --git a/pkg/api/openvpn.go b/pkg/api/openvpn.go new file mode 100755 index 0000000..cc84fbd --- /dev/null +++ b/pkg/api/openvpn.go @@ -0,0 +1,44 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type VPNClient struct { +} + +func (h VPNClient) Router(router *mux.Router) { + router.HandleFunc("/api/vpn/client", h.List).Methods("GET") + router.HandleFunc("/api/vpn/client/{id}", h.List).Methods("GET") +} + +func (h VPNClient) List(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + name := vars["id"] + + clients := make([]schema.VPNClient, 0, 1024) + if name == "" { + for n := range cache.Network.List() { + if n == nil { + break + } + for client := range cache.VPNClient.List(n.Name) { + if client == nil { + break + } + clients = append(clients, *client) + } + } + } else { + for client := range cache.VPNClient.List(name) { + if client == nil { + break + } + clients = append(clients, *client) + } + } + ResponseJson(w, clients) +} diff --git a/pkg/api/point.go b/pkg/api/point.go new file mode 100755 index 0000000..ee27c5c --- /dev/null +++ b/pkg/api/point.go @@ -0,0 +1,38 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Point struct { +} + +func (h Point) Router(router *mux.Router) { + router.HandleFunc("/api/point", h.List).Methods("GET") + router.HandleFunc("/api/point/{id}", h.Get).Methods("GET") +} + +func (h Point) List(w http.ResponseWriter, r *http.Request) { + points := make([]schema.Point, 0, 1024) + for u := range cache.Point.List() { + if u == nil { + break + } + points = append(points, models.NewPointSchema(u)) + } + ResponseJson(w, points) +} + +func (h Point) Get(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + point := cache.Point.Get(vars["id"]) + if point != nil { + ResponseJson(w, models.NewPointSchema(point)) + } else { + http.Error(w, 
vars["id"], http.StatusNotFound) + } +} diff --git a/pkg/api/pprof.go b/pkg/api/pprof.go new file mode 100755 index 0000000..e6cef3d --- /dev/null +++ b/pkg/api/pprof.go @@ -0,0 +1,48 @@ +package api + +import ( + "encoding/json" + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "io/ioutil" + "net/http" +) + +var pprof = &libol.PProf{} + +type PProf struct { +} + +func (h PProf) Router(router *mux.Router) { + router.HandleFunc("/api/pprof", h.Get).Methods("GET") + router.HandleFunc("/api/pprof", h.Add).Methods("POST") +} + +func (h PProf) Get(w http.ResponseWriter, r *http.Request) { + pp := schema.PProf{Listen: pprof.Listen} + ResponseJson(w, pp) +} + +func (h PProf) Add(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if pprof.Listen != "" && pprof.Error == nil { + http.Error(w, "already listen on "+pprof.Listen, http.StatusConflict) + return + } + + pp := &schema.PProf{} + if err := json.Unmarshal(body, pp); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + pprof.Listen = pp.Listen + pprof.Start() + ResponseMsg(w, 0, "") +} diff --git a/pkg/api/server.go b/pkg/api/server.go new file mode 100755 index 0000000..1a6f3a0 --- /dev/null +++ b/pkg/api/server.go @@ -0,0 +1,47 @@ +package api + +import ( + "github.com/gorilla/mux" + "net/http" +) + +type Server struct { + Switcher Switcher +} + +func (l Server) Router(router *mux.Router) { + router.HandleFunc("/api/server", l.List).Methods("GET") + router.HandleFunc("/api/server/{id}", l.List).Methods("GET") +} + +func (l Server) List(w http.ResponseWriter, r *http.Request) { + server := l.Switcher.Server() + data := &struct { + UpTime int64 `json:"uptime"` + Total int `json:"total"` + Statistic map[string]int64 `json:"statistic"` + Connection []interface{} `json:"connection"` + }{ + UpTime: l.Switcher.UpTime(), + Statistic: server.Statistics(), + Connection: make([]interface{}, 0, 1024), + Total: server.TotalClient(), + } + for u := range server.ListClient() { + if u == nil { + break + } + data.Connection = append(data.Connection, &struct { + UpTime int64 `json:"uptime"` + LocalAddr string `json:"localAddr"` + RemoteAddr string `json:"remoteAddr"` + Statistic map[string]int64 `json:"statistic"` + }{ + UpTime: u.UpTime(), + LocalAddr: u.LocalAddr(), + RemoteAddr: u.RemoteAddr(), + Statistic: u.Statistics(), + }) + } + ResponseJson(w, data) +} diff --git a/pkg/api/switcher.go b/pkg/api/switcher.go new file mode 100755 index 0000000..f4be940 --- /dev/null +++ b/pkg/api/switcher.go @@ -0,0 +1,32 @@ +package api + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/network" + "github.com/luscis/openlan/pkg/schema" +) + +type Switcher interface { + UUID() string + UpTime() int64 + Alias() string + Config() *config.Switch + Server() libol.SocketServer + Firewall() *network.FireWall + Reload() + Save() +} + +func NewWorkerSchema(s Switcher) schema.Worker { + protocol := "" + if cfg := s.Config(); cfg != nil { + protocol = cfg.Protocol + } + return schema.Worker{ + UUID: s.UUID(), + Uptime: s.UpTime(), + Alias: s.Alias(), + Protocol: protocol, + } +} diff --git a/pkg/api/url.go b/pkg/api/url.go new file mode 100755 index 0000000..9596d08 --- /dev/null +++ b/pkg/api/url.go @@ -0,0 +1,23 @@ +package api + +import "github.com/gorilla/mux" + 
+func Add(router *mux.Router, switcher Switcher) { + Link{Switcher: switcher}.Router(router) + User{}.Router(router) + Neighbor{}.Router(router) + Point{}.Router(router) + Network{}.Router(router) + OnLine{}.Router(router) + Lease{}.Router(router) + Server{Switcher: switcher}.Router(router) + Device{}.Router(router) + VPNClient{}.Router(router) + PProf{}.Router(router) + VxLAN{}.Router(router) + Esp{}.Router(router) + EspState{}.Router(router) + EspPolicy{}.Router(router) + Config{Switcher: switcher}.Router(router) + Version{}.Router(router) +} diff --git a/pkg/api/user.go b/pkg/api/user.go new file mode 100755 index 0000000..94325fa --- /dev/null +++ b/pkg/api/user.go @@ -0,0 +1,102 @@ +package api + +import ( + "encoding/json" + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "io/ioutil" + "net/http" + "sort" +) + +type User struct { +} + +func (h User) Router(router *mux.Router) { + router.HandleFunc("/api/user", h.List).Methods("GET") + router.HandleFunc("/api/user", h.Add).Methods("POST") + router.HandleFunc("/api/user/{id}", h.Get).Methods("GET") + router.HandleFunc("/api/user/{id}", h.Add).Methods("POST") + router.HandleFunc("/api/user/{id}", h.Del).Methods("DELETE") + router.HandleFunc("/api/user/{id}/check", h.Check).Methods("POST") +} + +func (h User) List(w http.ResponseWriter, r *http.Request) { + users := make([]schema.User, 0, 1024) + for u := range cache.User.List() { + if u == nil { + break + } + users = append(users, models.NewUserSchema(u)) + } + sort.SliceStable(users, func(i, j int) bool { + return users[i].Network+users[i].Name > users[j].Network+users[j].Name + }) + ResponseJson(w, users) +} + +func (h User) Get(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + user := cache.User.Get(vars["id"]) + if user != nil { + ResponseJson(w, models.NewUserSchema(user)) + } else { + http.Error(w, vars["id"], http.StatusNotFound) + } +} + +func (h User) Add(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + user := &schema.User{} + if err := json.Unmarshal(body, user); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + cache.User.Add(models.SchemaToUserModel(user)) + if err := cache.User.Save(); err != nil { + libol.Warn("AddUser %s", err) + } + ResponseMsg(w, 0, "") +} + +func (h User) Del(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + libol.Info("DelUser %s", vars["id"]) + + cache.User.Del(vars["id"]) + if err := cache.User.Save(); err != nil { + libol.Warn("DelUser %s", err) + } + ResponseMsg(w, 0, "") +} + +func (h User) Check(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + user := &schema.User{} + if err := json.Unmarshal(body, user); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + model := models.SchemaToUserModel(user) + if _, err := cache.User.Check(model); err == nil { + ResponseMsg(w, 0, "success") + } else { + http.Error(w, err.Error(), http.StatusUnauthorized) + return + } +} diff --git a/pkg/api/utils.go b/pkg/api/utils.go new file mode 100755 index 0000000..1129cfc --- /dev/null +++ b/pkg/api/utils.go @@ -0,0 +1,59 @@ 
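+// Shared helpers for the API handlers: JSON and YAML response writers plus request body and query-string parsing.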
+package api + +import ( + "encoding/json" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "gopkg.in/yaml.v2" + "io/ioutil" + "net/http" +) + +func ResponseJson(w http.ResponseWriter, v interface{}) { + str, err := json.Marshal(v) + if err == nil { + libol.Debug("ResponseJson: %s", str) + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(str) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func ResponseMsg(w http.ResponseWriter, code int, message string) { + ret := &schema.Message{ + Code: code, + Message: message, + } + ResponseJson(w, ret) +} + +func ResponseYaml(w http.ResponseWriter, v interface{}) { + str, err := yaml.Marshal(v) + if err == nil { + w.Header().Set("Content-Type", "application/yaml") + _, _ = w.Write(str) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func GetData(r *http.Request, v interface{}) error { + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return err + } + if err := json.Unmarshal(body, v); err != nil { + return err + } + return nil +} + +func GetQueryOne(req *http.Request, name string) string { + query := req.URL.Query() + if values, ok := query[name]; ok { + return values[0] + } + return "" +} diff --git a/pkg/api/version.go b/pkg/api/version.go new file mode 100755 index 0000000..e025156 --- /dev/null +++ b/pkg/api/version.go @@ -0,0 +1,19 @@ +package api + +import ( + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/schema" + "net/http" +) + +type Version struct { +} + +func (l Version) Router(router *mux.Router) { + router.HandleFunc("/api/version", l.List).Methods("GET") +} + +func (l Version) List(w http.ResponseWriter, r *http.Request) { + ver := schema.NewVersionSchema() + ResponseJson(w, ver) +} diff --git a/pkg/api/vxlan.go b/pkg/api/vxlan.go new file mode 100755 index 0000000..e7e0db6 --- /dev/null +++ b/pkg/api/vxlan.go @@ -0,0 +1,19 @@ +package api + +import ( + "github.com/gorilla/mux" + "net/http" +) + +type VxLAN struct { + Switcher Switcher +} + +func (l VxLAN) Router(router *mux.Router) { + router.HandleFunc("/api/vxlan", l.List).Methods("GET") + router.HandleFunc("/api/vxlan/{id}", l.List).Methods("GET") +} + +func (l VxLAN) List(w http.ResponseWriter, r *http.Request) { + ResponseJson(w, nil) +} diff --git a/pkg/app/access.go b/pkg/app/access.go new file mode 100755 index 0000000..ab68f7d --- /dev/null +++ b/pkg/app/access.go @@ -0,0 +1,120 @@ +package app + +import ( + "encoding/json" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type Access struct { + success int + failed int + master Master +} + +func NewAccess(m Master) *Access { + return &Access{ + master: m, + } +} + +func (p *Access) OnFrame(client libol.SocketClient, frame *libol.FrameMessage) error { + out := client.Out() + if out.Has(libol.LOG) { + out.Log("Access.OnFrame %s.", frame) + } + if frame.IsControl() { + action, params := frame.CmdAndParams() + out.Debug("Access.OnFrame: %s", action) + switch action { + case libol.LoginReq: + if err := p.handleLogin(client, params); err != nil { + out.Error("Access.OnFrame: %s", err) + m := libol.NewControlFrame(libol.LoginResp, []byte(err.Error())) + _ = client.WriteMsg(m) + //client.Close() + return err + } + m := libol.NewControlFrame(libol.LoginResp, []byte("okay")) + _ = client.WriteMsg(m) + } + //If instruct is not login and already auth, continue to process. 
+ if client.Have(libol.ClAuth) { + return nil + } + } + //Dropped all frames if not auth. + if !client.Have(libol.ClAuth) { + out.Debug("Access.OnFrame: unAuth") + return libol.NewErr("unAuth client.") + } + return nil +} + +func (p *Access) handleLogin(client libol.SocketClient, data []byte) error { + out := client.Out() + out.Debug("Access.handleLogin: %s", data) + if client.Have(libol.ClAuth) { + out.Warn("Access.handleLogin: already auth") + return nil + } + user := &models.User{} + if err := json.Unmarshal(data, user); err != nil { + return libol.NewErr("Invalid json data.") + } + user.Update() + out.Info("Access.handleLogin: %s on %s", user.Id(), user.Alias) + if now, _ := cache.User.Check(user); now != nil { + if now.Role != "admin" && now.Last != nil { + // To offline lastly client if guest. + p.master.OffClient(now.Last) + } + p.success++ + now.Last = client + client.SetStatus(libol.ClAuth) + out.Info("Access.handleLogin: success") + _ = p.onAuth(client, user) + return nil + } + p.failed++ + client.SetStatus(libol.ClUnAuth) + return libol.NewErr("Auth failed.") +} + +func (p *Access) onAuth(client libol.SocketClient, user *models.User) error { + out := client.Out() + if !client.Have(libol.ClAuth) { + return libol.NewErr("not auth.") + } + out.Info("Access.onAuth") + dev, err := p.master.NewTap(user.Network) + if err != nil { + return err + } + out.Info("Access.onAuth: on >>> %s <<<", dev.Name()) + proto := p.master.Protocol() + m := models.NewPoint(client, dev, proto) + m.SetUser(user) + // free point has same uuid. + if om := cache.Point.GetByUUID(m.UUID); om != nil { + out.Info("Access.onAuth: OffClient %s", om.Client) + p.master.OffClient(om.Client) + } + client.SetPrivate(m) + cache.Point.Add(m) + libol.Go(func() { + p.master.ReadTap(dev, func(f *libol.FrameMessage) error { + if err := client.WriteMsg(f); err != nil { + p.master.OffClient(client) + return err + } + return nil + }) + }) + return nil +} + +func (p *Access) Stats() (success, failed int) { + return p.success, p.failed +} diff --git a/pkg/app/master.go b/pkg/app/master.go new file mode 100755 index 0000000..63962c5 --- /dev/null +++ b/pkg/app/master.go @@ -0,0 +1,14 @@ +package app + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/network" +) + +type Master interface { + UUID() string + Protocol() string + OffClient(client libol.SocketClient) + ReadTap(device network.Taper, readAt func(f *libol.FrameMessage) error) + NewTap(tenant string) (network.Taper, error) +} diff --git a/pkg/app/neighbors.go b/pkg/app/neighbors.go new file mode 100755 index 0000000..a929cd4 --- /dev/null +++ b/pkg/app/neighbors.go @@ -0,0 +1,65 @@ +package app + +import ( + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "net" + "time" +) + +type Neighbors struct { + master Master +} + +func NewNeighbors(m Master) *Neighbors { + return &Neighbors{ + master: m, + } +} + +func (e *Neighbors) OnFrame(client libol.SocketClient, frame *libol.FrameMessage) error { + if frame.IsControl() { + return nil + } + if libol.HasLog(libol.LOG) { + libol.Log("Neighbors.OnFrame %s.", frame) + } + proto, err := frame.Proto() + if err != nil { + libol.Warn("Neighbors.OnFrame %s", err) + return err + } + if eth := proto.Eth; !eth.IsArp() { + return nil + } + arp := proto.Arp + if arp.IsIP4() && (arp.IsReply() || arp.IsRequest()) { + n := models.NewNeighbor(arp.SHwAddr, arp.SIpAddr, client) + e.AddNeighbor(n, client) + } + return nil +} + +func (e 
*Neighbors) AddNeighbor(new *models.Neighbor, client libol.SocketClient) { + if n := cache.Neighbor.Get(new.IpAddr.String()); n != nil { + libol.Log("Neighbors.AddNeighbor: update %s.", new) + n.Update(client) + n.HitTime = time.Now().Unix() + } else { + libol.Log("Neighbors.AddNeighbor: new %s.", new) + cache.Neighbor.Add(new) + } +} + +func (e *Neighbors) DelNeighbor(ipAddr net.IP) { + libol.Info("Neighbors.DelNeighbor %s.", ipAddr) + if n := cache.Neighbor.Get(ipAddr.String()); n != nil { + cache.Neighbor.Del(ipAddr.String()) + } +} + +func (e *Neighbors) OnClientClose(client libol.SocketClient) { + //TODO + libol.Info("Neighbors.OnClientClose %s.", client) +} diff --git a/pkg/app/online.go b/pkg/app/online.go new file mode 100755 index 0000000..9e5c2ff --- /dev/null +++ b/pkg/app/online.go @@ -0,0 +1,95 @@ +package app + +import ( + "container/list" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "sync" + "time" +) + +type Online struct { + lock sync.RWMutex + maxSize int + lineMap map[string]*models.Line + lineList *list.List + master Master +} + +func NewOnline(m Master) *Online { + c := config.Manager.Switch + ms := c.Perf.OnLine + return &Online{ + maxSize: ms, + lineMap: make(map[string]*models.Line, ms), + lineList: list.New(), + master: m, + } +} + +func (o *Online) OnFrame(client libol.SocketClient, frame *libol.FrameMessage) error { + if frame.IsControl() { + return nil + } + if libol.HasLog(libol.LOG) { + libol.Log("Online.OnFrame %s.", frame) + } + proto, err := frame.Proto() + if err != nil { + libol.Warn("Online.OnFrame %s", err) + return err + } + if proto.Ip4 != nil { + ip := proto.Ip4 + line := models.NewLine(libol.EthIp4) + line.IpSource = ip.Source + line.IpDest = ip.Destination + line.IpProtocol = ip.Protocol + if proto.Tcp != nil { + tcp := proto.Tcp + line.PortDest = tcp.Destination + line.PortSource = tcp.Source + } else if proto.Udp != nil { + udp := proto.Udp + line.PortDest = udp.Destination + line.PortSource = udp.Source + } + o.AddLine(line) + } + return nil +} + +func (o *Online) popLine() { + if o.lineList.Len() < o.maxSize { + return + } + e := o.lineList.Front() + if e == nil { + return + } + if lastLine, ok := e.Value.(*models.Line); ok { + o.lineList.Remove(e) + cache.Online.Del(lastLine.String()) + delete(o.lineMap, lastLine.String()) + } +} + +func (o *Online) AddLine(line *models.Line) { + o.lock.Lock() + defer o.lock.Unlock() + if libol.HasLog(libol.LOG) { + libol.Log("Online.AddLine %s and len %d", line, o.lineList.Len()) + } + key := line.String() + if find, ok := o.lineMap[key]; !ok { + o.popLine() + o.lineList.PushBack(line) + o.lineMap[key] = line + cache.Online.Add(line) + } else if find != nil { + find.HitTime = time.Now().Unix() + cache.Online.Update(find) + } +} diff --git a/pkg/app/request.go b/pkg/app/request.go new file mode 100755 index 0000000..a5369e4 --- /dev/null +++ b/pkg/app/request.go @@ -0,0 +1,159 @@ +package app + +import ( + "encoding/json" + "github.com/luscis/openlan/pkg/cache" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "strings" +) + +type Request struct { + master Master +} + +func NewRequest(m Master) *Request { + return &Request{ + master: m, + } +} + +func (r *Request) OnFrame(client libol.SocketClient, frame *libol.FrameMessage) error { + out := client.Out() + if frame.IsEthernet() { + return nil + } + if 
out.Has(libol.DEBUG) { + out.Log("Request.OnFrame %s.", frame) + } + action, body := frame.CmdAndParams() + if out.Has(libol.CMD) { + out.Cmd("Request.OnFrame: %s %s", action, body) + } + switch action { + case libol.NeighborReq: + r.onNeighbor(client, body) + case libol.IpAddrReq: + r.onIpAddr(client, body) + case libol.LeftReq: + r.onLeave(client, body) + case libol.LoginReq: + out.Debug("Request.OnFrame %s: %s", action, body) + default: + r.onDefault(client, body) + } + return nil +} + +func (r *Request) onDefault(client libol.SocketClient, data []byte) { + m := libol.NewControlFrame(libol.PongResp, data) + _ = client.WriteMsg(m) +} + +func (r *Request) onNeighbor(client libol.SocketClient, data []byte) { + resp := make([]schema.Neighbor, 0, 32) + for obj := range cache.Neighbor.List() { + if obj == nil { + break + } + resp = append(resp, models.NewNeighborSchema(obj)) + } + if respStr, err := json.Marshal(resp); err == nil { + m := libol.NewControlFrame(libol.NeighborResp, respStr) + _ = client.WriteMsg(m) + } +} + +func (r *Request) getLease(ifAddr string, p *models.Point, n *models.Network) *schema.Lease { + if n == nil { + return nil + } + uuid := p.UUID + alias := p.Alias + network := n.Name + lease := cache.Network.GetLeaseByAlias(alias) // try by alias firstly + if ifAddr == "" { + if lease == nil { // now to alloc it. + lease = cache.Network.NewLease(uuid, network) + if lease != nil { + lease.Alias = alias + } + } else { + lease.UUID = uuid + } + } else { + ipAddr := strings.SplitN(ifAddr, "/", 2)[0] + if lease != nil && lease.Address == ipAddr { + lease.UUID = uuid + } + if lease == nil || lease.Address != ipAddr { + lease = cache.Network.AddLease(uuid, ipAddr) + lease.Alias = alias + } + } + if lease != nil { + lease.Network = network + lease.Client = p.Client.String() + } + return lease +} +func (r *Request) onIpAddr(client libol.SocketClient, data []byte) { + out := client.Out() + out.Info("Request.onIpAddr: %s", data) + recv := models.NewNetwork("", "") + if err := json.Unmarshal(data, recv); err != nil { + out.Error("Request.onIpAddr: invalid json data.") + return + } + if recv.Name == "" { + recv.Name = recv.Tenant + } + if recv.Name == "" { + recv.Name = "default" + } + n := cache.Network.Get(recv.Name) + if n == nil { + out.Error("Request.onIpAddr: invalid network %s.", recv.Name) + return + } + out.Cmd("Request.onIpAddr: find %s", n) + p := cache.Point.Get(client.String()) + if p == nil { + out.Error("Request.onIpAddr: point notFound") + return + } + resp := &models.Network{ + Name: n.Name, + IfAddr: recv.IfAddr, + Netmask: recv.Netmask, + Routes: n.Routes, + } + if recv.IfAddr == "" { // not interface address, and try to alloc it. 
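+ // No address was requested: reuse the lease bound to this alias if one exists, otherwise allocate a free address from the network pool.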
+ lease := r.getLease(recv.IfAddr, p, n) + if lease != nil { + resp.IfAddr = lease.Address + resp.Netmask = n.Netmask + resp.Routes = n.Routes + } else { + resp.IfAddr = "169.254.0.0" + resp.Netmask = n.Netmask + if resp.Netmask == "" { + resp.Netmask = "255.255.0.0" + } + resp.Routes = n.Routes + } + } + out.Cmd("Request.onIpAddr: resp %s", resp) + if respStr, err := json.Marshal(resp); err == nil { + m := libol.NewControlFrame(libol.IpAddrResp, respStr) + _ = client.WriteMsg(m) + } + out.Info("Request.onIpAddr: %s", resp.IfAddr) +} + +func (r *Request) onLeave(client libol.SocketClient, data []byte) { + out := client.Out() + out.Info("Request.onLeave") + r.master.OffClient(client) +} diff --git a/pkg/cache/esp.go b/pkg/cache/esp.go new file mode 100755 index 0000000..c725537 --- /dev/null +++ b/pkg/cache/esp.go @@ -0,0 +1,141 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type esp struct { + Esp *libol.SafeStrMap +} + +func (p *esp) Init(size int) { + p.Esp = libol.NewSafeStrMap(size) +} + +func (p *esp) Add(esp *models.Esp) { + _ = p.Esp.Set(esp.ID(), esp) +} + +func (p *esp) Get(key string) *models.Esp { + ret := p.Esp.Get(key) + if ret != nil { + return ret.(*models.Esp) + } + return nil +} + +func (p *esp) Del(key string) { + p.Esp.Del(key) +} + +func (p *esp) List() <-chan *models.Esp { + c := make(chan *models.Esp, 128) + go func() { + p.Esp.Iter(func(k string, v interface{}) { + m := v.(*models.Esp) + m.Update() + c <- m + }) + c <- nil //Finish channel by nil. + }() + return c +} + +var Esp = esp{ + Esp: libol.NewSafeStrMap(1024), +} + +type espState struct { + State *libol.SafeStrMap +} + +func (p *espState) Init(size int) { + p.State = libol.NewSafeStrMap(size) +} + +func (p *espState) Add(esp *models.EspState) { + _ = p.State.Set(esp.ID(), esp) +} + +func (p *espState) Get(key string) *models.EspState { + ret := p.State.Get(key) + if ret != nil { + return ret.(*models.EspState) + } + return nil +} + +func (p *espState) Del(key string) { + p.State.Del(key) +} + +func (p *espState) List(name string) <-chan *models.EspState { + c := make(chan *models.EspState, 128) + go func() { + p.State.Iter(func(k string, v interface{}) { + m := v.(*models.EspState) + if m.Name == name || name == "" { + m.Update() + c <- m + } + }) + c <- nil //Finish channel by nil. + }() + return c +} + +func (p *espState) Clear() { + p.State.Clear() +} + +var EspState = espState{ + State: libol.NewSafeStrMap(1024), +} + +type espPolicy struct { + Policy *libol.SafeStrMap +} + +func (p *espPolicy) Init(size int) { + p.Policy = libol.NewSafeStrMap(size) +} + +func (p *espPolicy) Add(esp *models.EspPolicy) { + _ = p.Policy.Set(esp.ID(), esp) +} + +func (p *espPolicy) Get(key string) *models.EspPolicy { + ret := p.Policy.Get(key) + if ret != nil { + return ret.(*models.EspPolicy) + } + return nil +} + +func (p *espPolicy) Del(key string) { + p.Policy.Del(key) +} + +func (p *espPolicy) List(name string) <-chan *models.EspPolicy { + c := make(chan *models.EspPolicy, 128) + go func() { + p.Policy.Iter(func(k string, v interface{}) { + m := v.(*models.EspPolicy) + if m.Name == name || name == "" { + m.Update() + c <- m + } + }) + c <- nil //Finish channel by nil. 
+ }() + return c +} + +func (p *espPolicy) Clear() { + p.Policy.Clear() +} + +var EspPolicy = espPolicy{ + Policy: libol.NewSafeStrMap(1024), +} diff --git a/pkg/cache/link.go b/pkg/cache/link.go new file mode 100755 index 0000000..ecce2d4 --- /dev/null +++ b/pkg/cache/link.go @@ -0,0 +1,46 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type link struct { + Links *libol.SafeStrMap +} + +func (p *link) Init(size int) { + p.Links = libol.NewSafeStrMap(size) +} + +func (p *link) Add(uuid string, link *models.Link) { + _ = p.Links.Set(uuid, link) +} + +func (p *link) Get(key string) *models.Link { + ret := p.Links.Get(key) + if ret != nil { + return ret.(*models.Link) + } + return nil +} + +func (p *link) Del(key string) { + p.Links.Del(key) +} + +func (p *link) List() <-chan *models.Link { + c := make(chan *models.Link, 128) + go func() { + p.Links.Iter(func(k string, v interface{}) { + m := v.(*models.Link) + c <- m + }) + c <- nil //Finish channel by nil. + }() + return c +} + +var Link = link{ + Links: libol.NewSafeStrMap(1024), +} diff --git a/pkg/cache/neighbor.go b/pkg/cache/neighbor.go new file mode 100755 index 0000000..6425a27 --- /dev/null +++ b/pkg/cache/neighbor.go @@ -0,0 +1,58 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type neighbor struct { + Neighbors *libol.SafeStrMap +} + +func (p *neighbor) Init(size int) { + p.Neighbors = libol.NewSafeStrMap(size) +} + +func (p *neighbor) Add(m *models.Neighbor) { + if v := p.Neighbors.Get(m.IpAddr.String()); v != nil { + p.Neighbors.Del(m.IpAddr.String()) + } + _ = p.Neighbors.Set(m.IpAddr.String(), m) +} + +func (p *neighbor) Update(m *models.Neighbor) *models.Neighbor { + if v := p.Neighbors.Get(m.IpAddr.String()); v != nil { + n := v.(*models.Neighbor) + n.HwAddr = m.HwAddr + n.HitTime = m.HitTime + } + return nil +} + +func (p *neighbor) Get(key string) *models.Neighbor { + if v := p.Neighbors.Get(key); v != nil { + return v.(*models.Neighbor) + } + return nil +} + +func (p *neighbor) Del(key string) { + p.Neighbors.Del(key) +} + +func (p *neighbor) List() <-chan *models.Neighbor { + c := make(chan *models.Neighbor, 128) + + go func() { + p.Neighbors.Iter(func(k string, v interface{}) { + c <- v.(*models.Neighbor) + }) + c <- nil //Finish channel by nil. 
+ }() + + return c +} + +var Neighbor = neighbor{ + Neighbors: libol.NewSafeStrMap(1024), +} diff --git a/pkg/cache/network.go b/pkg/cache/network.go new file mode 100755 index 0000000..5a74560 --- /dev/null +++ b/pkg/cache/network.go @@ -0,0 +1,152 @@ +package cache + +import ( + "encoding/binary" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "net" +) + +type network struct { + Networks *libol.SafeStrMap + UUID *libol.SafeStrMap // TODO with network + Addr *libol.SafeStrMap // TODO with network +} + +func (w *network) Add(n *models.Network) { + libol.Debug("network.Add %v", *n) + _ = w.Networks.Set(n.Name, n) +} + +func (w *network) Del(name string) { + libol.Debug("network.Del %s", name) + w.Networks.Del(name) +} + +func (w *network) Get(name string) *models.Network { + if v := w.Networks.Get(name); v != nil { + return v.(*models.Network) + } + return nil +} + +//TODO add/del route + +func (w *network) List() <-chan *models.Network { + c := make(chan *models.Network, 128) + + go func() { + w.Networks.Iter(func(k string, v interface{}) { + c <- v.(*models.Network) + }) + c <- nil //Finish channel by nil. + }() + return c +} + +func (w *network) ListLease() <-chan *schema.Lease { + c := make(chan *schema.Lease, 128) + + go func() { + w.UUID.Iter(func(k string, v interface{}) { + c <- v.(*schema.Lease) + }) + c <- nil //Finish channel by nil. + }() + return c +} + +func (w *network) allocLease(sAddr, eAddr string) string { + sIp := net.ParseIP(sAddr) + eIp := net.ParseIP(eAddr) + if sIp == nil || eIp == nil { + return "" + } + start := binary.BigEndian.Uint32(sIp.To4()[:4]) + end := binary.BigEndian.Uint32(eIp.To4()[:4]) + for i := start; i <= end; i++ { + tmp := make([]byte, 4) + binary.BigEndian.PutUint32(tmp[:4], i) + tmpStr := net.IP(tmp).String() + if _, ok := w.Addr.GetEx(tmpStr); !ok { + return tmpStr + } + } + return "" +} + +func (w *network) NewLease(uuid, network string) *schema.Lease { + n := w.Get(network) + if n == nil || uuid == "" { + return nil + } + if obj, ok := w.UUID.GetEx(uuid); ok { + l := obj.(*schema.Lease) + return l // how to resolve conflict with new point?. + } + ipStr := w.allocLease(n.IpStart, n.IpEnd) + if ipStr == "" { + return nil + } + w.AddLease(uuid, ipStr) + return w.GetLease(uuid) +} + +func (w *network) GetLease(uuid string) *schema.Lease { + if obj, ok := w.UUID.GetEx(uuid); ok { + return obj.(*schema.Lease) + } + return nil +} + +func (w *network) GetLeaseByAlias(name string) *schema.Lease { + if obj, ok := w.UUID.GetEx(name); ok { + return obj.(*schema.Lease) + } + return nil +} + +func (w *network) AddLease(uuid, ipStr string) *schema.Lease { + libol.Info("network.AddLease %s %s", uuid, ipStr) + if ipStr != "" { + l := &schema.Lease{ + UUID: uuid, + Alias: uuid, + Address: ipStr, + } + _ = w.UUID.Set(uuid, l) + _ = w.Addr.Set(ipStr, l) + return l + } + return nil +} + +func (w *network) DelLease(uuid string) { + libol.Debug("network.DelLease %s", uuid) + // TODO record free address for alias and wait timeout to release. + addr := "" + if obj, ok := w.UUID.GetEx(uuid); ok { + lease := obj.(*schema.Lease) + addr = lease.Address + libol.Info("network.DelLease (%s, %s) by UUID", uuid, addr) + if lease.Type != "static" { + w.UUID.Del(uuid) + } + } + if obj, ok := w.Addr.GetEx(addr); ok { + lease := obj.(*schema.Lease) + if lease.UUID == uuid { // avoid address conflict by different points. 
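+ // Release the address entry only while it is still owned by this UUID; static leases are kept.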
+ libol.Info("network.DelLease (%s, %s) by Addr", uuid, addr) + if lease.Type != "static" { + w.Addr.Del(addr) + } + } + } +} + +var Network = network{ + Networks: libol.NewSafeStrMap(1024), + UUID: libol.NewSafeStrMap(1024), + Addr: libol.NewSafeStrMap(1024), +} diff --git a/pkg/cache/online.go b/pkg/cache/online.go new file mode 100755 index 0000000..0268fab --- /dev/null +++ b/pkg/cache/online.go @@ -0,0 +1,55 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type online struct { + Lines *libol.SafeStrMap +} + +func (p *online) Init(size int) { + p.Lines = libol.NewSafeStrMap(size) +} + +func (p *online) Add(m *models.Line) { + _ = p.Lines.Set(m.String(), m) +} + +func (p *online) Update(m *models.Line) *models.Line { + if v := p.Lines.Get(m.String()); v != nil { + l := v.(*models.Line) + l.HitTime = m.HitTime + } + return nil +} + +func (p *online) Get(key string) *models.Line { + if v := p.Lines.Get(key); v != nil { + return v.(*models.Line) + } + return nil +} + +func (p *online) Del(key string) { + p.Lines.Del(key) +} + +func (p *online) List() <-chan *models.Line { + c := make(chan *models.Line, 128) + + go func() { + + p.Lines.Iter(func(k string, v interface{}) { + c <- v.(*models.Line) + }) + c <- nil //Finish channel by nil. + }() + + return c +} + +var Online = online{ + Lines: libol.NewSafeStrMap(1024), +} diff --git a/pkg/cache/openvpn.go b/pkg/cache/openvpn.go new file mode 100755 index 0000000..c022f6d --- /dev/null +++ b/pkg/cache/openvpn.go @@ -0,0 +1,184 @@ +package cache + +import ( + "bufio" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + "io" + "os" + "path/filepath" + "strconv" + "strings" + "time" + "unicode" +) + +type vpnClient struct { + Directory string +} + +func ParseInt64(value string) (int64, error) { + return strconv.ParseInt(value, 10, 64) +} + +func (o *vpnClient) GetDevice(name string) string { + sw := config.Manager.Switch + if sw == nil { + return "" + } + for _, n := range sw.Network { + vpn := n.OpenVPN + if vpn == nil { + continue + } + if vpn.Network == name { + return vpn.Device + } + } + return "" +} + +func (o *vpnClient) scanStatus(network string, reader io.Reader, + clients map[string]*schema.VPNClient) error { + readAt := "header" + offset := 0 + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + if line == "OpenVPN CLIENT LIST" { + readAt = "common" + offset = 3 + } + if line == "ROUTING TABLE" { + readAt = "routing" + offset = 2 + } + if line == "GLOBAL STATS" { + readAt = "global" + offset = 1 + } + if offset > 0 { + offset -= 1 + continue + } + columns := strings.SplitN(line, ",", 5) + switch readAt { + case "common": + if len(columns) == 5 { + name := columns[0] + remote := columns[1] + client := &schema.VPNClient{ + Name: name, + Remote: remote, + State: "success", + Device: o.GetDevice(network), + } + if rxc, err := ParseInt64(columns[2]); err == nil { + client.RxBytes = rxc + } + if txc, err := ParseInt64(columns[3]); err == nil { + client.TxBytes = txc + } + if len(columns[4]) > 0 { + var uptime time.Time + var err error + if unicode.IsDigit(rune(columns[4][0])) { + uptime, err = libol.GetLocalTime(libol.SimpleTime, columns[4]) + } else { + uptime, err = libol.GetLocalTime(time.ANSIC, columns[4]) + } + if err == nil { + client.Uptime = uptime.Unix() + client.AliveTime = time.Now().Unix() - client.Uptime + } else { + libol.Warn("vpnClient.scanStatus %s", err) 
+ } + } + clients[remote] = client + } + case "routing": + if len(columns) == 4 { + remote := columns[2] + address := columns[0] + if client, ok := clients[remote]; ok { + client.Address = address + } + } + } + } + if err := scanner.Err(); err != nil { + return err + } + return nil +} + +func (o *vpnClient) Dir(args ...string) string { + values := append([]string{o.Directory}, args...) + return filepath.Join(values...) +} + +func (o *vpnClient) statusFile(name string) []string { + files, err := filepath.Glob(o.Dir(name, "*server.status")) + if err != nil { + libol.Warn("vpnClient.statusFile %v", err) + } + return files +} + +func (o *vpnClient) readStatus(network string) map[string]*schema.VPNClient { + clients := make(map[string]*schema.VPNClient, 32) + for _, file := range o.statusFile(network) { + reader, err := os.Open(file) + if err != nil { + libol.Debug("vpnClient.readStatus %v", err) + return nil + } + if err := o.scanStatus(network, reader, clients); err != nil { + libol.Warn("vpnClient.readStatus %v", err) + } + reader.Close() + } + return clients +} + +func (o *vpnClient) List(name string) <-chan *schema.VPNClient { + c := make(chan *schema.VPNClient, 128) + + clients := o.readStatus(name) + go func() { + for _, v := range clients { + c <- v + } + c <- nil //Finish channel by nil. + }() + + return c +} + +func (o *vpnClient) GetClientProfile(network, client, remote string) (string, error) { + file := o.Dir(network, client+"client.ovpn") + reader, err := os.Open(file) + if err != nil { + return "", err + } + profile := "" + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "remote 0.0.0.0") { + profile += strings.Replace(line, "0.0.0.0", remote, 1) + } else { + profile += line + } + profile += "\n" + } + if err := scanner.Err(); err != nil { + return profile, err + } + return profile, nil +} + +var VPNClient = vpnClient{ + Directory: config.VarDir("openvpn"), +} diff --git a/pkg/cache/openvpn_test.go b/pkg/cache/openvpn_test.go new file mode 100755 index 0000000..7fa07cb --- /dev/null +++ b/pkg/cache/openvpn_test.go @@ -0,0 +1,23 @@ +package cache + +import ( + "fmt" + "testing" + "time" +) + +func Test_VPNClient_ListStatus(t *testing.T) { + fmt.Println(time.Now().Unix()) + for v := range VPNClient.List("yunex") { + if v == nil { + break + } + fmt.Println(v) + } + for v := range VPNClient.List("guest") { + if v == nil { + break + } + fmt.Println(v) + } +} diff --git a/pkg/cache/point.go b/pkg/cache/point.go new file mode 100755 index 0000000..8aa2e90 --- /dev/null +++ b/pkg/cache/point.go @@ -0,0 +1,82 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" +) + +type point struct { + Clients *libol.SafeStrMap + UUIDAddr *libol.SafeStrStr + AddrUUID *libol.SafeStrStr +} + +func (p *point) Init(size int) { + p.Clients = libol.NewSafeStrMap(size) +} + +func (p *point) Add(m *models.Point) { + _ = p.UUIDAddr.Reset(m.UUID, m.Client.String()) + _ = p.AddrUUID.Set(m.Client.String(), m.UUID) + _ = p.Clients.Set(m.Client.String(), m) +} + +func (p *point) Get(addr string) *models.Point { + if v := p.Clients.Get(addr); v != nil { + m := v.(*models.Point) + m.Update() + return m + } + return nil +} + +func (p *point) GetByUUID(uuid string) *models.Point { + if addr := p.GetAddr(uuid); addr != "" { + return p.Get(addr) + } + return nil +} + +func (p *point) GetUUID(addr string) string { + return p.AddrUUID.Get(addr) +} + +func (p *point) GetAddr(uuid string) string { + 
return p.UUIDAddr.Get(uuid) +} + +func (p *point) Del(addr string) { + if v := p.Clients.Get(addr); v != nil { + m := v.(*models.Point) + if m.Device != nil { + _ = m.Device.Close() + } + if p.UUIDAddr.Get(m.UUID) == addr { // not has newer + p.UUIDAddr.Del(m.UUID) + } + p.AddrUUID.Del(m.Client.String()) + p.Clients.Del(addr) + } +} + +func (p *point) List() <-chan *models.Point { + c := make(chan *models.Point, 128) + + go func() { + p.Clients.Iter(func(k string, v interface{}) { + if m, ok := v.(*models.Point); ok { + m.Update() + c <- m + } + }) + c <- nil //Finish channel by nil. + }() + + return c +} + +var Point = point{ + Clients: libol.NewSafeStrMap(1024), + UUIDAddr: libol.NewSafeStrStr(1024), + AddrUUID: libol.NewSafeStrStr(1024), +} diff --git a/pkg/cache/store.go b/pkg/cache/store.go new file mode 100755 index 0000000..cd97699 --- /dev/null +++ b/pkg/cache/store.go @@ -0,0 +1,21 @@ +package cache + +import ( + "github.com/luscis/openlan/pkg/config" +) + +func Init(cfg *config.Perf) { + Point.Init(cfg.Point) + Link.Init(cfg.Link) + Neighbor.Init(cfg.Neighbor) + Online.Init(cfg.OnLine) + User.Init(cfg.User) + Esp.Init(cfg.Esp) + EspState.Init(cfg.State) + EspPolicy.Init(cfg.Policy) +} + +func Reload() { + EspState.Clear() + EspPolicy.Clear() +} diff --git a/pkg/cache/user.go b/pkg/cache/user.go new file mode 100755 index 0000000..b5b39fd --- /dev/null +++ b/pkg/cache/user.go @@ -0,0 +1,222 @@ +package cache + +import ( + "bufio" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "strings" + "sync" + "time" +) + +type user struct { + Lock sync.RWMutex + File string + Users *libol.SafeStrMap + LdapCfg *libol.LDAPConfig + LdapSvc *libol.LDAPService +} + +func (w *user) Load() { + file := w.File + reader, err := libol.OpenRead(file) + if err != nil { + return + } + defer reader.Close() + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + columns := strings.SplitN(line, ":", 4) + if len(columns) < 2 { + continue + } + user := columns[0] + pass := columns[1] + role := "guest" + leStr := "" + if len(columns) > 2 { + role = columns[2] + } + if len(columns) > 3 { + leStr = columns[3] + } + lease, _ := libol.GetLocalTime(libol.LeaseTime, leStr) + obj := &models.User{ + Name: user, + Password: pass, + Role: role, + Lease: lease, + } + obj.Update() + w.Add(obj) + } + if err := scanner.Err(); err != nil { + libol.Warn("User.Load %v", err) + } +} + +func (w *user) Save() error { + if w.File == "" { + return nil + } + fp, err := libol.OpenTrunk(w.File) + if err != nil { + return err + } + for obj := range w.List() { + if obj == nil { + break + } + if obj.Role == "ldap" { + continue + } + line := obj.Id() + line += ":" + obj.Password + line += ":" + obj.Role + line += ":" + obj.Lease.Format(libol.LeaseTime) + _, _ = fp.WriteString(line + "\n") + } + return nil +} + +func (w *user) SetFile(value string) { + w.File = value +} + +func (w *user) Init(size int) { + w.Users = libol.NewSafeStrMap(size) +} + +func (w *user) Add(user *models.User) { + libol.Debug("user.Add %v", user) + key := user.Id() + if older := w.Get(key); older == nil { + _ = w.Users.Set(key, user) + } else { // Update pass and role. 
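+ // An existing user is updated in place; fields left empty in the request keep their stored values.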
+ if user.Role != "" { + older.Role = user.Role + } + if user.Password != "" { + older.Password = user.Password + } + if user.Alias != "" { + older.Alias = user.Alias + } + older.UpdateAt = user.UpdateAt + if !user.Lease.IsZero() { + older.Lease = user.Lease + } + } +} + +func (w *user) Del(key string) { + libol.Debug("user.Add %s", key) + w.Users.Del(key) +} + +func (w *user) Get(key string) *models.User { + if v := w.Users.Get(key); v != nil { + return v.(*models.User) + } + return nil +} + +func (w *user) List() <-chan *models.User { + c := make(chan *models.User, 128) + + go func() { + w.Users.Iter(func(k string, v interface{}) { + c <- v.(*models.User) + }) + c <- nil //Finish channel by nil. + }() + + return c +} + +func (w *user) CheckLdap(obj *models.User) *models.User { + svc := w.GetLdap() + if svc == nil { + return nil + } + u := w.Get(obj.Id()) + libol.Debug("CheckLdap %s", u) + if u != nil && u.Role != "ldap" { + return nil + } + if ok, err := svc.Login(obj.Id(), obj.Password); !ok { + libol.Warn("CheckLdap %s", err) + return nil + } + user := &models.User{ + Name: obj.Id(), + Password: obj.Password, + Role: "ldap", + Alias: obj.Alias, + } + user.Update() + w.Add(user) + return user +} + +func (w *user) Timeout(user *models.User) bool { + if user.Role == "ldap" { + return time.Now().Unix()-user.UpdateAt > w.LdapCfg.Timeout + } + return true +} + +func (w *user) Check(obj *models.User) (*models.User, error) { + if u := w.Get(obj.Id()); u != nil { + if u.Role == "" || u.Role == "admin" || u.Role == "guest" { + if u.Password == obj.Password { + t0 := time.Now() + t1 := u.Lease + if t1.Year() < 2000 || t1.After(t0) { + return u, nil + } + return nil, libol.NewErr("out of date") + } + } + } + if u := w.CheckLdap(obj); u != nil { + return u, nil + } + return nil, libol.NewErr("wrong user or password") +} + +func (w *user) GetLdap() *libol.LDAPService { + w.Lock.Lock() + defer w.Lock.Unlock() + if w.LdapCfg == nil { + return nil + } + if w.LdapSvc == nil || w.LdapSvc.Conn.IsClosing() { + if l, err := libol.NewLDAPService(*w.LdapCfg); err != nil { + libol.Warn("user.GetLdap %s", err) + w.LdapSvc = nil + } else { + w.LdapSvc = l + } + } + return w.LdapSvc +} + +func (w *user) SetLdap(cfg *libol.LDAPConfig) { + w.Lock.Lock() + defer w.Lock.Unlock() + if w.LdapCfg != cfg { + w.LdapCfg = cfg + } + if l, err := libol.NewLDAPService(*cfg); err != nil { + libol.Warn("user.SetLdap %s", err) + } else { + libol.Info("user.SetLdap %s", w.LdapCfg.Server) + w.LdapSvc = l + } +} + +var User = user{ + Users: libol.NewSafeStrMap(1024), +} diff --git a/pkg/config/acl.go b/pkg/config/acl.go new file mode 100755 index 0000000..6191e15 --- /dev/null +++ b/pkg/config/acl.go @@ -0,0 +1,20 @@ +package config + +type ACL struct { + File string `json:"file"` + Name string `json:"name"` + Rules []*ACLRule `json:"rules"` +} + +type ACLRule struct { + Name string `json:"name,omitempty" yaml:"name,omitempty"` + SrcIp string `json:"src,omitempty" yaml:"source,omitempty"` + DstIp string `json:"dst,omitempty" yaml:"destination,omitempty"` + Proto string `json:"proto,omitempty" yaml:"protocol,omitempty"` + SrcPort string `json:"sport,omitempty" yaml:"destPort,omitempty"` + DstPort string `json:"dport,omitempty" yaml:"sourcePort,omitempty"` + Action string `json:"action,omitempty" yaml:"action,omitempty"` +} + +func (ru *ACLRule) Correct() { +} diff --git a/pkg/config/bridge.go b/pkg/config/bridge.go new file mode 100755 index 0000000..f74eeea --- /dev/null +++ b/pkg/config/bridge.go @@ -0,0 +1,31 @@ +package config + 
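+// Bridge describes the per-network bridge device. A minimal sketch of the JSON it
+// accepts (field names taken from the tags below, values illustrative):
+//   {"name": "br-default", "address": "172.32.0.1/24", "mtu": 1500, "stp": "on"}
+// Correct() fills the defaults: name "br-<network>", provider "linux", MTU 1500,
+// forward delay 2 and STP "on".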
+type Bridge struct { + Network string `json:"network"` + Peer string `json:"peer,omitempty" yaml:"peer,omitempty"` + Name string `json:"name,omitempty" yaml:"name,omitempty"` + IPMtu int `json:"mtu,omitempty" yaml:"mtu,omitempty"` + Address string `json:"address,omitempty" yaml:"address,omitempty"` + Provider string `json:"provider,omitempty" yaml:"provider,omitempty"` + Stp string `json:"stp,omitempty" yaml:"stpState,omitempty"` + Delay int `json:"delay,omitempty" yaml:"forwardDelay,omitempty"` + Mss int `json:"tcpMss,omitempty" yaml:"tcpMss,omitempty"` +} + +func (br *Bridge) Correct() { + if br.Name == "" { + br.Name = "br-" + br.Network + } + if br.Provider == "" { + br.Provider = "linux" + } + if br.IPMtu == 0 { + br.IPMtu = 1500 + } + if br.Delay == 0 { + br.Delay = 2 + } + if br.Stp == "" { + br.Stp = "on" + } +} diff --git a/pkg/config/cert.go b/pkg/config/cert.go new file mode 100755 index 0000000..8888ff4 --- /dev/null +++ b/pkg/config/cert.go @@ -0,0 +1,109 @@ +package config + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "github.com/luscis/openlan/pkg/libol" + "github.com/xtaci/kcp-go/v5" + "io/ioutil" +) + +type Crypt struct { + Algo string `json:"algo,omitempty" yaml:"algorithm"` + Secret string `json:"secret,omitempty"` +} + +func (c *Crypt) IsZero() bool { + return c.Algo == "" && c.Secret == "" +} + +func (c *Crypt) Default() { + if c.Secret != "" && c.Algo == "" { + c.Algo = "xor" + } +} + +type Cert struct { + Dir string `json:"dir" yaml:"directory"` + CrtFile string `json:"crt" yaml:"cert"` + KeyFile string `json:"key" yaml:"key"` + CaFile string `json:"ca" yaml:"rootCa"` + Insecure bool `json:"insecure"` +} + +func (c *Cert) Correct() { + if c.Dir == "" { + return + } + if c.CrtFile == "" { + c.CrtFile = fmt.Sprintf("%s/crt", c.Dir) + } + if c.KeyFile == "" { + c.KeyFile = fmt.Sprintf("%s/key", c.Dir) + } + if c.CaFile == "" { + c.CaFile = fmt.Sprintf("%s/ca-trusted.crt", c.Dir) + } +} + +func (c *Cert) GetTlsCfg() *tls.Config { + if c.KeyFile == "" || c.CrtFile == "" { + return nil + } + libol.Debug("Cert.GetTlsCfg: %v", c) + cer, err := tls.LoadX509KeyPair(c.CrtFile, c.KeyFile) + if err != nil { + libol.Error("Cert.GetTlsCfg: %s", err) + return nil + } + return &tls.Config{Certificates: []tls.Certificate{cer}} +} + +func (c *Cert) GetCertPool() *x509.CertPool { + if c.CaFile == "" { + return nil + } + if err := libol.FileExist(c.CaFile); err != nil { + libol.Debug("Cert.GetTlsCertPool: %s not such file", c.CaFile) + return nil + } + caCert, err := ioutil.ReadFile(c.CaFile) + if err != nil { + libol.Warn("Cert.GetTlsCertPool: %s", err) + return nil + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(caCert) { + libol.Warn("Cert.GetTlsCertPool: invalid cert") + } + return pool +} + +func GetBlock(cfg *Crypt) kcp.BlockCrypt { + if cfg == nil || cfg.IsZero() { + return nil + } + var block kcp.BlockCrypt + pass := make([]byte, 64) + if len(cfg.Secret) <= 64 { + copy(pass, cfg.Secret) + } else { + copy(pass, []byte(cfg.Secret)[:64]) + } + switch cfg.Algo { + case "aes-128": + block, _ = kcp.NewAESBlockCrypt(pass[:16]) + case "aes-192": + block, _ = kcp.NewAESBlockCrypt(pass[:24]) + case "aes-256": + block, _ = kcp.NewAESBlockCrypt(pass[:32]) + case "tea": + block, _ = kcp.NewTEABlockCrypt(pass[:16]) + case "xtea": + block, _ = kcp.NewXTEABlockCrypt(pass[:16]) + default: + block, _ = kcp.NewSimpleXORBlockCrypt(pass) + } + return block +} diff --git a/pkg/config/common.go b/pkg/config/common.go new file mode 100755 index 0000000..6943efa --- /dev/null +++ 
b/pkg/config/common.go @@ -0,0 +1,51 @@ +package config + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "os" + "runtime" + "strings" +) + +var index = 1024 + +func GenName(prefix string) string { + index += 1 + return fmt.Sprintf("%s%d", prefix, index) +} + +func VarDir(name ...string) string { + return "/var/openlan/" + strings.Join(name, "/") +} + +type Log struct { + File string `json:"file,omitempty"` + Verbose int `json:"level,omitempty"` +} + +func LogFile(file string) string { + if runtime.GOOS == "linux" { + return "/var/log/" + file + } + return file +} + +type Http struct { + Listen string `json:"listen,omitempty"` + Public string `json:"public,omitempty" yaml:"publicDir"` +} + +func CorrectAddr(listen *string, port int) { + values := strings.Split(*listen, ":") + if len(values) == 1 { + *listen = fmt.Sprintf("%s:%d", values[0], port) + } +} + +func GetAlias() string { + if hostname, err := os.Hostname(); err == nil { + return strings.ToLower(hostname) + } + return libol.GenRandom(13) +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100755 index 0000000..46cb224 --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,22 @@ +package config + +type manager struct { + Point *Point + Switch *Switch + Proxy *Proxy +} + +var Manager = manager{ + Point: &Point{}, + Switch: &Switch{}, + Proxy: &Proxy{}, +} + +func GetNetwork(name string) *Network { + for _, network := range Manager.Switch.Network { + if network.Name == name { + return network + } + } + return nil +} diff --git a/pkg/config/dhcp.go b/pkg/config/dhcp.go new file mode 100755 index 0000000..09c4fd3 --- /dev/null +++ b/pkg/config/dhcp.go @@ -0,0 +1,9 @@ +package config + +type Dhcp struct { + Name string `json:"name,omitempty" yaml:"name"` + Bridge *Bridge `json:"bridge,omitempty" yaml:"bridge,omitempty"` + Subnet *IpSubnet `json:"subnet,omitempty" yaml:"subnet,omitempty"` + Hosts []HostLease `json:"hosts,omitempty" yaml:"hosts,omitempty"` + Routes []PrefixRoute `json:"routes,omitempty" yaml:"routes,omitempty"` +} diff --git a/pkg/config/esp.go b/pkg/config/esp.go new file mode 100755 index 0000000..4b6a8e6 --- /dev/null +++ b/pkg/config/esp.go @@ -0,0 +1,248 @@ +package config + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "net" + "strconv" + "strings" +) + +var ( + EspAuth = "8bc736635c0642aebc20ba5420c3e93a" + EspCrypt = "4ac161f6635843b8b02c60cc36822515" + EspLocalUdp = 4500 + EspRemoteUdp = 4500 +) + +func Addr2Cidr(addr string) string { + if !strings.Contains(addr, "/") { + return addr + "/32" + } + return addr +} + +func SetLocalUdp(port string) { + if udp, err := strconv.Atoi(port); err == nil { + EspLocalUdp = udp + } +} + +type EspState struct { + Local string `json:"local,omitempty" yaml:"local,omitempty"` + LocalIp net.IP `json:"local_addr" yaml:"local_addr"` + Remote string `json:"remote,omitempty" yaml:"remote,omitempty"` + RemotePort int `json:"remote_port" yaml:"remote_port"` + RemoteIp net.IP `json:"remote_addr" yaml:"remote_addr"` + Encap string `json:"encap,omitempty" yaml:"encapsulation,omitempty"` + Auth string `json:"auth,omitempty" yaml:"auth,omitempty"` + Crypt string `json:"crypt,omitempty" yaml:"crypt,omitempty"` +} + +func (s *EspState) Padding(value string, size int) string { + return strings.Repeat(value, 64/len(value))[:size] +} + +func (s *EspState) Correct(obj *EspState) { + if obj != nil { + if s.Local == "" { + s.Local = obj.Local + } + if s.Auth == "" { + s.Auth = obj.Auth + } + if s.Crypt == "" { + s.Crypt = obj.Crypt + } + if s.RemotePort 
== 0 { + s.RemotePort = obj.RemotePort + } + } + if addr, _ := net.LookupIP(s.Local); len(addr) > 0 { + s.LocalIp = addr[0] + } + if addr, _ := net.LookupIP(s.Remote); len(addr) > 0 { + s.RemoteIp = addr[0] + } + if s.LocalIp == nil && s.RemoteIp != nil { + addr, _ := libol.GetLocalByGw(s.RemoteIp.String()) + s.Local = addr.String() + s.LocalIp = addr + } + if s.Crypt == "" { + s.Crypt = s.Auth + } + if s.Auth == "" { + s.Auth = EspAuth + } + if s.Crypt == "" { + s.Crypt = EspCrypt + } + if s.Encap == "" { + s.Encap = "udp" + } + if s.RemotePort == 0 { + s.RemotePort = EspRemoteUdp + } + s.Auth = s.Padding(s.Auth, 32) + s.Crypt = s.Padding(s.Crypt, 32) +} + +type ESPPolicy struct { + Source string `json:"source,omitempty"` + Dest string `json:"destination,omitempty"` +} + +type ESPMember struct { + Name string `json:"name"` + Address string `json:"address,omitempty"` + Peer string `json:"peer"` + Spi int `json:"spi"` + State EspState `json:"state"` + Policies []*ESPPolicy `json:"policies" yaml:"policies,omitempty"` +} + +func (m *ESPMember) Correct(state *EspState) { + if m.Name == "" { + m.Name = fmt.Sprintf("spi:%d", m.Spi) + } else if m.Spi == 0 { + _, _ = fmt.Sscanf(m.Name, "spi:%d", &m.Spi) + } + if m.Address == "" || m.Peer == "" { + return + } + m.Peer = Addr2Cidr(m.Peer) + m.Address = Addr2Cidr(m.Address) + ptr := &m.State + ptr.Correct(state) + if m.Policies == nil { + m.Policies = make([]*ESPPolicy, 0, 2) + } + found := -1 + for index, pol := range m.Policies { + if pol.Source == "" { + pol.Source = "0.0.0.0/0" + } + if pol.Dest != m.Peer { + continue + } + found = index + } + if found < 0 { + m.Policies = append(m.Policies, &ESPPolicy{ + Source: "0.0.0.0/0", + Dest: m.Peer, + }) + } +} + +func (m *ESPMember) AddPolicy(obj *ESPPolicy) { + found := -1 + for index, po := range m.Policies { + if po.Dest != obj.Dest { + continue + } + found = index + po.Source = obj.Source + break + } + if found < 0 { + m.Policies = append(m.Policies, obj) + } +} + +func (m *ESPMember) RemovePolicy(dest string) bool { + found := -1 + for index, po := range m.Policies { + if po.Dest != dest { + continue + } + found = index + break + } + if found >= 0 { + copy(m.Policies[found:], m.Policies[found+1:]) + m.Policies = m.Policies[:len(m.Policies)-1] + } + return found >= 0 +} + +type ESPSpecifies struct { + Name string `json:"name"` + Address string `json:"address,omitempty"` + State EspState `json:"state" yaml:"state,omitempty"` + Members []*ESPMember `json:"members"` + Listen string `json:"listen,omitempty"` +} + +func (n *ESPSpecifies) Correct() { + if n.Listen != "" { + addr, port := libol.GetHostPort(n.Listen) + if addr != "" { + n.State.Local = addr + } + if port != "" { + SetLocalUdp(port) + } + } + for _, m := range n.Members { + if m.Address == "" { + m.Address = n.Address + } + m.Correct(&n.State) + } +} + +func (n *ESPSpecifies) GetMember(name string) *ESPMember { + for _, mem := range n.Members { + if mem.Name == name { + return mem + } + } + return nil +} + +func (n *ESPSpecifies) HasRemote(name, addr string) bool { + for _, mem := range n.Members { + state := mem.State + if state.Remote != name || state.RemoteIp.String() == addr { + continue + } + return true + } + return false +} + +func (n *ESPSpecifies) AddMember(obj *ESPMember) { + found := -1 + for index, mem := range n.Members { + if mem.Spi != obj.Spi && mem.Name != obj.Name { + continue + } + found = index + if len(obj.Policies) == 0 { + obj.Policies = mem.Policies + } + n.Members[index] = obj + break + } + if found < 0 { + n.Members 
= append(n.Members, obj) + } +} + +func (n *ESPSpecifies) DelMember(name string) bool { + found := -1 + for index, mem := range n.Members { + if mem.Name != name { + continue + } + found = index + break + } + if found >= 0 { + copy(n.Members[found:], n.Members[found+1:]) + n.Members = n.Members[:len(n.Members)-1] + } + return found >= 0 +} diff --git a/pkg/config/fabric.go b/pkg/config/fabric.go new file mode 100755 index 0000000..02a844d --- /dev/null +++ b/pkg/config/fabric.go @@ -0,0 +1,63 @@ +package config + +type FabricSpecifies struct { + Mss int `json:"tcpMss,omitempty"` + Fragment bool `json:"fragment"` + Driver string `json:"driver,omitempty"` + Name string `json:"name"` + Tunnels []*FabricTunnel `json:"tunnels"` +} + +func (n *FabricSpecifies) Correct() { + for _, tun := range n.Tunnels { + tun.Correct() + if tun.DstPort == 0 { + if n.Driver == "stt" { + tun.DstPort = 7471 + } else { + tun.DstPort = 4789 // 8472 + } + } + } +} + +func (n *FabricSpecifies) AddTunnel(obj *FabricTunnel) { + found := -1 + for index, tun := range n.Tunnels { + if tun.Remote != obj.Remote { + continue + } + found = index + n.Tunnels[index] = obj + break + } + if found < 0 { + n.Tunnels = append(n.Tunnels, obj) + } +} + +func (n *FabricSpecifies) DelTunnel(remote string) bool { + found := -1 + for index, tun := range n.Tunnels { + if tun.Remote != remote { + continue + } + found = index + break + } + if found >= 0 { + copy(n.Tunnels[found:], n.Tunnels[found+1:]) + n.Tunnels = n.Tunnels[:len(n.Tunnels)-1] + } + return found >= 0 +} + +type FabricTunnel struct { + DstPort uint32 `json:"dport"` + Remote string `json:"remote"` + Local string `json:"local,omitempty" yaml:"local,omitempty"` + Mode string `json:"mode,omitempty" yaml:"mode,omitempty"` +} + +func (c *FabricTunnel) Correct() { +} diff --git a/pkg/config/firewall.go b/pkg/config/firewall.go new file mode 100755 index 0000000..42779ac --- /dev/null +++ b/pkg/config/firewall.go @@ -0,0 +1,18 @@ +package config + +type FlowRule struct { + Table string `json:"table,omitempty" yaml:"table,omitempty"` + Chain string `json:"chain,omitempty" yaml:"chain,omitempty"` + Input string `json:"input,omitempty" yaml:"input,omitempty"` + Source string `json:"source,omitempty" yaml:"source,omitempty"` + ToSource string `json:"to-source,omitempty" yaml:"toSource,omitempty"` + Dest string `json:"destination,omitempty" yaml:"destination,omitempty"` + ToDest string `json:"to-destination" yaml:"toDestination,omitempty"` + Output string `json:"output,omitempty" yaml:"output,omitempty"` + Comment string `json:"comment,omitempty" yaml:"comment,omitempty"` + Proto string `json:"protocol,omitempty" yaml:"protocol,omitempty"` + Match string `json:"match,omitempty" yaml:"match,omitempty"` + DstPort string `json:"dport,omitempty" yaml:"dstPort,omitempty"` + SrcPort string `json:"sport,omitempty" yaml:"srcPort,omitempty"` + Jump string `json:"jump,omitempty" yaml:"jump,omitempty"` // SNAT/RETURN/MASQUERADE +} diff --git a/pkg/config/ldap.go b/pkg/config/ldap.go new file mode 100755 index 0000000..064ed8a --- /dev/null +++ b/pkg/config/ldap.go @@ -0,0 +1,11 @@ +package config + +type LDAP struct { + Server string `json:"server"` + BindDN string `json:"bindDN"` + Password string `json:"password"` + BaseDN string `json:"baseDN"` + Attribute string `json:"attribute"` + Filter string `json:"filter"` + EnableTls bool `json:"enableTLS"` +} diff --git a/pkg/config/limit.go b/pkg/config/limit.go new file mode 100755 index 0000000..c82d103 --- /dev/null +++ b/pkg/config/limit.go @@ -0,0 
+1,43 @@ +package config + +import "github.com/luscis/openlan/pkg/libol" + +type Queue struct { + SockWr int `json:"swr"` // per frames about 1572(1514+4+20+20+14)bytes + SockRd int `json:"srd"` // per frames + TapWr int `json:"twr"` // per frames about 1572((1514+4+20+20+14))bytes + TapRd int `json:"trd"` // per frames + VirSnd int `json:"vsd"` + VirWrt int `json:"vwr"` +} + +var ( + QdSwr = 1024 * 4 + QdSrd = 1024 * 4 + QdTwr = 1024 * 2 + QdTrd = 2 + QdVsd = 1024 * 8 + QdVWr = 1024 * 4 +) + +func (q *Queue) Default() { + if q.SockWr == 0 { + q.SockWr = QdSwr + } + if q.SockRd == 0 { + q.SockRd = QdSrd + } + if q.TapWr == 0 { + q.TapWr = QdTwr + } + if q.TapRd == 0 { + q.TapRd = QdTrd + } + if q.VirSnd == 0 { + q.VirSnd = QdVsd + } + if q.VirWrt == 0 { + q.VirWrt = QdVWr + } + libol.Debug("Queue.Default %v", q) +} diff --git a/pkg/config/network.go b/pkg/config/network.go new file mode 100755 index 0000000..89cb361 --- /dev/null +++ b/pkg/config/network.go @@ -0,0 +1,138 @@ +package config + +import ( + "github.com/luscis/openlan/pkg/libol" + "net" + "path/filepath" +) + +type Network struct { + ConfDir string `json:"-"` + File string `json:"file"` + Alias string `json:"-" yaml:"-"` + Name string `json:"name,omitempty" yaml:"name"` + Provider string `json:"provider,omitempty" yaml:"provider"` + Bridge *Bridge `json:"bridge,omitempty" yaml:"bridge,omitempty"` + Subnet *IpSubnet `json:"subnet,omitempty" yaml:"subnet,omitempty"` + OpenVPN *OpenVPN `json:"openvpn,omitempty" yaml:"openvpn,omitempty"` + Links []Point `json:"links,omitempty" yaml:"links,omitempty"` + Hosts []HostLease `json:"hosts,omitempty" yaml:"hosts,omitempty"` + Routes []PrefixRoute `json:"routes,omitempty" yaml:"routes,omitempty"` + Acl string `json:"acl,omitempty" yaml:"acl,omitempty"` + Specifies interface{} `json:"specifies,omitempty" yaml:"specifies,omitempty"` + Dhcp string `json:"dhcp,omitempty" yaml:"dhcp,omitempty"` + Outputs []Output `json:"outputs" yaml:"output,omitempty"` +} + +func (n *Network) Correct() { + if n.Bridge == nil { + n.Bridge = &Bridge{} + } + br := n.Bridge + br.Network = n.Name + br.Correct() + switch n.Provider { + case "esp": + spec := n.Specifies + if obj, ok := spec.(*ESPSpecifies); ok { + obj.Correct() + obj.Name = n.Name + } + case "fabric": + // 28 [udp] - 8 [esp] - + // 28 [udp] - 8 [vxlan] - + // 14 [ethernet] - tcp [40] - 1332 [mss] - + // 42 [padding] ~= variable 30-45 + if br.Mss == 0 { + br.Mss = 1332 + } + spec := n.Specifies + if obj, ok := spec.(*FabricSpecifies); ok { + obj.Correct() + obj.Name = n.Name + } + default: + if n.Subnet == nil { + n.Subnet = &IpSubnet{} + } + ipAddr := "" + ipMask := "" + if _i, _n, err := net.ParseCIDR(br.Address); err == nil { + ipAddr = _i.String() + ipMask = net.IP(_n.Mask).String() + } + if n.Subnet.Netmask == "" { + n.Subnet.Netmask = ipMask + } + for i := range n.Routes { + if n.Routes[i].Metric == 0 { + n.Routes[i].Metric = 660 + } + if n.Routes[i].NextHop == "" { + n.Routes[i].NextHop = ipAddr + } + if n.Routes[i].Mode == "" { + n.Routes[i].Mode = "snat" + } + } + if n.OpenVPN != nil { + n.OpenVPN.Network = n.Name + obj := DefaultOpenVPN() + n.OpenVPN.Correct(obj) + } + } +} + +func (n *Network) Dir(elem ...string) string { + args := append([]string{n.ConfDir}, elem...) + return filepath.Join(args...) +} + +func (n *Network) LoadLink() { + file := n.Dir("link", n.Name+".json") + if err := libol.FileExist(file); err == nil { + if err := libol.UnmarshalLoad(&n.Links, file); err != nil { + libol.Error("Network.LoadLink... 
%n", err) + } + } +} + +func (n *Network) LoadRoute() { + file := n.Dir("route", n.Name+".json") + if err := libol.FileExist(file); err == nil { + if err := libol.UnmarshalLoad(&n.Routes, file); err != nil { + libol.Error("Network.LoadRoute... %n", err) + } + } +} + +func (n *Network) Save() { + obj := *n + obj.Routes = nil + obj.Links = nil + if err := libol.MarshalSave(&obj, obj.File, true); err != nil { + libol.Error("Network.Save %s %s", obj.Name, err) + } + n.SaveRoute() + n.SaveLink() +} + +func (n *Network) SaveRoute() { + file := n.Dir("route", n.Name+".json") + if n.Routes == nil { + return + } + if err := libol.MarshalSave(n.Routes, file, true); err != nil { + libol.Error("Network.SaveRoute %s %s", n.Name, err) + } +} + +func (n *Network) SaveLink() { + file := n.Dir("link", n.Name+".json") + if n.Links == nil { + return + } + if err := libol.MarshalSave(n.Links, file, true); err != nil { + libol.Error("Network.SaveLink %s %s", n.Name, err) + } +} diff --git a/pkg/config/openvpn.go b/pkg/config/openvpn.go new file mode 100755 index 0000000..2d1452a --- /dev/null +++ b/pkg/config/openvpn.go @@ -0,0 +1,115 @@ +package config + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "strconv" + "strings" +) + +type OpenVPN struct { + Network string `json:"network"` + Directory string `json:"directory"` + Listen string `json:"listen"` + Protocol string `json:"protocol"` + Subnet string `json:"subnet"` + Device string `json:"device"` + Version int `json:"version"` + Auth string `json:"auth"` // xauth or cert. + DhPem string `json:"dhPem"` + RootCa string `json:"rootCa"` + ServerCrt string `json:"cert"` + ServerKey string `json:"key"` + TlsAuth string `json:"tlsAuth"` + Cipher string `json:"cipher"` + Routes []string `json:"-"` + Renego int `json:"renego,omitempty"` + Script string `json:"-"` + Breed []*OpenVPN `json:"breed,omitempty"` + Push []string `json:"push,omitempty"` + Clients []*OpenVPNClient `json:"clients,omitempty"` +} + +type OpenVPNClient struct { + Name string `json:"name" yaml:"name"` + Address string `json:"address" yaml:"address"` + Netmask string `json:"netmask" yaml:"netmask"` +} + +func DefaultOpenVPN() *OpenVPN { + return &OpenVPN{ + Protocol: "tcp", + Auth: "xauth", + Device: "tun0", + RootCa: VarDir("cert/ca.crt"), + ServerCrt: VarDir("cert/crt"), + ServerKey: VarDir("cert/key"), + DhPem: VarDir("openvpn/dh.pem"), + TlsAuth: VarDir("openvpn/ta.key"), + Cipher: "AES-256-CBC", + Script: "/usr/bin/openlan", + } +} + +func (o *OpenVPN) Correct(obj *OpenVPN) { + if obj != nil { + if o.Network == "" { + o.Network = obj.Network + } + if o.Auth == "" { + o.Auth = obj.Auth + } + if o.Protocol == "" { + o.Protocol = obj.Protocol + } + if o.DhPem == "" { + o.DhPem = obj.DhPem + } + if o.RootCa == "" { + o.RootCa = obj.RootCa + } + if o.ServerCrt == "" { + o.ServerCrt = obj.ServerCrt + } + if o.ServerKey == "" { + o.ServerKey = obj.ServerKey + } + if o.TlsAuth == "" { + o.TlsAuth = obj.TlsAuth + } + if o.Cipher == "" { + o.Cipher = obj.Cipher + } + if o.Routes == nil || len(o.Routes) == 0 { + o.Routes = append(o.Routes, obj.Routes...) + } + if o.Push == nil || len(o.Push) == 0 { + o.Push = append(o.Push, obj.Push...) + } + if o.Script == "" { + bin := obj.Script + " user check --network " + o.Network + o.Script = bin + } + if o.Clients == nil || len(o.Clients) == 0 { + o.Clients = append(o.Clients, obj.Clients...) 
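+        // Inherit static client leases from the parent profile when none are configured locally.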
+ } + } + if o.Directory == "" { + o.Directory = VarDir("openvpn", o.Network) + } + if o.Device == "" { + if !strings.Contains(o.Listen, ":") { + o.Listen += ":1194" + } + o.Device = GenName("tun") + } + pool := Manager.Switch.AddrPool + if o.Subnet == "" { + _, port := libol.GetHostPort(o.Listen) + value, _ := strconv.Atoi(port) + o.Subnet = fmt.Sprintf("%s.%d.0/24", pool, value&0xff) + } + for _, ch := range o.Breed { + ch.Correct(o) + } +} diff --git a/pkg/config/output.go b/pkg/config/output.go new file mode 100755 index 0000000..ac8ceea --- /dev/null +++ b/pkg/config/output.go @@ -0,0 +1,7 @@ +package config + +type Output struct { + Vlan int `json:"vlan"` + Interface string `json:"interface"` // format, like: gre:, vxlan:: + Link string `json:"link"` // link name +} diff --git a/pkg/config/password.go b/pkg/config/password.go new file mode 100755 index 0000000..8151156 --- /dev/null +++ b/pkg/config/password.go @@ -0,0 +1,7 @@ +package config + +type Password struct { + Network string `json:"network,omitempty"` + Username string `json:"username"` + Password string `json:"password"` +} diff --git a/pkg/config/point.go b/pkg/config/point.go new file mode 100755 index 0000000..c7be7ba --- /dev/null +++ b/pkg/config/point.go @@ -0,0 +1,183 @@ +package config + +import ( + "flag" + "github.com/luscis/openlan/pkg/libol" + "runtime" + "strings" +) + +type Interface struct { + Name string `json:"name,omitempty"` + IPMtu int `json:"mtu"` + Address string `json:"address,omitempty"` + Bridge string `json:"bridge,omitempty"` + Provider string `json:"provider,omitempty"` + Cost int `json:"cost,omitempty"` +} + +type Point struct { + File string `json:"file,omitempty"` + Alias string `json:"alias,omitempty"` + Connection string `json:"connection"` + Timeout int `json:"timeout"` + Username string `json:"username,omitempty"` + Network string `json:"network"` + Password string `json:"password,omitempty"` + Protocol string `json:"protocol,omitempty"` + Interface Interface `json:"interface"` + Log Log `json:"log"` + Http *Http `json:"http,omitempty"` + Crypt *Crypt `json:"crypt,omitempty"` + PProf string `json:"pprof,omitempty"` + RequestAddr bool `json:"requestAddr,omitempty"` + ByPass bool `json:"bypass,omitempty"` + SaveFile string `json:"-"` + Queue *Queue `json:"queue,omitempty"` + Terminal string `json:"-"` + Cert *Cert `json:"cert,omitempty"` + StatusFile string `json:"status,omitempty"` + PidFile string `json:"pid,omitempty"` +} + +func DefaultPoint() *Point { + obj := &Point{ + Alias: "", + Connection: "xx.openlan.net", + Network: "default", + Protocol: "tcp", // udp, kcp, tcp, tls, ws and wss etc. 
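+        // Timeout is in seconds and bounds socket read/write (see the -timeout flag).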
+ Timeout: 60, + Log: Log{ + File: "./point.log", + Verbose: libol.INFO, + }, + Interface: Interface{ + IPMtu: 1500, + Provider: "kernel", + Name: "", + }, + SaveFile: "./point.json", + RequestAddr: true, + Crypt: &Crypt{}, + Cert: &Cert{}, + Terminal: "on", + } + obj.Correct(nil) + return obj +} + +func NewPoint() *Point { + obj := DefaultPoint() + p := &Point{ + RequestAddr: true, + Crypt: obj.Crypt, + Cert: obj.Cert, + } + p.Flags() + p.Parse() + p.Initialize() + if Manager.Point == nil { + Manager.Point = p + } + return p +} + +func (ap *Point) Flags() { + obj := DefaultPoint() + flag.StringVar(&ap.Alias, "alias", obj.Alias, "Alias for this point") + flag.StringVar(&ap.Terminal, "terminal", obj.Terminal, "Run interactive terminal") + flag.StringVar(&ap.Connection, "conn", obj.Connection, "Connection access to") + flag.StringVar(&ap.Username, "user", obj.Username, "User access to by @") + flag.StringVar(&ap.Password, "pass", obj.Password, "Password for authentication") + flag.StringVar(&ap.Protocol, "proto", obj.Protocol, "IP Protocol for connection") + flag.StringVar(&ap.Log.File, "log:file", obj.Log.File, "File log saved to") + flag.StringVar(&ap.Interface.Name, "if:name", obj.Interface.Name, "Configure interface name") + flag.StringVar(&ap.Interface.Address, "if:addr", obj.Interface.Address, "Configure interface address") + flag.StringVar(&ap.Interface.Bridge, "if:br", obj.Interface.Bridge, "Configure bridge name") + flag.StringVar(&ap.Interface.Provider, "if:provider", obj.Interface.Provider, "Specifies provider") + flag.StringVar(&ap.SaveFile, "conf", obj.SaveFile, "The configuration file") + flag.StringVar(&ap.Crypt.Secret, "crypt:secret", obj.Crypt.Secret, "Crypt secret key") + flag.StringVar(&ap.Crypt.Algo, "crypt:algo", obj.Crypt.Algo, "Crypt algorithm, such as: aes-256") + flag.StringVar(&ap.PProf, "pprof", obj.PProf, "Http listen for pprof debug") + flag.StringVar(&ap.Cert.CaFile, "cacert", obj.Cert.CaFile, "CA certificate file") + flag.IntVar(&ap.Timeout, "timeout", obj.Timeout, "Timeout(s) for socket write/read") + flag.IntVar(&ap.Log.Verbose, "log:level", obj.Log.Verbose, "Log level value") + flag.StringVar(&ap.StatusFile, "status", obj.StatusFile, "File status saved to") + flag.StringVar(&ap.PidFile, "pid", obj.PidFile, "Write pid to file") +} + +func (ap *Point) Parse() { + flag.Parse() +} + +func (ap *Point) Id() string { + return ap.Connection + ":" + ap.Network +} + +func (ap *Point) Initialize() { + if err := ap.Load(); err != nil { + libol.Warn("NewPoint.Initialize %s", err) + } + ap.Default() + libol.SetLogger(ap.Log.File, ap.Log.Verbose) +} + +func (ap *Point) Correct(obj *Point) { + if ap.Alias == "" { + ap.Alias = GetAlias() + } + if ap.Network == "" { + if strings.Contains(ap.Username, "@") { + ap.Network = strings.SplitN(ap.Username, "@", 2)[1] + } else if obj != nil { + ap.Network = obj.Network + } + } + CorrectAddr(&ap.Connection, 10002) + if runtime.GOOS == "darwin" { + ap.Interface.Provider = "tun" + } + if ap.Protocol == "tls" || ap.Protocol == "wss" { + if ap.Cert == nil && obj != nil { + ap.Cert = obj.Cert + } + } + if ap.Cert != nil { + if ap.Cert.Dir == "" { + ap.Cert.Dir = "." 
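+            // Fall back to the working directory so Cert.Correct() resolves crt/key/ca relative to it.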
+ } + ap.Cert.Correct() + } + if ap.Protocol == "" { + ap.Protocol = "tcp" + } +} + +func (ap *Point) Default() { + obj := DefaultPoint() + ap.Correct(obj) + if ap.Queue == nil { + ap.Queue = &Queue{} + } + ap.Queue.Default() + //reset zero value to default + if ap.Connection == "" { + ap.Connection = obj.Connection + } + if ap.Interface.IPMtu == 0 { + ap.Interface.IPMtu = obj.Interface.IPMtu + } + if ap.Timeout == 0 { + ap.Timeout = obj.Timeout + } + if ap.Crypt != nil { + ap.Crypt.Default() + } +} + +func (ap *Point) Load() error { + if err := libol.FileExist(ap.SaveFile); err == nil { + return libol.UnmarshalLoad(ap, ap.SaveFile) + } + return nil +} diff --git a/pkg/config/proxy.go b/pkg/config/proxy.go new file mode 100755 index 0000000..f27f5b8 --- /dev/null +++ b/pkg/config/proxy.go @@ -0,0 +1,100 @@ +package config + +import ( + "flag" + "github.com/luscis/openlan/pkg/libol" +) + +type ShadowProxy struct { + Server string `json:"server,omitempty"` + Key string `json:"key,omitempty"` + Cipher string `json:"cipher,omitempty"` + Password string `json:"password,omitempty"` + Plugin string `json:"plugin,omitempty"` + PluginOpts string `json:"pluginOpts,omitempty"` + Protocol string `json:"protocol,omitempty"` +} + +type SocksProxy struct { + Listen string `json:"listen,omitempty"` + Auth Password `json:"auth,omitempty"` +} + +type HttpProxy struct { + Listen string `json:"listen,omitempty"` + Auth Password `json:"auth,omitempty"` + Cert *Cert `json:"cert,omitempty"` +} + +type TcpProxy struct { + Listen string `json:"listen,omitempty"` + Target []string `json:"target,omitempty"` +} + +type Proxy struct { + Conf string `json:"-"` + Log Log `json:"log"` + Socks []*SocksProxy `json:"socks,omitempty"` + Http []*HttpProxy `json:"http,omitempty"` + Tcp []*TcpProxy `json:"tcp,omitempty"` + Shadow []*ShadowProxy `json:"shadow,omitempty"` + PProf string `json:"pprof"` +} + +func DefaultProxy() *Proxy { + obj := &Proxy{ + Log: Log{ + File: LogFile("openlan-proxy.log"), + Verbose: libol.INFO, + }, + } + obj.Correct(nil) + return obj +} + +func NewProxy() *Proxy { + p := &Proxy{} + p.Flags() + p.Parse() + p.Initialize() + if Manager.Proxy == nil { + Manager.Proxy = p + } + return p +} + +func (p *Proxy) Flags() { + obj := DefaultProxy() + flag.StringVar(&p.Log.File, "log:file", obj.Log.File, "Configure log file") + flag.StringVar(&p.Conf, "conf", obj.Conf, "The configure file") + flag.StringVar(&p.PProf, "prof", obj.PProf, "Http listen for CPU prof") + flag.IntVar(&p.Log.Verbose, "log:level", obj.Log.Verbose, "Configure log level") +} + +func (p *Proxy) Parse() { + flag.Parse() +} + +func (p *Proxy) Initialize() { + if err := p.Load(); err != nil { + libol.Error("Proxy.Initialize %s", err) + } + p.Default() + libol.Debug("Proxy.Initialize %v", p) +} + +func (p *Proxy) Correct(obj *Proxy) { + for _, h := range p.Http { + if h.Cert != nil { + h.Cert.Correct() + } + } +} + +func (p *Proxy) Default() { + p.Correct(nil) +} + +func (p *Proxy) Load() error { + return libol.UnmarshalLoad(p, p.Conf) +} diff --git a/pkg/config/subnet.go b/pkg/config/subnet.go new file mode 100755 index 0000000..c9603d6 --- /dev/null +++ b/pkg/config/subnet.go @@ -0,0 +1,29 @@ +package config + +type IpSubnet struct { + Network string `json:"network,omitempty"` + Start string `json:"start,omitempty"` + End string `json:"end,omitempty"` + Netmask string `json:"netmask,omitempty"` +} + +type MultiPath struct { + NextHop string `json:"nexthop"` + Weight int `json:"weight"` +} + +type PrefixRoute struct { + File string 
`json:"file,omitempty"` + Network string `json:"network,omitempty"` + Prefix string `json:"prefix"` + NextHop string `json:"nexthop"` + MultiPath []MultiPath `json:"multipath,omitempty"` + Metric int `json:"metric"` + Mode string `json:"mode" yaml:"forwardMode"` // route or snat +} + +type HostLease struct { + Network string `json:"network"` + Hostname string `json:"hostname"` + Address string `json:"address"` +} diff --git a/pkg/config/switch.go b/pkg/config/switch.go new file mode 100755 index 0000000..2735d6a --- /dev/null +++ b/pkg/config/switch.go @@ -0,0 +1,282 @@ +package config + +import ( + "flag" + "github.com/luscis/openlan/pkg/libol" + "path/filepath" +) + +func DefaultPerf() *Perf { + return &Perf{ + Point: 1024, + Neighbor: 1024, + OnLine: 64, + Link: 1024, + User: 1024, + Esp: 1024, + State: 1024 * 10, + Policy: 1024 * 10, + VxLAN: 1024, + } +} + +type Perf struct { + Point int `json:"point"` + Neighbor int `json:"neighbor"` + OnLine int `json:"online"` + Link int `json:"link"` + User int `json:"user"` + Esp int `json:"esp"` + State int `json:"state"` + Policy int `json:"policy"` + VxLAN int `json:"vxlan"` +} + +func (p *Perf) Correct(obj *Perf) { + if p.Point == 0 && obj != nil { + p.Point = obj.Point + } + if p.Neighbor == 0 && obj != nil { + p.Neighbor = obj.Neighbor + } + if p.OnLine == 0 && obj != nil { + p.OnLine = obj.OnLine + } + if p.Link == 0 && obj != nil { + p.Link = obj.Link + } + if p.User == 0 && obj != nil { + p.User = obj.User + } + if p.Esp == 0 && obj != nil { + p.Esp = obj.Esp + } + if p.State == 0 && obj != nil { + p.State = obj.State + } + if p.Policy == 0 && obj != nil { + p.Policy = obj.Policy + } + if p.VxLAN == 0 && obj != nil { + p.VxLAN = obj.VxLAN + } +} + +type Switch struct { + File string `json:"file"` + Alias string `json:"alias"` + Perf Perf `json:"limit,omitempty" yaml:"limit"` + Protocol string `json:"protocol"` // tcp, tls, udp, kcp, ws and wss. 
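+    // Listen is the main access endpoint; Correct() appends the default port 10002 when only an address is given.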
+ Listen string `json:"listen"` + Timeout int `json:"timeout"` + Http *Http `json:"http,omitempty"` + Log Log `json:"log"` + Cert *Cert `json:"cert,omitempty"` + Crypt *Crypt `json:"crypt,omitempty"` + Network []*Network `json:"network,omitempty" yaml:"networks"` + Acl []*ACL `json:"acl,omitempty" yaml:"acl,omitempty"` + FireWall []FlowRule `json:"firewall,omitempty" yaml:"firewall,omitempty"` + Inspect []string `json:"inspect,omitempty" yaml:"inspect,omitempty"` + Queue Queue `json:"queue" yaml:"queue"` + PassFile string `json:"password" yaml:"passwordFile"` + Ldap *LDAP `json:"ldap,omitempty" yaml:"ldap,omitempty"` + AddrPool string `json:"pool,omitempty"` + ConfDir string `json:"-" yaml:"-"` + TokenFile string `json:"-" yaml:"-"` +} + +func DefaultSwitch() *Switch { + obj := &Switch{ + Timeout: 120, + Log: Log{ + File: LogFile("openlan-switch.log"), + Verbose: libol.INFO, + }, + Http: &Http{ + Listen: "0.0.0.0:10000", + }, + Listen: "0.0.0.0:10002", + } + obj.Correct(nil) + return obj +} + +func NewSwitch() *Switch { + s := Manager.Switch + s.Flags() + s.Parse() + s.Initialize() + return s +} + +func (s *Switch) Flags() { + obj := DefaultSwitch() + flag.StringVar(&s.Log.File, "log:file", obj.Log.File, "Configure log file") + flag.StringVar(&s.ConfDir, "conf:dir", obj.ConfDir, "Configure switch's directory") + flag.IntVar(&s.Log.Verbose, "log:level", obj.Log.Verbose, "Configure log level") +} + +func (s *Switch) Parse() { + flag.Parse() +} + +func (s *Switch) Initialize() { + s.File = s.Dir("switch.json") + if err := s.Load(); err != nil { + libol.Error("Switch.Initialize %s", err) + } + s.Default() + libol.Debug("Switch.Initialize %v", s) +} + +func (s *Switch) Correct(obj *Switch) { + if s.Alias == "" { + s.Alias = GetAlias() + } + CorrectAddr(&s.Listen, 10002) + if s.Http != nil { + CorrectAddr(&s.Http.Listen, 10000) + } + libol.Debug("Proxy.Correct Http %v", s.Http) + s.TokenFile = filepath.Join(s.ConfDir, "token") + s.File = filepath.Join(s.ConfDir, "switch.json") + if s.Cert != nil { + s.Cert.Correct() + } + perf := &s.Perf + perf.Correct(DefaultPerf()) + if s.PassFile == "" { + s.PassFile = filepath.Join(s.ConfDir, "password") + } + if s.Protocol == "" { + s.Protocol = "tcp" + } + if s.AddrPool == "" { + s.AddrPool = "100.44" + } +} + +func (s *Switch) Dir(elem ...string) string { + args := append([]string{s.ConfDir}, elem...) + return filepath.Join(args...) 
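+    // e.g. Dir("network", "*.json") resolves to "<ConfDir>/network/*.json", as used by LoadNetwork below.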
+} + +func (s *Switch) LoadNetwork() { + files, err := filepath.Glob(s.Dir("network", "*.json")) + if err != nil { + libol.Error("Switch.LoadNetwork %s", err) + } + for _, k := range files { + obj := &Network{ + Alias: s.Alias, + File: k, + ConfDir: s.ConfDir, + } + if err := libol.UnmarshalLoad(obj, k); err != nil { + libol.Error("Switch.LoadNetwork %s", err) + continue + } + obj.LoadLink() + obj.LoadRoute() + switch obj.Provider { + case "esp": + obj.Specifies = &ESPSpecifies{} + case "vxlan": + obj.Specifies = &VxLANSpecifies{} + case "fabric": + obj.Specifies = &FabricSpecifies{} + } + if obj.Specifies != nil { + if err := libol.UnmarshalLoad(obj, k); err != nil { + libol.Error("Switch.LoadNetwork %s", err) + continue + } + } + s.Network = append(s.Network, obj) + } + for _, obj := range s.Network { + for _, link := range obj.Links { + link.Default() + } + obj.Correct() + obj.Alias = s.Alias + if obj.File == "" { + obj.File = s.Dir("network", obj.Name+".json") + } + } +} + +func (s *Switch) LoadAcl() { + files, err := filepath.Glob(s.Dir("acl", "*.json")) + if err != nil { + libol.Error("Switch.LoadAcl %s", err) + } + for _, k := range files { + obj := &ACL{ + File: k, + } + if err := libol.UnmarshalLoad(obj, k); err != nil { + libol.Error("Switch.LoadAcl %s", err) + continue + } + s.Acl = append(s.Acl, obj) + } + for _, obj := range s.Acl { + for _, rule := range obj.Rules { + rule.Correct() + } + if obj.File == "" { + obj.File = s.Dir("acl", obj.Name+".json") + } + } +} + +func (s *Switch) Default() { + obj := DefaultSwitch() + s.Correct(obj) + if s.Timeout == 0 { + s.Timeout = obj.Timeout + } + if s.Crypt != nil { + s.Crypt.Default() + } + queue := &s.Queue + queue.Default() + s.LoadAcl() + s.LoadNetwork() +} + +func (s *Switch) Load() error { + return libol.UnmarshalLoad(s, s.File) +} + +func (s *Switch) Save() { + tmp := *s + tmp.Acl = nil + tmp.Network = nil + if err := libol.MarshalSave(&tmp, tmp.File, true); err != nil { + libol.Error("Switch.Save %s", err) + } + s.SaveAcl() + s.SaveNets() +} + +func (s *Switch) SaveAcl() { + if s.Acl == nil { + return + } + for _, obj := range s.Acl { + if err := libol.MarshalSave(obj, obj.File, true); err != nil { + libol.Error("Switch.Save.Acl %s %s", obj.Name, err) + } + } +} + +func (s *Switch) SaveNets() { + if s.Network == nil { + return + } + for _, obj := range s.Network { + obj.Save() + } +} diff --git a/pkg/config/vxlan.go b/pkg/config/vxlan.go new file mode 100755 index 0000000..a920574 --- /dev/null +++ b/pkg/config/vxlan.go @@ -0,0 +1,10 @@ +package config + +type VxLANSpecifies struct { + Name string `json:"name"` + Vni uint32 `json:"vni"` + Fabric string `json:"fabric"` +} + +func (c *VxLANSpecifies) Correct() { +} diff --git a/pkg/database/client.go b/pkg/database/client.go new file mode 100755 index 0000000..6ad6a91 --- /dev/null +++ b/pkg/database/client.go @@ -0,0 +1,134 @@ +package database + +import ( + "context" + "github.com/go-logr/logr" + "github.com/go-logr/stdr" + "github.com/luscis/openlan/cmd/api" + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/client" + "github.com/ovn-org/libovsdb/model" + "github.com/ovn-org/libovsdb/ovsdb" + "log" + "os" +) + +type OvSDB struct { + client.Client + Ops []ovsdb.Operation +} + +func (o *OvSDB) Context() context.Context { + return context.Background() +} + +func (o *OvSDB) Switch() (*Switch, error) { + var listSw []Switch + if err := o.List(&listSw); err != nil { + return nil, err + } + if len(listSw) == 0 { + return nil, 
libol.NewErr("hasn't switch") + } + return &listSw[0], nil +} + +func (o *OvSDB) Execute(ops []ovsdb.Operation) { + o.Ops = append(o.Ops, ops...) +} + +func (o *OvSDB) Commit() ([]ovsdb.OperationResult, error) { + ops := o.Ops + o.Ops = nil + return o.Client.Transact(o.Context(), ops...) +} + +func (o *OvSDB) Get(m model.Model) error { + return o.Client.Get(o.Context(), m) +} + +func (o *OvSDB) Transact(ops ...ovsdb.Operation) ([]ovsdb.OperationResult, error) { + return o.Client.Transact(o.Context(), ops...) +} + +func (o *OvSDB) List(result interface{}) error { + return o.Client.List(o.Context(), result) +} + +func (o *OvSDB) WhereList(predict interface{}, result interface{}) error { + return o.Client.WhereCache(predict).List(o.Context(), result) +} + +var Client *OvSDB + +type DBClient struct { + Server string + Database string + Verbose bool + Client *OvSDB +} + +func (c *DBClient) Context() context.Context { + return context.Background() +} + +func (c *DBClient) NilLog() *logr.Logger { + // create a new logger to log to /dev/null + writer, err := libol.OpenWrite(os.DevNull) + if err != nil { + writer = os.Stderr + } + l := stdr.NewWithOptions(log.New(writer, "", log.LstdFlags), stdr.Options{LogCaller: stdr.All}) + return &l +} + +func (c *DBClient) Open(handler *cache.EventHandlerFuncs) error { + server := c.Server + database := c.Database + dbModel, err := model.NewClientDBModel(database, models) + if err != nil { + return err + } + ops := []client.Option{ + client.WithEndpoint(server), + } + if !c.Verbose { + ops = append(ops, client.WithLogger(c.NilLog())) + } + ovs, err := client.NewOVSDBClient(dbModel, ops...) + if err != nil { + return err + } + if err := ovs.Connect(c.Context()); err != nil { + return err + } + Client = &OvSDB{Client: ovs} + c.Client = Client + if handler != nil { + processor := ovs.Cache() + if processor == nil { + return libol.NewErr("can't get cache.") + } + processor.AddEventHandler(handler) + } + if _, err := ovs.MonitorAll(c.Context()); err != nil { + return err + } + return nil +} + +var Conf *DBClient + +func NewDBClient(handler *cache.EventHandlerFuncs) (*DBClient, error) { + var err error + if Conf == nil { + Conf = &DBClient{ + Server: api.Server, + Database: api.Database, + Verbose: api.Verbose, + } + err = Conf.Open(handler) + } + return Conf, err +} diff --git a/pkg/database/models.go b/pkg/database/models.go new file mode 100755 index 0000000..fc0eaca --- /dev/null +++ b/pkg/database/models.go @@ -0,0 +1,14 @@ +package database + +import ( + "github.com/ovn-org/libovsdb/model" +) + +var models = map[string]model.Model{ + "Global_Switch": &Switch{}, + "Virtual_Network": &VirtualNetwork{}, + "Virtual_Link": &VirtualLink{}, + "Open_VPN": &OpenVPN{}, + "Name_Cache": &NameCache{}, + "Prefix_Route": &PrefixRoute{}, +} diff --git a/pkg/database/schema.go b/pkg/database/schema.go new file mode 100755 index 0000000..33cab91 --- /dev/null +++ b/pkg/database/schema.go @@ -0,0 +1,55 @@ +package database + +type Switch struct { + UUID string `ovsdb:"_uuid"` + Protocol string `ovsdb:"protocol"` + Listen int `ovsdb:"listen"` + OtherConfig map[string]string `ovsdb:"other_config" yaml:"other_config"` + VirtualNetworks []string `ovsdb:"virtual_networks" yaml:"virtual_networks"` +} + +type VirtualNetwork struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Provider string `ovsdb:"provider"` + Bridge string `ovsdb:"bridge"` + Address string `ovsdb:"address"` + OtherConfig map[string]string `ovsdb:"other_config" yaml:"other_config"` + RemoteLinks 
[]string `ovsdb:"remote_links" yaml:"remote_links"` + LocalLinks []string `ovsdb:"local_links" yaml:"local_links"` + OpenVPN *string `ovsdb:"open_vpn" yaml:"open_vpn"` + PrefixRoutes []string `ovsdb:"prefix_routes" yaml:"prefix_routes"` +} + +type VirtualLink struct { + UUID string `ovsdb:"_uuid"` + Network string `ovsdb:"network"` + Connection string `ovsdb:"connection"` + Device string `ovsdb:"device"` + OtherConfig map[string]string `ovsdb:"other_config" yaml:"other_config"` + Authentication map[string]string `ovsdb:"authentication" yaml:"authentication"` + LinkState string `ovsdb:"link_state" yaml:"link_state"` + Status map[string]string `ovsdb:"status" yaml:"status"` +} + +type OpenVPN struct { + UUID string `ovsdb:"_uuid"` + Protocol string `ovsdb:"protocol"` + Listen int `ovsdb:"listen"` +} + +type NameCache struct { + UUID string `ovsdb:"_uuid"` + Name string `ovsdb:"name"` + Address string `ovsdb:"address"` + UpdateAt string `ovsdb:"update_at" yaml:"update_at"` +} + +type PrefixRoute struct { + UUID string `ovsdb:"_uuid"` + Network string `ovsdb:"network"` + Prefix string `ovsdb:"prefix"` + Source string `ovsdb:"source"` + Gateway string `ovsdb:"gateway"` + Mode string `ovsdb:"mode"` +} diff --git a/pkg/database/utils.go b/pkg/database/utils.go new file mode 100755 index 0000000..4addff9 --- /dev/null +++ b/pkg/database/utils.go @@ -0,0 +1,19 @@ +package database + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/ovsdb" +) + +func PrintError(result []ovsdb.OperationResult) { + for _, ret := range result { + if len(ret.Error) == 0 { + continue + } + libol.Info("%s", ret.Details) + } +} + +func GenUUID() string { + return libol.GenRandom(32) +} diff --git a/pkg/libol/daemon_linux.go b/pkg/libol/daemon_linux.go new file mode 100755 index 0000000..75bf851 --- /dev/null +++ b/pkg/libol/daemon_linux.go @@ -0,0 +1,10 @@ +package libol + +import "github.com/coreos/go-systemd/v22/daemon" + +func PreNotify() { +} + +func SdNotify() { + go daemon.SdNotify(false, daemon.SdNotifyReady) +} diff --git a/pkg/libol/daemon_others.go b/pkg/libol/daemon_others.go new file mode 100755 index 0000000..092f4b9 --- /dev/null +++ b/pkg/libol/daemon_others.go @@ -0,0 +1,9 @@ +// +build !linux + +package libol + +func PreNotify() { +} + +func SdNotify() { +} diff --git a/pkg/libol/error.go b/pkg/libol/error.go new file mode 100755 index 0000000..862f59d --- /dev/null +++ b/pkg/libol/error.go @@ -0,0 +1,7 @@ +package libol + +import "fmt" + +func NewErr(message string, v ...interface{}) error { + return fmt.Errorf(message, v...) 
+} diff --git a/pkg/libol/go.go b/pkg/libol/go.go new file mode 100755 index 0000000..10808b1 --- /dev/null +++ b/pkg/libol/go.go @@ -0,0 +1,62 @@ +package libol + +import ( + "net/http" + _ "net/http/pprof" + "sync" +) + +type gos struct { + lock sync.Mutex + total uint64 +} + +var Gos = gos{} + +func (t *gos) Add(call interface{}) { + t.lock.Lock() + defer t.lock.Unlock() + t.total++ + Debug("gos.Add %d %p", t.total, call) +} + +func (t *gos) Del(call interface{}) { + t.lock.Lock() + defer t.lock.Unlock() + t.total-- + Debug("gos.Del %d %p", t.total, call) +} + +func Go(call func()) { + name := FunName(call) + go func() { + defer Catch("Go.func") + Gos.Add(call) + Debug("Go.Add: %s", name) + call() + Debug("Go.Del: %s", name) + Gos.Del(call) + }() +} + +type PProf struct { + File string + Listen string + Error error +} + +func (p *PProf) Start() { + if p.Listen == "" { + p.Listen = "localhost:6060" + } + Go(func() { + Info("PProf.Start %s", p.Listen) + if err := http.ListenAndServe(p.Listen, nil); err != nil { + Error("PProf.Start %s", err) + p.Error = err + } + }) +} + +func (p *PProf) Stop() { +} diff --git a/pkg/libol/http.go b/pkg/libol/http.go new file mode 100755 index 0000000..5c24d82 --- /dev/null +++ b/pkg/libol/http.go @@ -0,0 +1,44 @@ +package libol + +import ( + "crypto/tls" + "io" + "net/http" +) + +type HttpClient struct { + Method string + Url string + Payload io.Reader + Auth Auth + TlsConfig *tls.Config + Client *http.Client +} + +func (cl *HttpClient) Do() (*http.Response, error) { + if cl.Method == "" { + cl.Method = "GET" + } + if cl.TlsConfig == nil { + cl.TlsConfig = &tls.Config{InsecureSkipVerify: true} + } + req, err := http.NewRequest(cl.Method, cl.Url, cl.Payload) + if err != nil { + return nil, err + } + if cl.Auth.Type == "basic" { + req.Header.Set("Authorization", BasicAuth(cl.Auth.Username, cl.Auth.Password)) + } + cl.Client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: cl.TlsConfig, + }, + } + return cl.Client.Do(req) +} + +func (cl *HttpClient) Close() { + if cl.Client != nil { + cl.Client.CloseIdleConnections() + } +} diff --git a/pkg/libol/iputils.go b/pkg/libol/iputils.go new file mode 100755 index 0000000..6cd4ed2 --- /dev/null +++ b/pkg/libol/iputils.go @@ -0,0 +1,203 @@ +package libol + +import ( + "os/exec" + "runtime" + "strings" +) + +func IpLinkUp(name string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := []string{ + "link", "set", "dev", name, "up", + } + return exec.Command("ip", args...).CombinedOutput() + case "windows": + args := []string{ + "interface", "set", "interface", + "name=" + name, "admin=ENABLED", + } + return exec.Command("netsh", args...).CombinedOutput() + default: + return nil, NewErr("IpLinkUp %s notSupport", runtime.GOOS) + } +} + +func IpLinkDown(name string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := []string{ + "link", "set", "dev", name, "down", + } + return exec.Command("ip", args...).CombinedOutput() + case "windows": + args := []string{ + "interface", "set", "interface", + "name=" + name, "admin=DISABLED", + } + return exec.Command("netsh", args...).CombinedOutput() + default: + return nil, NewErr("IpLinkDown %s notSupport", runtime.GOOS) + } +} + +func IpAddrAdd(name, addr string, opts ...string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := append([]string{ + "address", "add", addr, "dev", name, + }, opts...) 
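+        // Shells out to: ip address add <addr> dev <name> [opts...].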
+ return exec.Command("ip", args...).CombinedOutput() + case "windows": + args := append([]string{ + "interface", "ipv4", "add", "address", + "name=" + name, "address=" + addr, "store=active", + }, opts...) + return exec.Command("netsh", args...).CombinedOutput() + case "darwin": + args := append([]string{ + name, addr, + }, opts...) + return exec.Command("ifconfig", args...).CombinedOutput() + default: + return nil, NewErr("IpAddrAdd %s notSupport", runtime.GOOS) + } +} + +func IpAddrDel(name, addr string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := []string{ + "address", "del", addr, "dev", name, + } + return exec.Command("ip", args...).CombinedOutput() + case "windows": + ipAddr := strings.SplitN(addr, "/", 1)[0] + args := []string{ + "interface", "ipv4", "delete", "address", + "name=" + name, "address=" + ipAddr, "store=active", + } + return exec.Command("netsh", args...).CombinedOutput() + case "darwin": + args := []string{ + name, addr, "delete", + } + return exec.Command("ifconfig", args...).CombinedOutput() + default: + return nil, NewErr("IpAddrDel %s notSupport", runtime.GOOS) + } +} + +func IpAddrShow(name string) []string { + switch runtime.GOOS { + case "windows": + addrs := make([]string, 0, 4) + args := []string{ + "interface", "ipv4", "show", "ipaddress", + "interface=" + name, "level=verbose", + } + out, err := exec.Command("netsh", args...).Output() + if err != nil { + return nil + } + outArr := strings.Split(string(out), "\n") + for _, addrStr := range outArr { + addrArr := strings.SplitN(addrStr, " ", 3) + if len(addrArr) != 3 { + continue + } + if addrArr[0] == "Remote" && strings.Contains(addrArr[2], "Parameters") { + addrs = append(addrs, addrArr[1]) + } + } + return addrs + default: + return nil + } +} + +func IpRouteAdd(name, prefix, nexthop string, opts ...string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := []string{ + "route", "address", prefix, "via", nexthop, + } + return exec.Command("ip", args...).CombinedOutput() + case "windows": + args := []string{ + "interface", "ipv4", "add", "route", + "prefix=" + prefix, "interface=" + name, "nexthop=" + nexthop, + "store=active", + } + return exec.Command("netsh", args...).CombinedOutput() + case "darwin": + args := append([]string{ + "add", "-net", prefix}) + if name != "" { + args = append(args, "-iface", name) + } + if nexthop != "" { + args = append(args, nexthop) + } + args = append(args, opts...) + return exec.Command("route", args...).CombinedOutput() + default: + return nil, NewErr("IpRouteAdd %s notSupport", runtime.GOOS) + } +} + +func IpRouteDel(name, prefix, nexthop string, opts ...string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + args := []string{ + "route", "del", prefix, "via", nexthop, + } + return exec.Command("ip", args...).CombinedOutput() + case "windows": + args := []string{ + "interface", "ipv4", "delete", "route", + "prefix=" + prefix, "interface=" + name, "nexthop=" + nexthop, + "store=active", + } + return exec.Command("netsh", args...).CombinedOutput() + case "darwin": + args := append([]string{ + "delete", "-net", prefix}) + if name != "" { + args = append(args, "-iface", name) + } + if nexthop != "" { + args = append(args, nexthop) + } + args = append(args, opts...) 
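+        // Builds the macOS command: route delete -net <prefix> [-iface <name>] [<nexthop>] [opts...].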
+ return exec.Command("route", args...).CombinedOutput() + default: + return nil, NewErr("IpRouteDel %s notSupport", runtime.GOOS) + } +} + +func IpRouteShow(name string) []string { + switch runtime.GOOS { + default: + return nil + } +} + +func IpMetricSet(name, metric string, opts ...string) ([]byte, error) { + switch runtime.GOOS { + case "linux": + return nil, nil + case "windows": + args := append([]string{ + "interface", "ipv4", "set", "interface", + "interface=" + name, "metric=" + metric, + }, opts...) + return exec.Command("netsh", args...).CombinedOutput() + case "darwin": + //TODO + return nil, nil + default: + return nil, NewErr("IpAddrAdd %s notSupport", runtime.GOOS) + } +} diff --git a/pkg/libol/kcpsocket.go b/pkg/libol/kcpsocket.go new file mode 100755 index 0000000..9844cf1 --- /dev/null +++ b/pkg/libol/kcpsocket.go @@ -0,0 +1,208 @@ +package libol + +import ( + "github.com/xtaci/kcp-go/v5" + "net" + "time" +) + +type KcpConfig struct { + Block kcp.BlockCrypt + WinSize int // default 1024 + DataShards int // default 10 + ParityShards int // default 3 + Timeout time.Duration // ns + RdQus int // per frames + WrQus int // per frames +} + +var defaultKcpConfig = KcpConfig{ + Block: nil, + WinSize: 1024, + DataShards: 10, + ParityShards: 3, + Timeout: 120 * time.Second, +} + +func NewKcpConfig() *KcpConfig { + return &defaultKcpConfig +} + +type KcpServer struct { + *SocketServerImpl + kcpCfg *KcpConfig + listener *kcp.Listener +} + +func setConn(conn *kcp.UDPSession, cfg *KcpConfig) { + Info("setConn %s", conn.RemoteAddr()) + conn.SetStreamMode(true) + conn.SetWriteDelay(false) + Info("setConn %s to fast3", conn.RemoteAddr()) + // normal: 0, 40, 2, 1 + // fast : 0, 30, 2, 1 + // fast3 : 1, 10, 2, 1 + conn.SetNoDelay(1, 10, 2, 1) + conn.SetWindowSize(cfg.WinSize, cfg.WinSize) + conn.SetACKNoDelay(true) +} + +func NewKcpServer(listen string, cfg *KcpConfig) *KcpServer { + if cfg == nil { + cfg = &defaultKcpConfig + } + k := &KcpServer{ + kcpCfg: cfg, + SocketServerImpl: NewSocketServer(listen), + } + k.close = k.Close + return k +} + +func (k *KcpServer) Listen() (err error) { + k.listener, err = kcp.ListenWithOptions( + k.address, + k.kcpCfg.Block, + k.kcpCfg.DataShards, + k.kcpCfg.ParityShards) + if err != nil { + k.listener = nil + return err + } + if err := k.listener.SetDSCP(46); err != nil { + Warn("KcpServer.SetDSCP %s", err) + } + Info("KcpServer.Listen: kcp://%s", k.address) + return nil +} + +func (k *KcpServer) Close() { + if k.listener != nil { + _ = k.listener.Close() + Info("KcpServer.Close: %s", k.address) + k.listener = nil + } +} + +func (k *KcpServer) Accept() { + Debug("KcpServer.Accept") + promise := Promise{ + First: 2 * time.Second, + MinInt: 5 * time.Second, + MaxInt: 30 * time.Second, + } + promise.Done(func() error { + if err := k.Listen(); err != nil { + Warn("KcpServer.Accept: %s", err) + return err + } + return nil + }) + defer k.Close() + for { + if k.listener == nil { + return + } + conn, err := k.listener.AcceptKCP() + if k.preAccept(conn, err) != nil { + continue + } + setConn(conn, k.kcpCfg) + k.onClients <- NewKcpClientFromConn(conn, k.kcpCfg) + } +} + +// Client Implement + +type KcpClient struct { + *SocketClientImpl + kcpCfg *KcpConfig +} + +func NewKcpClient(addr string, cfg *KcpConfig) *KcpClient { + if cfg == nil { + cfg = &defaultKcpConfig + } + c := &KcpClient{ + kcpCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + } + return c +} + +func 
NewKcpClientFromConn(conn net.Conn, cfg *KcpConfig) *KcpClient { + if cfg == nil { + cfg = &defaultKcpConfig + } + addr := conn.RemoteAddr().String() + c := &KcpClient{ + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + } + c.updateConn(conn) + return c +} + +func (c *KcpClient) Connect() error { + if !c.Retry() { + return nil + } + c.out.Info("KcpClient.Connect: kcp://%s", c.address) + conn, err := kcp.DialWithOptions( + c.address, + c.kcpCfg.Block, + c.kcpCfg.DataShards, + c.kcpCfg.DataShards) + if err != nil { + return err + } + if err := conn.SetDSCP(46); err != nil { + c.out.Warn("KcpClient.SetDSCP: ", err) + } + setConn(conn, c.kcpCfg) + c.SetConnection(conn) + if c.listener.OnConnected != nil { + _ = c.listener.OnConnected(c) + } + return nil +} + +func (c *KcpClient) Close() { + c.out.Debug("KcpClient.Close: %v", c.IsOk()) + c.lock.Lock() + if c.connection != nil { + if c.status != ClTerminal { + c.status = ClClosed + } + c.out.Debug("KcpClient.Close") + c.updateConn(nil) + c.private = nil + c.lock.Unlock() + if c.listener.OnClose != nil { + _ = c.listener.OnClose(c) + } + } else { + c.lock.Unlock() + } +} + +func (c *KcpClient) Terminal() { + c.SetStatus(ClTerminal) + c.Close() +} + +func (c *KcpClient) SetStatus(v SocketStatus) { + c.lock.Lock() + defer c.lock.Unlock() + if c.status != v { + if c.listener.OnStatus != nil { + c.listener.OnStatus(c, c.status, v) + } + c.status = v + } +} diff --git a/pkg/libol/ldap.go b/pkg/libol/ldap.go new file mode 100755 index 0000000..f7af163 --- /dev/null +++ b/pkg/libol/ldap.go @@ -0,0 +1,70 @@ +package libol + +import ( + "crypto/tls" + "fmt" + "github.com/go-ldap/ldap" +) + +type LDAPConfig struct { + Server string + BindDN string + Password string + BaseDN string + Attr string + Filter string + EnableTls bool + Timeout int64 +} + +type LDAPService struct { + Conn *ldap.Conn + Cfg LDAPConfig +} + +func NewLDAPService(cfg LDAPConfig) (*LDAPService, error) { + conn, err := ldap.Dial("tcp", cfg.Server) + if err != nil { + return nil, err + } + if cfg.EnableTls { + err = conn.StartTLS(&tls.Config{InsecureSkipVerify: true}) + if err != nil { + return nil, err + } + } + if err = conn.Bind(cfg.BindDN, cfg.Password); err != nil { + return nil, err + } + if cfg.Timeout == 0 { + cfg.Timeout = 8 * 3600 + } + return &LDAPService{Conn: conn, Cfg: cfg}, nil +} + +func (l *LDAPService) Login(userName, password string) (bool, error) { + request := ldap.NewSearchRequest( + l.Cfg.BaseDN, + ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, + 0, 0, false, + fmt.Sprintf(l.Cfg.Filter, userName), + []string{l.Cfg.Attr}, + nil, + ) + Debug("LDAPService.Login %v", request) + result, err := l.Conn.Search(request) + if err != nil { + return false, err + } + if len(result.Entries) != 1 { + return false, fmt.Errorf("invalid users") + } + userDN := result.Entries[0].DN + if err = l.Conn.Bind(userDN, password); err != nil { + return false, err + } + if err = l.Conn.Bind(l.Cfg.BindDN, l.Cfg.Password); err != nil { + return false, nil + } + return true, nil +} diff --git a/pkg/libol/logger.go b/pkg/libol/logger.go new file mode 100755 index 0000000..31c7992 --- /dev/null +++ b/pkg/libol/logger.go @@ -0,0 +1,241 @@ +package libol + +import ( + "container/list" + "fmt" + "log" + "runtime/debug" + "sync" + "time" +) + +const ( + PRINT = 00 + LOG = 05 + STACK = 06 + DEBUG = 10 + FLOW = 11 + CMD = 15 + EVENT = 16 + INFO = 20 + WARN = 30 + ERROR = 40 + FATAL = 99 +) + +type Message struct { + Level 
string `json:"level"` + Date string `json:"date"` + Message string `json:"message"` + Module string `json:"module"` +} + +var levels = map[int]string{ + PRINT: "PRINT", + LOG: "LOG", + DEBUG: "DEBUG", + STACK: "STACK", + FLOW: "FLOW", + CMD: "CMD", + EVENT: "EVENT", + INFO: "INFO", + WARN: "WARN", + ERROR: "ERROR", + FATAL: "FATAL", +} + +type logger struct { + Level int + FileName string + FileLog *log.Logger + Lock sync.Mutex + Errors *list.List +} + +func (l *logger) Write(level int, format string, v ...interface{}) { + str, ok := levels[level] + if !ok { + str = "NULL" + } + if level >= l.Level { + log.Printf(fmt.Sprintf("%s|%s", str, format), v...) + } + if level >= INFO { + l.Save(str, format, v...) + } +} + +func (l *logger) Save(level string, format string, v ...interface{}) { + m := fmt.Sprintf(format, v...) + if l.FileLog != nil { + l.FileLog.Println(level + "|" + m) + } + l.Lock.Lock() + defer l.Lock.Unlock() + if l.Errors.Len() >= 1024 { + if e := l.Errors.Back(); e != nil { + l.Errors.Remove(e) + } + } + yy, mm, dd := time.Now().Date() + hh, mn, se := time.Now().Clock() + ele := &Message{ + Level: level, + Date: fmt.Sprintf("%d/%02d/%02d %02d:%02d:%02d", yy, mm, dd, hh, mn, se), + Message: m, + } + l.Errors.PushBack(ele) +} + +func (l *logger) List() <-chan *Message { + c := make(chan *Message, 128) + go func() { + l.Lock.Lock() + defer l.Lock.Unlock() + for ele := l.Errors.Back(); ele != nil; ele = ele.Prev() { + c <- ele.Value.(*Message) + } + c <- nil // Finish channel by nil. + }() + return c +} + +var Logger = &logger{ + Level: INFO, + FileName: ".log.error", + Errors: list.New(), +} + +func SetLogger(file string, level int) { + Logger.Level = level + if file == "" || Logger.FileName == file { + return + } + Logger.FileName = file + fp, err := OpenWrite(file) + if err == nil { + Logger.FileLog = log.New(fp, "", log.LstdFlags) + } else { + Warn("Logger.Init: %s", err) + } +} + +type SubLogger struct { + *logger + Prefix string +} + +func NewSubLogger(prefix string) *SubLogger { + return &SubLogger{ + logger: Logger, + Prefix: prefix, + } +} + +var rLogger = NewSubLogger("root") + +func HasLog(level int) bool { + return rLogger.Has(level) +} + +func Catch(name string) { + if err := recover(); err != nil { + Fatal("%s|PANIC >>> %s <<<", name, err) + Fatal("%s|STACK >>> %s <<<", name, debug.Stack()) + } +} + +func Print(format string, v ...interface{}) { + rLogger.Print(format, v...) +} + +func Log(format string, v ...interface{}) { + rLogger.Log(format, v...) +} + +func Stack(format string, v ...interface{}) { + rLogger.Stack(format, v...) +} + +func Debug(format string, v ...interface{}) { + rLogger.Debug(format, v...) +} + +func Cmd(format string, v ...interface{}) { + rLogger.Cmd(format, v...) +} + +func Info(format string, v ...interface{}) { + rLogger.Info(format, v...) +} + +func Warn(format string, v ...interface{}) { + rLogger.Warn(format, v...) +} + +func Error(format string, v ...interface{}) { + rLogger.Error(format, v...) +} + +func Fatal(format string, v ...interface{}) { + rLogger.Fatal(format, v...) +} + +func (s *SubLogger) Has(level int) bool { + if level >= s.Level { + return true + } + return false +} + +func (s *SubLogger) Fmt(format string) string { + return s.Prefix + "|" + format +} + +func (s *SubLogger) Print(format string, v ...interface{}) { + s.logger.Write(PRINT, s.Fmt(format), v...) +} + +func (s *SubLogger) Log(format string, v ...interface{}) { + s.logger.Write(LOG, s.Fmt(format), v...) 
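// LOG-level records follow the same Write path as every other level: they are printed
// only when Logger.Level is at or below LOG, and they are never mirrored into the
// in-memory Errors list, since Save only runs for INFO and above.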
+} + +func (s *SubLogger) Stack(format string, v ...interface{}) { + s.logger.Write(STACK, s.Fmt(format), v...) +} + +func (s *SubLogger) Debug(format string, v ...interface{}) { + s.logger.Write(DEBUG, s.Fmt(format), v...) +} + +func (s *SubLogger) Flow(format string, v ...interface{}) { + s.logger.Write(FLOW, s.Fmt(format), v...) +} + +func (s *SubLogger) Cmd(format string, v ...interface{}) { + s.logger.Write(CMD, s.Fmt(format), v...) +} + +func (s *SubLogger) Event(format string, v ...interface{}) { + s.logger.Write(EVENT, s.Fmt(format), v...) +} + +func (s *SubLogger) Info(format string, v ...interface{}) { + s.logger.Write(INFO, s.Fmt(format), v...) +} + +func (s *SubLogger) Warn(format string, v ...interface{}) { + s.logger.Write(WARN, s.Fmt(format), v...) +} + +func (s *SubLogger) Error(format string, v ...interface{}) { + s.logger.Write(ERROR, s.Fmt(format), v...) +} + +func (s *SubLogger) Fatal(format string, v ...interface{}) { + s.logger.Write(FATAL, s.Fmt(format), v...) +} + +func init() { + log.SetFlags(0) +} diff --git a/pkg/libol/message.go b/pkg/libol/message.go new file mode 100755 index 0000000..220bed9 --- /dev/null +++ b/pkg/libol/message.go @@ -0,0 +1,481 @@ +package libol + +import ( + "bytes" + "encoding/binary" + "fmt" + "github.com/xtaci/kcp-go/v5" + "net" + "time" +) + +const ( + MaxFrame = 1600 + MaxBuf = 4096 + HlMI = 0x02 + HlLI = 0x04 + HlSize = 0x04 + EthDI = 0x06 + MaxMsg = 1600 * 8 +) + +var MAGIC = []byte{0xff, 0xff} + +const ( + LoginReq = "logi= " + LoginResp = "logi: " + NeighborReq = "neig= " + NeighborResp = "neig: " + IpAddrReq = "ipad= " + IpAddrResp = "ipad: " + LeftReq = "left= " + SignReq = "sign= " + PingReq = "ping= " + PongResp = "pong: " +) + +func isControl(data []byte) bool { + if len(data) < 6 { + return false + } + if bytes.Equal(data[:EthDI], EthZero[:EthDI]) { + return true + } + return false +} + +type FrameProto struct { + // public + Eth *Ether + Vlan *Vlan + Arp *Arp + Ip4 *Ipv4 + Udp *Udp + Tcp *Tcp + Err error + Frame []byte +} + +func (i *FrameProto) Decode() error { + data := i.Frame + if i.Eth, i.Err = NewEtherFromFrame(data); i.Err != nil { + return i.Err + } + data = data[i.Eth.Len:] + if i.Eth.IsVlan() { + if i.Vlan, i.Err = NewVlanFromFrame(data); i.Err != nil { + return i.Err + } + data = data[i.Vlan.Len:] + } + switch i.Eth.Type { + case EthIp4: + if i.Ip4, i.Err = NewIpv4FromFrame(data); i.Err != nil { + return i.Err + } + data = data[i.Ip4.Len:] + switch i.Ip4.Protocol { + case IpTcp: + if i.Tcp, i.Err = NewTcpFromFrame(data); i.Err != nil { + return i.Err + } + case IpUdp: + if i.Udp, i.Err = NewUdpFromFrame(data); i.Err != nil { + return i.Err + } + } + case EthArp: + if i.Arp, i.Err = NewArpFromFrame(data); i.Err != nil { + return i.Err + } + } + return nil +} + +type FrameMessage struct { + seq uint64 + control bool + action string + params []byte + buffer []byte + size int + total int + frame []byte + proto *FrameProto +} + +func NewFrameMessage(maxSize int) *FrameMessage { + if maxSize <= 0 { + maxSize = MaxBuf + } + maxSize += HlSize + EthDI + if HasLog(DEBUG) { + Debug("NewFrameMessage: size %d", maxSize) + } + m := FrameMessage{ + params: make([]byte, 0, 2), + buffer: make([]byte, maxSize), + } + m.frame = m.buffer[HlSize:] + m.total = len(m.frame) + return &m +} + +func NewFrameMessageFromBytes(buffer []byte) *FrameMessage { + m := FrameMessage{ + params: make([]byte, 0, 2), + buffer: buffer, + } + m.frame = m.buffer[HlSize:] + m.total = len(m.frame) + m.size = len(m.frame) + return &m +} + +func (m 
*FrameMessage) Decode() bool { + m.control = isControl(m.frame) + if m.control { + if len(m.frame) < 2*EthDI { + Warn("FrameMessage.Decode: too small message") + } else { + m.action = string(m.frame[EthDI : 2*EthDI]) + m.params = m.frame[2*EthDI:] + } + } + return m.control +} + +func (m *FrameMessage) IsEthernet() bool { + return !m.control +} + +func (m *FrameMessage) IsControl() bool { + return m.control +} + +func (m *FrameMessage) Frame() []byte { + return m.frame +} + +func (m *FrameMessage) String() string { + return fmt.Sprintf("control: %t, frame: %x", m.control, m.frame[:20]) +} + +func (m *FrameMessage) Action() string { + return m.action +} + +func (m *FrameMessage) CmdAndParams() (string, []byte) { + return m.action, m.params +} + +func (m *FrameMessage) Append(data []byte) { + add := len(data) + if m.total-m.size >= add { + copy(m.frame[m.size:], data) + m.size += add + } else { + Warn("FrameMessage.Append: %d not enough buffer", m.total) + } +} + +func (m *FrameMessage) Size() int { + return m.size +} + +func (m *FrameMessage) SetSize(v int) { + m.size = v +} + +func (m *FrameMessage) Proto() (*FrameProto, error) { + if m.proto == nil { + m.proto = &FrameProto{Frame: m.frame} + _ = m.proto.Decode() + } + return m.proto, m.proto.Err +} + +type ControlMessage struct { + seq uint64 + control bool + operator string + action string + params []byte +} + +func NewControlFrame(action string, body []byte) *FrameMessage { + m := NewControlMessage(action[:4], action[4:], body) + return m.Encode() +} + +//operator: request is '= ', and response is ': ' +//action: login, network etc. +//body: json string. +func NewControlMessage(action, opr string, body []byte) *ControlMessage { + c := ControlMessage{ + control: true, + action: action, + params: body, + operator: opr, + } + return &c +} + +func (c *ControlMessage) Encode() *FrameMessage { + p := fmt.Sprintf("%s%s%s", c.action[:4], c.operator[:2], c.params) + frame := NewFrameMessage(len(p)) + frame.control = c.control + frame.action = c.action + c.operator + frame.params = c.params + frame.Append(EthZero[:6]) + frame.Append([]byte(p)) + return frame +} + +type Messager interface { + Send(conn net.Conn, frame *FrameMessage) (int, error) + Receive(conn net.Conn, max, min int) (*FrameMessage, error) + Flush() +} + +type StreamMessagerImpl struct { + timeout time.Duration // ns for read and write deadline. 
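// Framing handled by this messager, as implemented by encode/decode below: a 2-byte
// magic 0xff 0xff, a 2-byte big-endian payload length, then the payload, which alone is
// encrypted when a BlockCrypt is configured. As a worked example, a 3-byte payload
// "abc" goes on the wire as ff ff 00 03 61 62 63 before encryption.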
+ block kcp.BlockCrypt + buffer []byte + bufSize int // default is (1518 + 20+20+14) * 8 +} + +func (s *StreamMessagerImpl) Flush() { + s.buffer = nil +} + +func (s *StreamMessagerImpl) write(conn net.Conn, tmp []byte) (int, error) { + if s.timeout != 0 { + err := conn.SetWriteDeadline(time.Now().Add(s.timeout)) + if err != nil { + return 0, err + } + } + n, err := conn.Write(tmp) + if err != nil { + return 0, err + } + return n, nil +} + +func (s *StreamMessagerImpl) writeX(conn net.Conn, buf []byte) error { + if conn == nil { + return NewErr("connection is nil") + } + offset := 0 + size := len(buf) + left := size - offset + if HasLog(LOG) { + Log("StreamMessagerImpl.writeX: %s %d", conn.RemoteAddr(), size) + Log("StreamMessagerImpl.writeX: %s Data %x", conn.RemoteAddr(), buf) + } + for left > 0 { + tmp := buf[offset:] + if HasLog(LOG) { + Log("StreamMessagerImpl.writeX: tmp %s %d", conn.RemoteAddr(), len(tmp)) + } + n, err := s.write(conn, tmp) + if err != nil { + return err + } + if HasLog(LOG) { + Log("StreamMessagerImpl.writeX: %s snd %d, size %d", conn.RemoteAddr(), n, size) + } + offset += n + left = size - offset + } + return nil +} + +func (s *StreamMessagerImpl) encode(frame *FrameMessage) { + frame.buffer[0] = MAGIC[0] + frame.buffer[1] = MAGIC[1] + binary.BigEndian.PutUint16(frame.buffer[HlMI:HlLI], uint16(frame.size)) + if s.block != nil { + s.block.Encrypt(frame.frame, frame.frame) + } +} + +func (s *StreamMessagerImpl) Send(conn net.Conn, frame *FrameMessage) (int, error) { + s.encode(frame) + fs := frame.size + HlSize + if err := s.writeX(conn, frame.buffer[:fs]); err != nil { + return 0, err + } + return fs, nil +} + +func (s *StreamMessagerImpl) read(conn net.Conn, tmp []byte) (int, error) { + if s.timeout != 0 { + err := conn.SetReadDeadline(time.Now().Add(s.timeout)) + if err != nil { + return 0, err + } + } + n, err := conn.Read(tmp) + if err != nil { + return 0, err + } + return n, nil +} + +//340Mib +func (s *StreamMessagerImpl) readX(conn net.Conn, buf []byte) error { + if conn == nil { + return NewErr("connection is nil") + } + offset := 0 + left := len(buf) + if HasLog(LOG) { + Log("StreamMessagerImpl.readX: %s %d", conn.RemoteAddr(), len(buf)) + } + for left > 0 { + tmp := make([]byte, left) + n, err := s.read(conn, tmp) + if err != nil { + return err + } + copy(buf[offset:], tmp) + offset += n + left -= n + } + if HasLog(LOG) { + Log("StreamMessagerImpl.readX: Data %s %x", conn.RemoteAddr(), buf) + } + return nil +} + +func (s *StreamMessagerImpl) decode(tmp []byte, min int) (*FrameMessage, error) { + ts := len(tmp) + if ts < min { + return nil, nil + } + if !bytes.Equal(tmp[:HlMI], MAGIC[:HlMI]) { + return nil, NewErr("wrong magic") + } + ps := binary.BigEndian.Uint16(tmp[HlMI:HlLI]) + fs := int(ps) + HlSize + if ts >= fs { + s.buffer = tmp[fs:] + if s.block != nil { + s.block.Decrypt(tmp[HlSize:fs], tmp[HlSize:fs]) + } + if HasLog(DEBUG) { + Debug("StreamMessagerImpl.decode: %d %x", fs, tmp[:fs]) + } + return NewFrameMessageFromBytes(tmp[:fs]), nil + } + return nil, nil +} + +// 430Mib +func (s *StreamMessagerImpl) Receive(conn net.Conn, max, min int) (*FrameMessage, error) { + frame, err := s.decode(s.buffer, min) + if err != nil { + return nil, err + } + if frame != nil { // firstly, check buffer has messages. + return frame, nil + } + if s.bufSize == 0 { + s.bufSize = MaxMsg // 1572 * 8 + } + bs := len(s.buffer) + tmp := make([]byte, s.bufSize) + if bs > 0 { + copy(tmp[:bs], s.buffer[:bs]) + } + for { // loop forever until socket error or find one message. 
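// Each pass reads fresh bytes in after the bs bytes carried over from the previous
// attempt and retries decode; when decode finds a complete frame it stashes any trailing
// bytes back into s.buffer, so a partially received next frame is kept for the following
// Receive call rather than dropped.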
+ rn, err := s.read(conn, tmp[bs:]) + if err != nil { + return nil, err + } + rs := bs + rn + frame, err := s.decode(tmp[:rs], min) + if err != nil { + return nil, err + } + if frame != nil { + return frame, nil + } + // If notFound message, continue to read. + bs = rs + } +} + +type PacketMessagerImpl struct { + timeout time.Duration // ns for read and write deadline + block kcp.BlockCrypt + bufSize int // default is (1518 + 20+20+14) * 8 +} + +func (s *PacketMessagerImpl) Flush() { + //TODO +} + +func (s *PacketMessagerImpl) Send(conn net.Conn, frame *FrameMessage) (int, error) { + frame.buffer[0] = MAGIC[0] + frame.buffer[1] = MAGIC[1] + binary.BigEndian.PutUint16(frame.buffer[HlMI:HlLI], uint16(frame.size)) + if s.block != nil { + s.block.Encrypt(frame.frame, frame.frame) + } + if HasLog(DEBUG) { + Debug("PacketMessagerImpl.Send: %s %d %x", conn.RemoteAddr(), frame.size, frame.buffer) + } + if s.timeout != 0 { + err := conn.SetWriteDeadline(time.Now().Add(s.timeout)) + if err != nil { + return 0, err + } + } + if _, err := conn.Write(frame.buffer[:HlSize+frame.size]); err != nil { + return 0, err + } + return frame.size, nil +} + +func (s *PacketMessagerImpl) Receive(conn net.Conn, max, min int) (*FrameMessage, error) { + if s.bufSize == 0 { + s.bufSize = MaxMsg + } + frame := NewFrameMessage(s.bufSize) + if HasLog(DEBUG) { + Debug("PacketMessagerImpl.Receive %s %d", conn.RemoteAddr(), s.timeout) + } + if s.timeout != 0 { + err := conn.SetReadDeadline(time.Now().Add(s.timeout)) + if err != nil { + return nil, err + } + } + n, err := conn.Read(frame.buffer) + if err != nil { + return nil, err + } + if HasLog(DEBUG) { + Debug("PacketMessagerImpl.Receive: %s %x", conn.RemoteAddr(), frame.buffer[:n]) + } + if n <= 4 { + return nil, NewErr("%s: small frame", conn.RemoteAddr()) + } + if !bytes.Equal(frame.buffer[:HlMI], MAGIC[:HlMI]) { + return nil, NewErr("%s: wrong magic", conn.RemoteAddr()) + } + size := int(binary.BigEndian.Uint16(frame.buffer[HlMI:HlLI])) + if size > max || size < min { + return nil, NewErr("%s: wrong size %d", conn.RemoteAddr(), size) + } + tmp := frame.buffer[HlSize : HlSize+size] + if s.block != nil { + s.block.Decrypt(tmp, tmp) + } + frame.size = size + frame.frame = tmp + return frame, nil +} diff --git a/pkg/libol/nl_linux.go b/pkg/libol/nl_linux.go new file mode 100755 index 0000000..d337541 --- /dev/null +++ b/pkg/libol/nl_linux.go @@ -0,0 +1,45 @@ +package libol + +import ( + "github.com/vishvananda/netlink" + "net" +) + +func GetLocalByGw(addr string) (net.IP, error) { + local := net.IP{} + routes, err := netlink.RouteList(nil, netlink.FAMILY_V4) + if err != nil { + return nil, err + } + dest := net.ParseIP(addr) + if dest == nil { + Warn("GetLocalByGW: parseIP %s failed", addr) + return nil, nil + } + find := netlink.Route{LinkIndex: -1} + for _, rte := range routes { + if rte.Dst != nil && !rte.Dst.Contains(dest) { + continue + } + if find.LinkIndex != -1 && find.Priority < rte.Priority { + continue + } + find = rte + } + if find.LinkIndex != -1 { + index := find.LinkIndex + source := find.Gw + if source == nil { + source = find.Src + } + link, _ := netlink.LinkByIndex(index) + address, _ := netlink.AddrList(link, netlink.FAMILY_V4) + for _, ifAddr := range address { + if ifAddr.Contains(source) { + local = ifAddr.IP + } + } + } + Info("GetLocalByGw: find %s on %s", addr, local) + return local, nil +} diff --git a/pkg/libol/nl_others.go b/pkg/libol/nl_others.go new file mode 100755 index 0000000..c295454 --- /dev/null +++ b/pkg/libol/nl_others.go @@ -0,0 
+1,9 @@ +// +build !linux + +package libol + +import "net" + +func GetLocalByGw(addr string) (net.IP, error) { + return nil, NewErr("GetLocalByGw notSupport") +} diff --git a/pkg/libol/promise.go b/pkg/libol/promise.go new file mode 100755 index 0000000..5195e0d --- /dev/null +++ b/pkg/libol/promise.go @@ -0,0 +1,41 @@ +package libol + +import "time" + +type Promise struct { + Count int + MaxTry int + First time.Duration // the delay time. + MinInt time.Duration // the normal time. + MaxInt time.Duration // the max delay time. +} + +func NewPromise(first, min, max time.Duration) *Promise { + return &Promise{ + First: first, + MaxInt: max, + MinInt: min, + } +} + +func (p *Promise) Done(call func() error) { + for { + p.Count++ + if p.MaxTry > 0 && p.Count > p.MaxTry { + return + } + if err := call(); err == nil { + return + } + time.Sleep(p.First) + if p.First < p.MaxInt { + p.First += p.MinInt + } + } +} + +func (p *Promise) Go(call func() error) { + Go(func() { + p.Done(call) + }) +} diff --git a/pkg/libol/protocol.go b/pkg/libol/protocol.go new file mode 100755 index 0000000..a161ffd --- /dev/null +++ b/pkg/libol/protocol.go @@ -0,0 +1,532 @@ +package libol + +import ( + "encoding/binary" + "fmt" +) + +var ( + EthZero = []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} + EthAll = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +const ( + EthArp = 0x0806 + EthIp4 = 0x0800 + EthIp6 = 0x86DD + EthVlan = 0x8100 +) + +type Ether struct { + Dst []byte + Src []byte + Type uint16 + Len int +} + +const ( + EtherLen = 14 + VlanLen = 4 + TcpLen = 20 + Ipv4Len = 20 + UdpLen = 8 +) + +func NewEther(t uint16) (e *Ether) { + e = &Ether{ + Type: t, + Src: make([]byte, 6), + Dst: make([]byte, 6), + Len: EtherLen, + } + return +} + +func NewEtherArp() (e *Ether) { + return NewEther(EthArp) +} + +func NewEtherIP4() (e *Ether) { + return NewEther(EthIp4) +} + +func NewEtherFromFrame(frame []byte) (e *Ether, err error) { + e = NewEther(0) + err = e.Decode(frame) + return +} + +func (e *Ether) Decode(frame []byte) error { + if len(frame) < 14 { + return NewErr("Ether.Decode too small header: %d", len(frame)) + } + + copy(e.Dst[:6], frame[:6]) + copy(e.Src[:6], frame[6:12]) + e.Type = binary.BigEndian.Uint16(frame[12:14]) + e.Len = 14 + + return nil +} + +func (e *Ether) Encode() []byte { + buffer := make([]byte, 14) + + copy(buffer[:6], e.Dst[:6]) + copy(buffer[6:12], e.Src[:6]) + binary.BigEndian.PutUint16(buffer[12:14], e.Type) + + return buffer[:14] +} + +func (e *Ether) IsVlan() bool { + return e.Type == EthVlan +} + +func (e *Ether) IsArp() bool { + return e.Type == EthArp +} + +func (e *Ether) IsIP4() bool { + return e.Type == EthIp4 +} + +type Vlan struct { + Tci uint16 + Vid uint16 + Pro uint16 + Len int +} + +func NewVlan(tci uint16, vid uint16) (n *Vlan) { + n = &Vlan{ + Tci: tci, + Vid: vid, + Len: VlanLen, + } + + return +} + +func NewVlanFromFrame(frame []byte) (n *Vlan, err error) { + n = &Vlan{ + Len: VlanLen, + } + err = n.Decode(frame) + return +} + +func (n *Vlan) Decode(frame []byte) error { + if len(frame) < VlanLen { + return NewErr("Vlan.Decode: too small header") + } + + v := binary.BigEndian.Uint16(frame[0:2]) + n.Tci = uint16(v >> 12) + n.Vid = uint16(0x0fff & v) + n.Pro = binary.BigEndian.Uint16(frame[2:4]) + + return nil +} + +func (n *Vlan) Encode() []byte { + buffer := make([]byte, 16) + + v := (n.Tci << 12) | n.Vid + binary.BigEndian.PutUint16(buffer[0:2], v) + binary.BigEndian.PutUint16(buffer[2:4], n.Pro) + + return buffer[:4] +} + +const ( + ArpRequest = 1 + ArpReply = 2 +) + +const 
( + ArpHrdNetrom = 0 + ArpHrdEther = 1 +) + +type Arp struct { + HrdCode uint16 // format hardware address + ProCode uint16 // format protocol address + HrdLen uint8 // length of hardware address + ProLen uint8 // length of protocol address + OpCode uint16 // ARP Op(command) + + SHwAddr []byte // sender hardware address. + SIpAddr []byte // sender IP address. + THwAddr []byte // target hardware address. + TIpAddr []byte // target IP address. + Len int +} + +func NewArp() (a *Arp) { + a = &Arp{ + HrdCode: ArpHrdEther, + ProCode: EthIp4, + HrdLen: 6, + ProLen: 4, + OpCode: ArpRequest, + Len: 0, + SHwAddr: make([]byte, 6), + SIpAddr: make([]byte, 4), + THwAddr: make([]byte, 6), + TIpAddr: make([]byte, 4), + } + + return +} + +func NewArpFromFrame(frame []byte) (a *Arp, err error) { + a = NewArp() + err = a.Decode(frame) + return +} + +func (a *Arp) Decode(frame []byte) error { + var err error + + if len(frame) < 8 { + return NewErr("Arp.Decode: too small header: %d", len(frame)) + } + + a.HrdCode = binary.BigEndian.Uint16(frame[0:2]) + a.ProCode = binary.BigEndian.Uint16(frame[2:4]) + a.HrdLen = uint8(frame[4]) + a.ProLen = uint8(frame[5]) + if a.HrdLen != 6 || a.ProLen != 4 { + return NewErr("Arp.Decode: AddrLen: %d,%d", a.HrdLen, a.ProLen) + } + a.OpCode = binary.BigEndian.Uint16(frame[6:8]) + + p := uint8(8) + if len(frame) < int(p+2*(a.HrdLen+a.ProLen)) { + return NewErr("Arp.Decode: too small frame: %d", len(frame)) + } + + copy(a.SHwAddr[:6], frame[p:p+6]) + p += a.HrdLen + copy(a.SIpAddr[:4], frame[p:p+4]) + p += a.ProLen + copy(a.THwAddr[:6], frame[p:p+6]) + p += a.HrdLen + copy(a.TIpAddr[:4], frame[p:p+4]) + p += a.ProLen + + a.Len = int(p) + + return err +} + +func (a *Arp) Encode() []byte { + buffer := make([]byte, 1024) + + binary.BigEndian.PutUint16(buffer[0:2], a.HrdCode) + binary.BigEndian.PutUint16(buffer[2:4], a.ProCode) + buffer[4] = byte(a.HrdLen) + buffer[5] = byte(a.ProLen) + binary.BigEndian.PutUint16(buffer[6:8], a.OpCode) + + p := uint8(8) + copy(buffer[p:p+a.HrdLen], a.SHwAddr[0:a.HrdLen]) + p += a.HrdLen + copy(buffer[p:p+a.ProLen], a.SIpAddr[0:a.ProLen]) + p += a.ProLen + + copy(buffer[p:p+a.HrdLen], a.THwAddr[0:a.HrdLen]) + p += a.HrdLen + copy(buffer[p:p+a.ProLen], a.TIpAddr[0:a.ProLen]) + p += a.ProLen + + a.Len = int(p) + + return buffer[:p] +} + +func (a *Arp) IsIP4() bool { + return a.ProCode == EthIp4 +} + +func (a *Arp) IsReply() bool { + return a.OpCode == ArpReply +} + +func (a *Arp) IsRequest() bool { + return a.OpCode == ArpRequest +} + +const ( + Ipv4Ver = 0x04 + Ipv6Ver = 0x06 +) + +const ( + IpIcmp = 0x01 + IpIgmp = 0x02 + IpIpIp = 0x04 + IpTcp = 0x06 + IpUdp = 0x11 + IpEsp = 0x32 + IpAh = 0x33 + IpOspf = 0x59 + IpPim = 0x67 + IpVrrp = 0x70 + IpIsis = 0x7c +) + +func IpProto2Str(proto uint8) string { + switch proto { + case IpIcmp: + return "icmp" + case IpIgmp: + return "igmp" + case IpIpIp: + return "ipip" + case IpEsp: + return "esp" + case IpAh: + return "ah" + case IpOspf: + return "ospf" + case IpIsis: + return "isis" + case IpUdp: + return "udp" + case IpTcp: + return "tcp" + case IpPim: + return "pim" + case IpVrrp: + return "vrrp" + default: + return fmt.Sprintf("%02x", proto) + } +} + +type Ipv4 struct { + Version uint8 //4bite v4: 0100, v6: 0110 + HeaderLen uint8 //4bit 15*4 + ToS uint8 //Type of Service + TotalLen uint16 + Identifier uint16 + Flag uint16 //3bit Z|DF|MF + Offset uint16 //13bit Fragment offset + ToL uint8 //Time of Live + Protocol uint8 + HeaderChecksum uint16 //Header Checksum + Source []byte + Destination []byte + Options 
uint32 //Reserved + Len int +} + +func NewIpv4() (i *Ipv4) { + i = &Ipv4{ + Version: 0x04, + HeaderLen: 0x05, + ToS: 0, + TotalLen: 0, + Identifier: 0, + Flag: 0, + Offset: 0, + ToL: 0xff, + Protocol: 0, + HeaderChecksum: 0, + Options: 0, + Len: Ipv4Len, + Source: make([]byte, 4), + Destination: make([]byte, 4), + } + return +} + +func NewIpv4FromFrame(frame []byte) (i *Ipv4, err error) { + i = NewIpv4() + err = i.Decode(frame) + return +} + +func (i *Ipv4) Decode(frame []byte) error { + if len(frame) < Ipv4Len { + return NewErr("Ipv4.Decode: too small header: %d", len(frame)) + } + + h := uint8(frame[0]) + i.Version = h >> 4 + i.HeaderLen = h & 0x0f + i.ToS = uint8(frame[1]) + i.TotalLen = binary.BigEndian.Uint16(frame[2:4]) + i.Identifier = binary.BigEndian.Uint16(frame[4:6]) + f := binary.BigEndian.Uint16(frame[6:8]) + i.Offset = f & 0x1fFf + i.Flag = f >> 13 + i.ToL = uint8(frame[8]) + i.Protocol = uint8(frame[9]) + i.HeaderChecksum = binary.BigEndian.Uint16(frame[10:12]) + if !i.IsIP4() { + return NewErr("Ipv4.Decode: not right ipv4 version: 0x%x", i.Version) + } + copy(i.Source[:4], frame[12:16]) + copy(i.Destination[:4], frame[16:20]) + + return nil +} + +func (i *Ipv4) Encode() []byte { + buffer := make([]byte, 32) + + h := uint8((i.Version << 4) | i.HeaderLen) + buffer[0] = h + buffer[1] = i.ToS + binary.BigEndian.PutUint16(buffer[2:4], i.TotalLen) + binary.BigEndian.PutUint16(buffer[4:6], i.Identifier) + f := uint16((i.Flag << 13) | i.Offset) + binary.BigEndian.PutUint16(buffer[6:8], f) + buffer[8] = i.ToL + buffer[9] = i.Protocol + binary.BigEndian.PutUint16(buffer[10:12], i.HeaderChecksum) + copy(buffer[12:16], i.Source[:4]) + copy(buffer[16:20], i.Destination[:4]) + + return buffer[:i.Len] +} + +func (i *Ipv4) IsIP4() bool { + return i.Version == Ipv4Ver +} + +const ( + TcpUrg = 0x20 + TcpAck = 0x10 + TcpPsh = 0x08 + TcpRst = 0x04 + TcpSyn = 0x02 + TcpFin = 0x01 +) + +type Tcp struct { + Source uint16 + Destination uint16 + Sequence uint32 + Acknowledgment uint32 + DataOffset uint8 + ControlBits uint8 + Window uint16 + Checksum uint16 + UrgentPointer uint16 + Options []byte + Padding []byte + Len int +} + +func NewTcp() (t *Tcp) { + t = &Tcp{ + Source: 0, + Destination: 0, + Sequence: 0, + Acknowledgment: 0, + DataOffset: 0, + ControlBits: 0, + Window: 0, + Checksum: 0, + UrgentPointer: 0, + Len: TcpLen, + } + return +} + +func NewTcpFromFrame(frame []byte) (t *Tcp, err error) { + t = NewTcp() + err = t.Decode(frame) + return +} + +func (t *Tcp) Decode(frame []byte) error { + if len(frame) < TcpLen { + return NewErr("Tcp.Decode: too small header: %d", len(frame)) + } + + t.Source = binary.BigEndian.Uint16(frame[0:2]) + t.Destination = binary.BigEndian.Uint16(frame[2:4]) + t.Sequence = binary.BigEndian.Uint32(frame[4:8]) + t.Acknowledgment = binary.BigEndian.Uint32(frame[8:12]) + t.DataOffset = uint8(frame[12]) + t.ControlBits = uint8(frame[13]) + t.Window = binary.BigEndian.Uint16(frame[14:16]) + t.Checksum = binary.BigEndian.Uint16(frame[16:18]) + t.UrgentPointer = binary.BigEndian.Uint16(frame[18:20]) + + return nil +} + +func (t *Tcp) Encode() []byte { + buffer := make([]byte, 32) + + binary.BigEndian.PutUint16(buffer[0:2], t.Source) + binary.BigEndian.PutUint16(buffer[2:4], t.Destination) + binary.BigEndian.PutUint32(buffer[4:8], t.Sequence) + binary.BigEndian.PutUint32(buffer[8:12], t.Acknowledgment) + buffer[12] = t.DataOffset + buffer[13] = t.ControlBits + binary.BigEndian.PutUint16(buffer[14:16], t.Window) + binary.BigEndian.PutUint16(buffer[16:18], t.Checksum) + 
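// The urgent pointer below completes the fixed 20-byte header; Options and Padding are
// never serialized here, so Encode always returns exactly TcpLen bytes.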
binary.BigEndian.PutUint16(buffer[18:20], t.UrgentPointer) + + return buffer[:t.Len] +} + +func (t *Tcp) HasFlag(flag uint8) bool { + return t.ControlBits&flag == flag +} + +type Udp struct { + Source uint16 + Destination uint16 + Length uint16 + Checksum uint16 + Len int +} + +func NewUdp() (u *Udp) { + u = &Udp{ + Source: 0, + Destination: 0, + Length: 0, + Checksum: 0, + Len: UdpLen, + } + return +} + +func NewUdpFromFrame(frame []byte) (u *Udp, err error) { + u = NewUdp() + err = u.Decode(frame) + return +} + +func (u *Udp) Decode(frame []byte) error { + if len(frame) < UdpLen { + return NewErr("Udp.Decode: too small header: %d", len(frame)) + } + + u.Source = binary.BigEndian.Uint16(frame[0:2]) + u.Destination = binary.BigEndian.Uint16(frame[2:4]) + u.Length = binary.BigEndian.Uint16(frame[4:6]) + u.Checksum = binary.BigEndian.Uint16(frame[6:8]) + + return nil +} + +func (u *Udp) Encode() []byte { + buffer := make([]byte, 32) + + binary.BigEndian.PutUint16(buffer[0:2], u.Source) + binary.BigEndian.PutUint16(buffer[2:4], u.Destination) + binary.BigEndian.PutUint16(buffer[4:6], u.Length) + binary.BigEndian.PutUint16(buffer[6:8], u.Checksum) + + return buffer[:u.Len] +} diff --git a/pkg/libol/safe.go b/pkg/libol/safe.go new file mode 100755 index 0000000..709b66a --- /dev/null +++ b/pkg/libol/safe.go @@ -0,0 +1,271 @@ +package libol + +import "sync" + +//m := NewSafeStrStr(1024) +//m.Set("hi", "1") +//a := "3" +//m.Set("hip", a) +//c := m.Get("hip") +//fmt.Printf("%s\n%s\n", m, c) + +type SafeStrStr struct { + size int + data map[string]string + lock sync.RWMutex +} + +func NewSafeStrStr(size int) *SafeStrStr { + calSize := size + if calSize == 0 { + calSize = 128 + } + return &SafeStrStr{ + size: size, + data: make(map[string]string, calSize), + } +} + +func (sm *SafeStrStr) Len() int { + sm.lock.RLock() + defer sm.lock.RUnlock() + return len(sm.data) +} + +func (sm *SafeStrStr) Reset(k, v string) error { + sm.lock.Lock() + defer sm.lock.Unlock() + + if sm.size != 0 && len(sm.data) >= sm.size { + return NewErr("SafeStrStr.Set already full") + } + sm.data[k] = v + return nil +} + +func (sm *SafeStrStr) Set(k, v string) error { + sm.lock.Lock() + defer sm.lock.Unlock() + + if sm.size != 0 && len(sm.data) >= sm.size { + return NewErr("SafeStrStr.Set already full") + } + if _, ok := sm.data[k]; !ok { + sm.data[k] = v + } + return nil +} + +func (sm *SafeStrStr) Del(k string) { + sm.lock.Lock() + defer sm.lock.Unlock() + + if _, ok := sm.data[k]; ok { + delete(sm.data, k) + } +} + +func (sm *SafeStrStr) Get(k string) string { + sm.lock.RLock() + defer sm.lock.RUnlock() + return sm.data[k] +} + +func (sm *SafeStrStr) GetEx(k string) (string, bool) { + sm.lock.RLock() + defer sm.lock.RUnlock() + v, ok := sm.data[k] + return v, ok +} + +func (sm *SafeStrStr) Iter(proc func(k, v string)) int { + sm.lock.RLock() + defer sm.lock.RUnlock() + + count := 0 + for k, u := range sm.data { + if k != "" { + proc(k, u) + count += 1 + } + } + return count +} + +type SafeStrMap struct { + size int + data map[string]interface{} + lock sync.RWMutex +} + +func NewSafeStrMap(size int) *SafeStrMap { + calSize := size + if calSize == 0 { + calSize = 128 + } + return &SafeStrMap{ + size: size, + data: make(map[string]interface{}, calSize), + } +} + +func (sm *SafeStrMap) Len() int { + sm.lock.RLock() + defer sm.lock.RUnlock() + return len(sm.data) +} + +func (sm *SafeStrMap) add(k string, v interface{}) error { + if sm.size != 0 && len(sm.data) >= sm.size { + return NewErr("SafeStrMap.Set already full") + } + if _, 
ok := sm.data[k]; !ok { + sm.data[k] = v + } + return nil +} + +func (sm *SafeStrMap) Set(k string, v interface{}) error { + sm.lock.Lock() + defer sm.lock.Unlock() + + return sm.add(k, v) +} + +func (sm *SafeStrMap) Mod(k string, v interface{}) error { + sm.lock.Lock() + defer sm.lock.Unlock() + + if _, ok := sm.data[k]; !ok { + return sm.add(k, v) + } + sm.data[k] = v + return nil +} + +func (sm *SafeStrMap) Del(k string) { + sm.lock.Lock() + defer sm.lock.Unlock() + + if _, ok := sm.data[k]; ok { + delete(sm.data, k) + } +} + +func (sm *SafeStrMap) Clear() { + sm.lock.Lock() + defer sm.lock.Unlock() + sm.data = make(map[string]interface{}, sm.size) +} + +func (sm *SafeStrMap) Get(k string) interface{} { + sm.lock.RLock() + defer sm.lock.RUnlock() + return sm.data[k] +} + +func (sm *SafeStrMap) GetEx(k string) (interface{}, bool) { + sm.lock.RLock() + defer sm.lock.RUnlock() + v, ok := sm.data[k] + return v, ok +} + +func (sm *SafeStrMap) Iter(proc func(k string, v interface{})) int { + sm.lock.RLock() + defer sm.lock.RUnlock() + + count := 0 + for k, u := range sm.data { + if u != nil { + proc(k, u) + count += 1 + } + } + return count +} + +// a := SafeVar +// a.Set(0x01) +// a.Get().(int) + +type SafeVar struct { + data interface{} + lock sync.RWMutex +} + +func NewSafeVar() *SafeVar { + return &SafeVar{} +} + +func (sv *SafeVar) Set(v interface{}) { + sv.lock.Lock() + defer sv.lock.Unlock() + sv.data = v +} + +func (sv *SafeVar) Get() interface{} { + sv.lock.RLock() + defer sv.lock.RUnlock() + return sv.data +} + +func (sv *SafeVar) GetWithFunc(proc func(v interface{})) { + sv.lock.RLock() + defer sv.lock.RUnlock() + proc(sv.data) +} + +type SafeStrInt64 struct { + lock sync.RWMutex + data map[string]int64 +} + +func NewSafeStrInt64() *SafeStrInt64 { + return &SafeStrInt64{ + data: make(map[string]int64, 32), + } +} + +func (s *SafeStrInt64) Get(k string) int64 { + s.lock.RLock() + defer s.lock.RUnlock() + if v, ok := s.data[k]; ok { + return v + } + return 0 +} + +func (s *SafeStrInt64) Set(k string, v int64) { + s.lock.Lock() + defer s.lock.Unlock() + s.data[k] = v +} + +func (s *SafeStrInt64) Add(k string, v int64) { + s.lock.Lock() + defer s.lock.Unlock() + if _, ok := s.data[k]; ok { + s.data[k] += v + } else { + s.data[k] = v + } +} + +func (s *SafeStrInt64) Copy(dst map[string]int64) { + s.lock.RLock() + defer s.lock.RUnlock() + for k, v := range s.data { + dst[k] = v + } +} + +func (s *SafeStrInt64) Data() map[string]int64 { + s.lock.RLock() + defer s.lock.RUnlock() + dst := make(map[string]int64, 32) + for k, v := range s.data { + dst[k] = v + } + return dst +} diff --git a/pkg/libol/safe_test.go b/pkg/libol/safe_test.go new file mode 100755 index 0000000..fdc666c --- /dev/null +++ b/pkg/libol/safe_test.go @@ -0,0 +1,187 @@ +package libol + +import ( + "fmt" + "github.com/stretchr/testify/assert" + "strconv" + "sync" + "testing" +) + +func TestSafeStrStr(t *testing.T) { + m := NewSafeStrStr(1024) + _ = m.Set("hi", "1") + i := m.Get("hi") + assert.Equal(t, i, "1", "be the same.") + + a := "3" + _ = m.Set("hip", a) + c := m.Get("hip") + assert.Equal(t, c, a, "be the same.") + assert.Equal(t, 2, m.Len(), "be the same.") + + for i := 0; i < 1024; i++ { + _ = m.Set(fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)) + } + assert.Equal(t, 1024, m.Len(), "") + fmt.Printf("TestSafeStrStr.size: %d\n", m.Len()) + for i := 0; i < 1024; i++ { + m.Del(fmt.Sprintf("%d", i)) + } + assert.Equal(t, 2, m.Len(), "") + + m.Del("hi") + ii := m.Get("hi") + assert.Equal(t, ii, "", "be the same.") + 
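// Get on a deleted or missing key returns the zero value rather than an error, so only
// "hip" is expected to remain at this point.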
assert.Equal(t, 1, m.Len(), "be the same.") + + iii := m.Get("hello") + assert.Equal(t, iii, "", "be the same.") +} + +func TestZeroMapSet(t *testing.T) { + m := make(map[string]int, 32) + m["hi"] = 1 + i := m["hi"] + assert.Equal(t, i, 1, "be the same.") + + m["hi"] = 3 + c := m["hi"] + assert.Equal(t, c, 3, "be the same.") + assert.Equal(t, 1, len(m), "be the same.") +} + +func TestZeroSafeStrStrSet(t *testing.T) { + m := NewSafeStrStr(0) + _ = m.Set("hi", "1") + i := m.Get("hi") + assert.Equal(t, i, "1", "be the same.") + + _ = m.Set("hi", "3") + c := m.Get("hi") + assert.Equal(t, c, "1", "be the same.") + assert.Equal(t, 1, m.Len(), "be the same.") +} + +func TestZeroSafeStrMapSet(t *testing.T) { + m := NewSafeStrMap(0) + _ = m.Set("hi", 1) + i := m.Get("hi") + assert.Equal(t, i, 1, "be the same.") + + _ = m.Set("hi", 3) + c := m.Get("hi").(int) + assert.Equal(t, c, 1, "be the same.") + assert.Equal(t, 1, m.Len(), "be the same.") +} + +func TestZeroSafeStrStr(t *testing.T) { + m := NewSafeStrStr(0) + _ = m.Set("hi", "1") + i := m.Get("hi") + assert.Equal(t, i, "1", "be the same.") + + a := "3" + _ = m.Set("hip", a) + c := m.Get("hip") + assert.Equal(t, c, a, "be the same.") + assert.Equal(t, 2, m.Len(), "be the same.") + + for i := 0; i < 1024; i++ { + _ = m.Set(fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)) + } + assert.Equal(t, 1026, m.Len(), "") + fmt.Printf("TestZeroSafeStrStr.size: %d\n", m.Len()) + for i := 0; i < 1024; i++ { + m.Del(fmt.Sprintf("%d", i)) + } + assert.Equal(t, 2, m.Len(), "") + m.Del("hi") + ii := m.Get("hi") + assert.Equal(t, ii, "", "be the same.") + assert.Equal(t, 1, m.Len(), "be the same.") + + iii := m.Get("hello") + assert.Equal(t, iii, "", "be the same.") +} + +func TestZeroSafeStrStrIter(t *testing.T) { + m := NewSafeStrStr(0) + c := 0 + for i := 0; i < 10; i++ { + c += i + _ = m.Set(fmt.Sprintf("%d", i), fmt.Sprintf("%d", i)) + } + ct := 0 + m.Iter(func(k string, v string) { + i, _ := strconv.Atoi(v) + ct += i + }) + assert.Equal(t, ct, c, "be the same") + + ms := NewSafeStrMap(0) + cm := 0 + for i := 1024; i < 1024+1024; i++ { + cm += i + _ = ms.Set(fmt.Sprintf("%d", i), i) + } + cmt := 0 + ms.Iter(func(k string, v interface{}) { + cmt += v.(int) + }) + assert.Equal(t, cmt, cm, "be the same") +} + +func TestSafeVar(t *testing.T) { + v := NewSafeVar() + a := 3 + c := 0 + v.Set(2) + v.GetWithFunc(func(v interface{}) { + c = a + v.(int) + }) + assert.Equal(t, 5, c, "") +} + +func BenchmarkMapGet(b *testing.B) { + m := make(map[string]int, 2) + m["hi"] = 2 + + for i := 0; i < b.N; i++ { + v := m["hi"] + assert.Equal(b, v, 2, "") + } +} + +func BenchmarkMapGetWithLock(b *testing.B) { + m := make(map[string]int, 2) + m["hi"] = 2 + lock := sync.RWMutex{} + + for i := 0; i < b.N; i++ { + lock.RLock() + v := m["hi"] + lock.RUnlock() + assert.Equal(b, v, 2, "") + } +} + +func BenchmarkSafeStrStrGet(b *testing.B) { + m := NewSafeStrStr(2) + _ = m.Set("hi", "2") + + for i := 0; i < b.N; i++ { + v := m.Get("hi") + assert.Equal(b, v, "2", "") + } +} + +func BenchmarkSafeStrMapGet(b *testing.B) { + m := NewSafeStrMap(2) + _ = m.Set("hi", 2) + + for i := 0; i < b.N; i++ { + v := m.Get("hi").(int) + assert.Equal(b, v, 2, "") + } +} diff --git a/pkg/libol/socket.go b/pkg/libol/socket.go new file mode 100755 index 0000000..a6d928d --- /dev/null +++ b/pkg/libol/socket.go @@ -0,0 +1,519 @@ +package libol + +import ( + "net" + "sync" + "time" +) + +const ( + ClInit = 0x00 + ClConnected = 0x01 + ClUnAuth = 0x02 + ClAuth = 0x03 + ClConnecting = 0x04 + ClTerminal = 0x05 + 
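// A rough lifecycle, inferred from the implementations below: ClInit -> ClConnecting ->
// ClConnected -> ClAuth or ClUnAuth, ending in ClClosed (reconnectable) or ClTerminal
// (Retry refuses to reconnect terminated or unauthenticated clients).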
ClClosed = 0x06 +) + +type SocketStatus uint8 + +func (s SocketStatus) String() string { + switch s { + case ClInit: + return "initialized" + case ClConnected: + return "connected" + case ClUnAuth: + return "unauthenticated" + case ClAuth: + return "authenticated" + case ClClosed: + return "closed" + case ClConnecting: + return "connecting" + case ClTerminal: + return "terminal" + } + return "" +} + +// Socket Client Interface and Implement + +const ( + CsSendOkay = "send" + CsRecvOkay = "recv" + CsSendError = "error" + CsDropped = "dropped" +) + +type ClientListener struct { + OnClose func(client SocketClient) error + OnConnected func(client SocketClient) error + OnStatus func(client SocketClient, old, new SocketStatus) +} + +type SocketClient interface { + LocalAddr() string + RemoteAddr() string + Connect() error + Close() + WriteMsg(frame *FrameMessage) error + ReadMsg() (*FrameMessage, error) + UpTime() int64 + AliveTime() int64 + String() string + Terminal() + Private() interface{} + SetPrivate(v interface{}) + Status() SocketStatus + SetStatus(v SocketStatus) + MaxSize() int + SetMaxSize(value int) + MinSize() int + IsOk() bool + Have(status SocketStatus) bool + Statistics() map[string]int64 + SetListener(listener ClientListener) + SetTimeout(v int64) + Out() *SubLogger +} + +type StreamSocket struct { + message Messager + connection net.Conn + statistics *SafeStrInt64 + maxSize int + minSize int + out *SubLogger + remoteAddr string + localAddr string + address string +} + +func (t *StreamSocket) LocalAddr() string { + return t.localAddr +} + +func (t *StreamSocket) RemoteAddr() string { + return t.remoteAddr +} + +func (t *StreamSocket) String() string { + return t.address +} + +func (t *StreamSocket) IsOk() bool { + return t.connection != nil +} + +func (t *StreamSocket) WriteMsg(frame *FrameMessage) error { + if !t.IsOk() { + t.statistics.Add(CsDropped, 1) + return NewErr("%s not okay", t) + } + if frame.IsControl() { + action, params := frame.CmdAndParams() + Cmd("StreamSocket.WriteMsg: %s%s", action, params) + } + if t.message == nil { // default is stream message + t.message = &StreamMessagerImpl{} + } + size, err := t.message.Send(t.connection, frame) + if err != nil { + t.statistics.Add(CsSendError, 1) + return err + } + t.statistics.Add(CsSendOkay, int64(size)) + return nil +} + +func (t *StreamSocket) ReadMsg() (*FrameMessage, error) { + if HasLog(LOG) { + Log("StreamSocket.ReadMsg: %s", t) + } + if !t.IsOk() { + return nil, NewErr("%s not okay", t) + } + if t.message == nil { // default is stream message + t.message = &StreamMessagerImpl{} + } + frame, err := t.message.Receive(t.connection, t.maxSize, t.minSize) + if err != nil { + return nil, err + } + size := len(frame.frame) + t.statistics.Add(CsRecvOkay, int64(size)) + return frame, nil +} + +type SocketClientImpl struct { + *StreamSocket + lock sync.RWMutex + listener ClientListener + newTime int64 + connectedTime int64 + private interface{} + status SocketStatus + timeout int64 // sec for read and write timeout +} + +func NewSocketClient(address string, message Messager) *SocketClientImpl { + return &SocketClientImpl{ + StreamSocket: &StreamSocket{ + maxSize: 1514, + minSize: 15, + message: message, + statistics: NewSafeStrInt64(), + out: NewSubLogger(address), + remoteAddr: address, + address: address, + }, + newTime: time.Now().Unix(), + status: ClInit, + } +} + +// MUST IMPLEMENT +func (s *SocketClientImpl) Connect() error { + return nil +} + +// MUST IMPLEMENT +func (s *SocketClientImpl) Close() { +} + +// MUST 
IMPLEMENT +func (s *SocketClientImpl) Terminal() { +} + +func (s *SocketClientImpl) Out() *SubLogger { + if s.out == nil { + s.out = NewSubLogger(s.address) + } + return s.out +} + +func (s *SocketClientImpl) Retry() bool { + s.lock.Lock() + defer s.lock.Unlock() + if s.connection != nil || + s.status == ClTerminal || + s.status == ClUnAuth { + return false + } + s.status = ClConnecting + return true +} + +func (s *SocketClientImpl) Status() SocketStatus { + s.lock.RLock() + defer s.lock.RUnlock() + return s.status +} + +func (s *SocketClientImpl) UpTime() int64 { + return time.Now().Unix() - s.newTime +} + +func (s *SocketClientImpl) AliveTime() int64 { + if s.connectedTime == 0 { + return 0 + } + return time.Now().Unix() - s.connectedTime +} + +func (s *SocketClientImpl) Private() interface{} { + s.lock.RLock() + defer s.lock.RUnlock() + return s.private +} + +func (s *SocketClientImpl) SetPrivate(v interface{}) { + s.lock.Lock() + defer s.lock.Unlock() + s.private = v +} + +func (s *SocketClientImpl) MaxSize() int { + return s.maxSize +} + +func (s *SocketClientImpl) SetMaxSize(value int) { + s.maxSize = value +} + +func (s *SocketClientImpl) MinSize() int { + return s.minSize +} + +func (s *SocketClientImpl) Have(state SocketStatus) bool { + return s.Status() == state +} + +func (s *SocketClientImpl) Statistics() map[string]int64 { + sts := make(map[string]int64) + s.statistics.Copy(sts) + return sts +} + +func (s *SocketClientImpl) SetListener(listener ClientListener) { + s.listener = listener +} + +func (s *SocketClientImpl) SetTimeout(v int64) { + s.timeout = v +} + +func (s *SocketClientImpl) updateConn(conn net.Conn) { + if conn != nil { + s.connection = conn + s.connectedTime = time.Now().Unix() + s.localAddr = conn.LocalAddr().String() + s.remoteAddr = conn.RemoteAddr().String() + } else { + if s.connection != nil { + _ = s.connection.Close() + } + s.connection = nil + s.localAddr = "" + s.remoteAddr = "" + s.message.Flush() + } + s.out.Event("SocketClientImpl.updateConn: %s %s", s.localAddr, s.remoteAddr) +} + +func (s *SocketClientImpl) SetConnection(conn net.Conn) { + s.lock.Lock() + defer s.lock.Unlock() + s.updateConn(conn) + s.status = ClConnected +} + +// MUST IMPLEMENT +func (s *SocketClientImpl) SetStatus(v SocketStatus) { +} + +// Socket Server Interface and Implement + +const ( + SsRecv = "recv" + SsDeny = "deny" + SsAlive = "alive" + SsSend = "send" + SsDrop = "dropped" + SsAccept = "accept" + SsClose = "closed" +) + +type ServerListener struct { + OnClient func(client SocketClient) error + OnClose func(client SocketClient) error + ReadAt func(client SocketClient, f *FrameMessage) error +} + +type ReadClient func(client SocketClient, f *FrameMessage) error + +type SocketServer interface { + Listen() (err error) + Close() + Accept() + ListClient() <-chan SocketClient + OffClient(client SocketClient) + TotalClient() int + Loop(call ServerListener) + Read(client SocketClient, ReadAt ReadClient) + String() string + Address() string + Statistics() map[string]int64 + SetTimeout(v int64) +} + +// TODO keepalive to release zombie connections. +type SocketServerImpl struct { + lock sync.RWMutex + statistics *SafeStrInt64 + address string + maxClient int + clients *SafeStrMap + onClients chan SocketClient + offClients chan SocketClient + close func() + timeout int64 // sec for read and write timeout + WrQus int // per frames. 
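// Wiring, for reference: Accept() pushes new connections onto onClients, Loop()
// registers them and, when a ReadAt callback is set, starts a per-client Read()
// goroutine that buffers up to WrQus frames in a channel before dispatching them.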
+ error error +} + +func NewSocketServer(listen string) *SocketServerImpl { + return &SocketServerImpl{ + address: listen, + statistics: NewSafeStrInt64(), + maxClient: 128, + clients: NewSafeStrMap(1024), + onClients: make(chan SocketClient, 1024), + offClients: make(chan SocketClient, 1024), + WrQus: 1024, + } +} + +func (t *SocketServerImpl) ListClient() <-chan SocketClient { + list := make(chan SocketClient, 32) + Go(func() { + t.clients.Iter(func(k string, v interface{}) { + if client, ok := v.(SocketClient); ok { + list <- client + } + }) + list <- nil + }) + return list +} + +func (t *SocketServerImpl) TotalClient() int { + return t.clients.Len() +} + +func (t *SocketServerImpl) OffClient(client SocketClient) { + Warn("SocketServerImpl.OffClient %s", client) + if client != nil { + t.offClients <- client + } +} + +func (t *SocketServerImpl) doOnClient(call ServerListener, client SocketClient) { + Info("SocketServerImpl.doOnClient: +%s", client) + _ = t.clients.Set(client.RemoteAddr(), client) + if call.OnClient != nil { + _ = call.OnClient(client) + if call.ReadAt != nil { + Go(func() { t.Read(client, call.ReadAt) }) + } + } +} + +func (t *SocketServerImpl) doOffClient(call ServerListener, client SocketClient) { + Info("SocketServerImpl.doOffClient: -%s", client) + addr := client.RemoteAddr() + if _, ok := t.clients.GetEx(addr); ok { + Info("SocketServerImpl.doOffClient: close %s", addr) + t.statistics.Add(SsClose, 1) + if call.OnClose != nil { + _ = call.OnClose(client) + } + client.Close() + t.clients.Del(addr) + t.statistics.Add(SsAlive, -1) + } +} + +func (t *SocketServerImpl) Loop(call ServerListener) { + Debug("SocketServerImpl.Loop") + defer t.close() + for { + select { + case client := <-t.onClients: + t.doOnClient(call, client) + case client := <-t.offClients: + t.doOffClient(call, client) + } + } +} + +func (t *SocketServerImpl) Read(client SocketClient, ReadAt ReadClient) { + Log("SocketServerImpl.Read: %s", client) + done := make(chan bool, 2) + queue := make(chan *FrameMessage, t.WrQus) + Go(func() { + for { + select { + case frame := <-queue: + if err := ReadAt(client, frame); err != nil { + Error("SocketServerImpl.Read: readAt %s", err) + return + } + case <-done: + return + } + } + }) + for { + frame, err := client.ReadMsg() + if err != nil || frame.size <= 0 { + if frame != nil { + Error("SocketServerImpl.Read: %s %d", client, frame.size) + } else { + Error("SocketServerImpl.Read: %s %s", client, err) + } + done <- true + t.OffClient(client) + break + } + t.statistics.Add(SsRecv, 1) + if HasLog(LOG) { + Log("SocketServerImpl.Read: length: %d ", frame.size) + Log("SocketServerImpl.Read: frame : %x", frame) + } + queue <- frame + } +} + +// MUST IMPLEMENT +func (t *SocketServerImpl) Listen() error { + return nil +} + +// MUST IMPLEMENT +func (t *SocketServerImpl) Accept() { +} + +// MUST IMPLEMENT +func (t *SocketServerImpl) Close() { + if t.close != nil { + t.close() + } +} + +func (t *SocketServerImpl) Address() string { + return t.address +} + +func (t *SocketServerImpl) String() string { + return t.Address() +} + +func (t *SocketServerImpl) Statistics() map[string]int64 { + sts := make(map[string]int64, 32) + t.statistics.Copy(sts) + return sts +} + +func (t *SocketServerImpl) SetTimeout(v int64) { + t.timeout = v +} + +// pre-process when accept connection, +// and allowed accept new connection, will return nil. 
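// Accounting sketch: every accept increments SsAccept; once SsAlive reaches maxClient,
// further connections are closed and counted as SsDeny and SsClose; otherwise SsAlive
// grows here and is decremented again in doOffClient.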
+func (t *SocketServerImpl) preAccept(conn net.Conn, err error) error { + if err != nil { + if t.error == nil || t.error.Error() != err.Error() { + Warn("SocketServerImpl.preAccept: %s", err) + } + t.error = err + return err + } + t.error = nil + addr := conn.RemoteAddr() + Debug("SocketServerImpl.preAccept: %s", addr) + t.statistics.Add(SsAccept, 1) + alive := t.statistics.Get(SsAlive) + if alive >= int64(t.maxClient) { + Debug("SocketServerImpl.preAccept: close %s", addr) + t.statistics.Add(SsDeny, 1) + t.statistics.Add(SsClose, 1) + _ = conn.Close() + return NewErr("too many open clients") + } + Debug("SocketServerImpl.preAccept: allow %s", addr) + t.statistics.Add(SsAlive, 1) + return nil +} diff --git a/pkg/libol/tcp_bufio_test.go b/pkg/libol/tcp_bufio_test.go new file mode 100755 index 0000000..52b6834 --- /dev/null +++ b/pkg/libol/tcp_bufio_test.go @@ -0,0 +1,109 @@ +package libol + +import ( + "bufio" + "fmt" + "log" + "net" + "strings" + "sync" + "testing" +) + +type Counter struct { + Rx int + Tx int +} + +func handleConnection(conn net.Conn, n int, c *Counter) { + for i := 0; i < n; i++ { + // will listen for message to process ending in newline (\n) + message, _ := bufio.NewReader(conn).ReadString('\n') + if len(message) == 0 { + break + } + c.Rx += 1 + // output message received + //fmt.Printf("Server Received: %s", string(message)) + // sample process for string received + newMessage := strings.ToUpper(message) + // send new string back to client + conn.Write([]byte(newMessage)) + } +} + +func startServer(wg *sync.WaitGroup, ok chan int, n int, c *Counter) { + ln, err := net.Listen("tcp", "127.0.0.1:8081") + if err != nil { + log.Fatal(err) + } + + ok <- 1 + + conn, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + + handleConnection(conn, n, c) + conn.Close() + + wg.Done() +} + +func startClient(wg *sync.WaitGroup, ok chan int, n int, c *Counter) { + <-ok + + addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:8081") + conn, err := net.DialTCP("tcp", nil, addr) + if err != nil { + panic(err.Error()) + } + + go func() { + for { + message, _ := bufio.NewReader(conn).ReadString('\n') + //fmt.Printf("Client Received: %s", string(message)) + if message != "" { + break + } + } + conn.Close() + wg.Done() + }() + + go func() { + for i := 0; i < n; i++ { + fmt.Fprintf(conn, "From the client\n") + c.Tx += 1 + } + }() +} + +func TestClientAndServer(t *testing.T) { + wg := &sync.WaitGroup{} + ok := make(chan int, 1) + + c := &Counter{} + wg.Add(1) + go startServer(wg, ok, 128, c) + wg.Add(1) + go startClient(wg, ok, 128, c) + + wg.Wait() + //fmt.Printf("Total tx: %d, rx: %d\n", c.Tx, c.Rx) +} + +func BenchmarkClientAndServer(b *testing.B) { + wg := &sync.WaitGroup{} + ok := make(chan int, 1) + + c := &Counter{} + wg.Add(1) + go startServer(wg, ok, b.N, c) + wg.Add(1) + go startClient(wg, ok, b.N, c) + + wg.Wait() + //fmt.Printf("Total tx: %d, rx: %d\n", c.Tx, c.Rx) +} diff --git a/pkg/libol/tcpsocket.go b/pkg/libol/tcpsocket.go new file mode 100755 index 0000000..7d80c83 --- /dev/null +++ b/pkg/libol/tcpsocket.go @@ -0,0 +1,179 @@ +package libol + +import ( + "crypto/tls" + "github.com/xtaci/kcp-go/v5" + "net" + "time" +) + +type TcpConfig struct { + Tls *tls.Config + Block kcp.BlockCrypt + Timeout time.Duration // ns + RdQus int // per frames + WrQus int // per frames +} + +// Server Implement + +type TcpServer struct { + *SocketServerImpl + tcpCfg *TcpConfig + listener net.Listener +} + +func NewTcpServer(listen string, cfg *TcpConfig) *TcpServer { + t := &TcpServer{ + tcpCfg: 
cfg, + SocketServerImpl: NewSocketServer(listen), + } + t.WrQus = cfg.WrQus + t.close = t.Close + return t +} + +func (t *TcpServer) Listen() (err error) { + if t.tcpCfg.Tls != nil { + t.listener, err = tls.Listen("tcp", t.address, t.tcpCfg.Tls) + if err != nil { + t.listener = nil + return err + } + Info("TcpServer.Listen: tls://%s", t.address) + } else { + t.listener, err = net.Listen("tcp", t.address) + if err != nil { + t.listener = nil + return err + } + Info("TcpServer.Listen: tcp://%s", t.address) + } + return nil +} + +func (t *TcpServer) Close() { + if t.listener != nil { + _ = t.listener.Close() + Info("TcpServer.Close: %s", t.address) + t.listener = nil + } +} + +func (t *TcpServer) Accept() { + Debug("TcpServer.Accept") + promise := Promise{ + First: 2 * time.Second, + MinInt: 5 * time.Second, + MaxInt: 30 * time.Second, + } + promise.Done(func() error { + if err := t.Listen(); err != nil { + Warn("TcpServer.Accept: %s", err) + return err + } + return nil + }) + defer t.Close() + for { + if t.listener == nil { + return + } + conn, err := t.listener.Accept() + if t.preAccept(conn, err) != nil { + continue + } + t.onClients <- NewTcpClientFromConn(conn, t.tcpCfg) + } +} + +// Client Implement + +type TcpClient struct { + *SocketClientImpl + tcpCfg *TcpConfig +} + +func NewTcpClient(addr string, cfg *TcpConfig) *TcpClient { + t := &TcpClient{ + tcpCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + block: cfg.Block, + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + } + return t +} + +func NewTcpClientFromConn(conn net.Conn, cfg *TcpConfig) *TcpClient { + addr := conn.RemoteAddr().String() + t := &TcpClient{ + tcpCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + block: cfg.Block, + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + } + t.updateConn(conn) + return t +} + +func (t *TcpClient) Connect() error { + if !t.Retry() { + return nil + } + var err error + var conn net.Conn + if t.tcpCfg.Tls != nil { + t.out.Info("TcpClient.Connect: tls://%s", t.address) + conn, err = tls.Dial("tcp", t.address, t.tcpCfg.Tls) + } else { + t.out.Info("TcpClient.Connect: tcp://%s", t.address) + conn, err = net.Dial("tcp", t.address) + } + if err != nil { + return err + } + t.SetConnection(conn) + if t.listener.OnConnected != nil { + _ = t.listener.OnConnected(t) + } + return nil +} + +func (t *TcpClient) Close() { + t.out.Debug("TcpClient.Close: %v", t.IsOk()) + t.lock.Lock() + if t.connection != nil { + if t.status != ClTerminal { + t.status = ClClosed + } + t.updateConn(nil) + t.private = nil + t.lock.Unlock() + if t.listener.OnClose != nil { + _ = t.listener.OnClose(t) + } + t.out.Debug("TcpClient.Close: %d", t.status) + } else { + t.lock.Unlock() + } +} + +func (t *TcpClient) Terminal() { + t.SetStatus(ClTerminal) + t.Close() +} + +func (t *TcpClient) SetStatus(v SocketStatus) { + t.lock.Lock() + defer t.lock.Unlock() + if t.status != v { + if t.listener.OnStatus != nil { + t.listener.OnStatus(t, t.status, v) + } + t.status = v + } +} diff --git a/pkg/libol/time_test.go b/pkg/libol/time_test.go new file mode 100755 index 0000000..a094b6f --- /dev/null +++ b/pkg/libol/time_test.go @@ -0,0 +1,14 @@ +package libol + +import ( + "fmt" + "testing" + "time" +) + +func TestTime(t *testing.T) { + ti, _ := time.Parse(time.UnixDate, "Mon Nov 30 21:45:49 CST 2020") + fmt.Println(ti) + fmt.Println(time.Since(ti)) + fmt.Println(time.Now()) +} diff --git a/pkg/libol/udpsocket.go b/pkg/libol/udpsocket.go new file mode 100755 index 
0000000..bf09acd --- /dev/null +++ b/pkg/libol/udpsocket.go @@ -0,0 +1,174 @@ +package libol + +import ( + "github.com/xtaci/kcp-go/v5" + "net" + "time" +) + +type UdpConfig struct { + Block kcp.BlockCrypt + Timeout time.Duration // ns + Clients int + RdQus int // per frames + WrQus int // per frames +} + +var defaultUdpConfig = UdpConfig{ + Timeout: 120 * time.Second, + Clients: 1024, +} + +type UdpServer struct { + *SocketServerImpl + udpCfg *UdpConfig + listener net.Listener +} + +func NewUdpServer(listen string, cfg *UdpConfig) *UdpServer { + if cfg == nil { + cfg = &defaultUdpConfig + } + if cfg.Clients == 0 { + cfg.Clients = defaultUdpConfig.Clients + } + k := &UdpServer{ + udpCfg: cfg, + SocketServerImpl: NewSocketServer(listen), + } + k.close = k.Close + return k +} + +func (k *UdpServer) Listen() (err error) { + k.listener, err = XDPListen(k.address, k.udpCfg.Clients, k.udpCfg.RdQus*2) + if err != nil { + k.listener = nil + return err + } + Info("UdpServer.Listen: udp://%s", k.address) + return nil +} + +func (k *UdpServer) Close() { + if k.listener != nil { + _ = k.listener.Close() + Info("UdpServer.Close: %s", k.address) + k.listener = nil + } +} + +func (k *UdpServer) Accept() { + promise := Promise{ + First: 2 * time.Second, + MinInt: 5 * time.Second, + MaxInt: 30 * time.Second, + } + promise.Done(func() error { + if err := k.Listen(); err != nil { + Warn("UdpServer.Accept: %s", err) + return err + } + return nil + }) + defer k.Close() + for { + if k.listener == nil { + return + } + conn, err := k.listener.Accept() + if k.preAccept(conn, err) != nil { + continue + } + k.onClients <- NewUdpClientFromConn(conn, k.udpCfg) + } +} + +// Client Implement + +type UdpClient struct { + *SocketClientImpl + udpCfg *UdpConfig +} + +func NewUdpClient(addr string, cfg *UdpConfig) *UdpClient { + if cfg == nil { + cfg = &defaultUdpConfig + } + c := &UdpClient{ + udpCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &PacketMessagerImpl{ + timeout: cfg.Timeout, + block: cfg.Block, + bufSize: cfg.RdQus * MaxFrame, + }), + } + return c +} + +func NewUdpClientFromConn(conn net.Conn, cfg *UdpConfig) *UdpClient { + if cfg == nil { + cfg = &defaultUdpConfig + } + addr := conn.RemoteAddr().String() + c := &UdpClient{ + SocketClientImpl: NewSocketClient(addr, &PacketMessagerImpl{ + timeout: cfg.Timeout, + block: cfg.Block, + bufSize: cfg.RdQus * MaxFrame, + }), + } + c.updateConn(conn) + return c +} + +func (c *UdpClient) Connect() error { + if !c.Retry() { + return nil + } + c.out.Info("UdpClient.Connect: udp://%s", c.address) + conn, err := net.Dial("udp", c.address) + if err != nil { + return err + } + c.SetConnection(conn) + if c.listener.OnConnected != nil { + _ = c.listener.OnConnected(c) + } + return nil +} + +func (c *UdpClient) Close() { + c.out.Debug("UdpClient.Close: %v", c.IsOk()) + c.lock.Lock() + if c.connection != nil { + if c.status != ClTerminal { + c.status = ClClosed + } + c.out.Info("UdpClient.Close") + c.updateConn(nil) + c.private = nil + c.lock.Unlock() + if c.listener.OnClose != nil { + _ = c.listener.OnClose(c) + } + } else { + c.lock.Unlock() + } +} + +func (c *UdpClient) Terminal() { + c.SetStatus(ClTerminal) + c.Close() +} + +func (c *UdpClient) SetStatus(v SocketStatus) { + c.lock.Lock() + defer c.lock.Unlock() + if c.status != v { + if c.listener.OnStatus != nil { + c.listener.OnStatus(c, c.status, v) + } + c.status = v + } +} diff --git a/pkg/libol/utils.go b/pkg/libol/utils.go new file mode 100755 index 0000000..803e875 --- /dev/null +++ b/pkg/libol/utils.go @@ -0,0 
+1,304 @@ +package libol + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math/rand" + "net" + "os" + "os/signal" + "path" + "reflect" + "runtime" + "strconv" + "strings" + "syscall" + "time" +) + +const LeaseTime = "2006-01-02T15" +const SimpleTime = "2006-01-02 15:04:05" + +func GenRandom(n int) string { + letters := []byte("0123456789abcdefghijklmnopqrstuvwxyz") + buffer := make([]byte, n) + rand.Seed(time.Now().UnixNano()) + for i := range buffer { + buffer[i] = letters[rand.Int63()%int64(len(letters))] + } + buffer[0] = letters[rand.Int63()%26+10] + return string(buffer) +} + +func GenEthAddr(n int) []byte { + if n == 0 { + n = 6 + } + data := make([]byte, n) + rand.Seed(time.Now().UnixNano()) + for i := range data { + data[i] = byte(rand.Uint32() & 0xFF) + } + data[0] &= 0xfe + return data +} + +func GenUint32() uint32 { + rand.Seed(time.Now().UnixNano()) + return rand.Uint32() +} + +func GenInt32() int { + rand.Seed(time.Now().UnixNano()) + return rand.Int() +} + +func Marshal(v interface{}, pretty bool) ([]byte, error) { + str, err := json.Marshal(v) + if err != nil { + Error("Marshal error: %s", err) + return nil, err + } + if !pretty { + return str, nil + } + var out bytes.Buffer + if err := json.Indent(&out, str, "", " "); err != nil { + return str, nil + } + return out.Bytes(), nil +} + +func MarshalSave(v interface{}, file string, pretty bool) error { + f, err := CreateFile(file) + if err != nil { + Error("MarshalSave: %s", err) + return err + } + defer f.Close() + str, err := Marshal(v, true) + if err != nil { + Error("MarshalSave error: %s", err) + return err + } + if _, err := f.Write(str); err != nil { + Error("MarshalSave: %s", err) + return err + } + return nil +} + +func FileExist(file string) error { + if _, err := os.Stat(file); os.IsNotExist(err) { + return err + } + return nil +} + +func ScanAnn(r io.Reader) ([]byte, error) { + data := make([]byte, 0, 1024) + scan := bufio.NewScanner(r) + for scan.Scan() { + bs := scan.Bytes() + dis := false + for i, b := range bs { + if b == ' ' || b == '\t' || b == '\r' || b == '\n' { + continue + } + if b == '/' && len(bs) > i+1 && bs[i+1] == '/' { + dis = true // if start with //, need discard it. + } + break + } + if !dis { + data = append(data, bs...) 
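+ // Lines whose first non-blank characters are "//" are discarded, so JSON
+ // configuration files may carry line comments. For example, the input
+ //   // managed by hand
+ //   {"name": "default"}
+ // scans to {"name": "default"}; kept lines are concatenated as-is,
+ // including their leading whitespace (see TestScanPure in utils_test.go).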
+ } + } + if err := scan.Err(); err != nil { + return nil, err + } + return data, nil +} + +func LoadWithoutAnn(file string) ([]byte, error) { + fp, err := OpenRead(file) + if err != nil { + return nil, err + } + defer fp.Close() + return ScanAnn(fp) +} + +func UnmarshalLoad(v interface{}, file string) error { + if err := FileExist(file); err != nil { + return NewErr("%s %s", file, err) + } + contents, err := LoadWithoutAnn(file) + if err != nil { + return NewErr("%s %s", file, err) + } + if err := json.Unmarshal(contents, v); err != nil { + return NewErr("%s", err) + } + return nil +} + +func FunName(i interface{}) string { + ptr := reflect.ValueOf(i).Pointer() + name := runtime.FuncForPC(ptr).Name() + return path.Base(name) +} + +func Netmask2Len(s string) int { + mask := net.IPMask(net.ParseIP(s).To4()) + prefixSize, _ := mask.Size() + return prefixSize +} + +func IPNetmask(ipAddr string) (string, error) { + if i, n, err := net.ParseCIDR(ipAddr); err == nil { + return i.String() + "/" + net.IP(n.Mask).String(), nil + } else { + return "", err + } +} + +func IPNetwork(ipAddr string) (string, error) { + if _, n, err := net.ParseCIDR(ipAddr); err == nil { + return n.IP.String() + "/" + net.IP(n.Mask).String(), nil + } else { + return ipAddr, err + } +} + +func PrettyTime(t int64) string { + s := "" + if t < 0 { + s = "-" + t = -t + } + min := t / 60 + if min < 60 { + return fmt.Sprintf("%s%dm%ds", s, min, t%60) + } + hours := min / 60 + if hours < 24 { + return fmt.Sprintf("%s%dh%dm", s, hours, min%60) + } + days := hours / 24 + return fmt.Sprintf("%s%dd%dh", s, days, hours%24) +} + +func PrettyBytes(b int64) string { + split := func(_v int64, _m int64) (i int64, d int) { + _d := float64(_v%_m) / float64(_m) + return _v / _m, int(_d * 100) //move two decimal to integer + } + if b < 1024 { + return fmt.Sprintf("%dB", b) + } + k, d := split(b, 1024) + if k < 1024 { + return fmt.Sprintf("%d.%02dK", k, d) + } + m, d := split(k, 1024) + if m < 1024 { + return fmt.Sprintf("%d.%02dM", m, d) + } + g, d := split(m, 1024) + return fmt.Sprintf("%d.%02dG", g, d) +} +func GetIPAddr(addr string) string { + _addr, _ := GetHostPort(addr) + return _addr +} + +func GetHostPort(addr string) (string, string) { + values := strings.SplitN(addr, ":", 2) + if len(values) == 2 { + return values[0], values[1] + } + return values[0], "" +} + +func Wait() { + x := make(chan os.Signal) + signal.Notify(x, os.Interrupt, syscall.SIGTERM) + signal.Notify(x, os.Interrupt, syscall.SIGKILL) + signal.Notify(x, os.Interrupt, syscall.SIGQUIT) //CTL+/ + signal.Notify(x, os.Interrupt, syscall.SIGINT) //CTL+C + Info("Wait: ...") + n := <-x + Warn("Wait: ... 
Signal %d received ...", n) +} + +func OpenTrunk(path string) (*os.File, error) { + return os.OpenFile(path, os.O_TRUNC|os.O_WRONLY|os.O_CREATE, 0600) +} + +func OpenWrite(path string) (*os.File, error) { + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) +} + +func OpenRead(path string) (*os.File, error) { + return os.OpenFile(path, os.O_RDONLY, os.ModePerm) +} + +func CreateFile(path string) (*os.File, error) { + return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) +} + +func ParseAddr(addr string) net.IP { + ip := strings.SplitN(addr, "/", 2)[0] + return net.ParseIP(ip) +} + +func ParseNet(addr string) (*net.IPNet, error) { + if _, ipNet, err := net.ParseCIDR(addr); err != nil { + return nil, err + } else { + return ipNet, nil + } +} + +func Uint2S(value uint32) string { + return strconv.FormatUint(uint64(value), 10) +} + +func IfName(name string) string { + size := len(name) + if size < 15 { + return name + } + return name[size-15 : size] +} + +func GetLocalTime(layout, value string) (time.Time, error) { + return time.ParseInLocation(layout, value, time.Local) +} + +func Base64Decode(value string) ([]byte, error) { + return base64.StdEncoding.DecodeString(value) +} + +func Base64Encode(value []byte) string { + return base64.StdEncoding.EncodeToString(value) +} + +func GetPrefix(value string, index int) string { + if len(value) >= index { + return value[:index] + } + return "" +} + +func GetSuffix(value string, index int) string { + if len(value) >= index { + return value[index:] + } + return "" +} diff --git a/pkg/libol/utils_test.go b/pkg/libol/utils_test.go new file mode 100755 index 0000000..5d24019 --- /dev/null +++ b/pkg/libol/utils_test.go @@ -0,0 +1,95 @@ +package libol + +import ( + "bytes" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestPrettyTime(t *testing.T) { + var s string + + s = PrettyTime(59) + assert.Equal(t, "0m59s", s, "be the same.") + s = PrettyTime(60 + 59) + assert.Equal(t, "1m59s", s, "be the same.") + + s = PrettyTime(60*2 + 8) + assert.Equal(t, "2m8s", s, "be the same.") + + s = PrettyTime(3600 + 1) + assert.Equal(t, "1h0m", s, "be the same.") + + s = PrettyTime(3600 + 61) + assert.Equal(t, "1h1m", s, "be the same.") + + s = PrettyTime(3600 + 60*59) + assert.Equal(t, "1h59m", s, "be the same.") + + s = PrettyTime(3600*23 + 60*59) + assert.Equal(t, "23h59m", s, "be the same.") + + s = PrettyTime(86400) + assert.Equal(t, "1d0h", s, "be the same.") + + s = PrettyTime(86400 + 3600*5 + 59) + assert.Equal(t, "1d5h", s, "be the same.") + + s = PrettyTime(86400 + 3600*23 + 59) + assert.Equal(t, "1d23h", s, "be the same.") +} + +func TestPrettyBytes(t *testing.T) { + var s string + + s = PrettyBytes(1023) + assert.Equal(t, "1023B", s, "be the same.") + s = PrettyBytes(1024 + 1023) + assert.Equal(t, "1.99K", s, "be the same.") + + s = PrettyBytes(1024*2 + 8) + assert.Equal(t, "2.00K", s, "be the same.") + + s = PrettyBytes(1024*2 + 1023) + assert.Equal(t, "2.99K", s, "be the same.") + + s = PrettyBytes(1024*1024 + 1) + assert.Equal(t, "1.00M", s, "be the same.") + + s = PrettyBytes(1024*1024 + 1024*256 + 1023) + assert.Equal(t, "1.25M", s, "be the same.") + + s = PrettyBytes(1024*1024 + 1024*1023) + assert.Equal(t, "1.99M", s, "be the same.") + + s = PrettyBytes(1024 * 1024 * 1024) + assert.Equal(t, "1.00G", s, "be the same.") + + s = PrettyBytes(1024*1024*1024 + 1024*1024*5 + 59) + assert.Equal(t, "1.00G", s, "be the same.") + + s = PrettyBytes(1024*1024*1024 + 1024*1024*512 + 59) + assert.Equal(t, "1.50G", s, "be the 
same.") +} + +func TestScanPure(t *testing.T) { + buff := bytes.NewBuffer([]byte(`// hi`)) + _, err := ScanAnn(buff) + assert.Equal(t, nil, err, "be the same.") + buff = bytes.NewBuffer([]byte(`// hi + you are`)) + data, err := ScanAnn(buff) + assert.Equal(t, string(data), "\t\t\tyou are", "be the same.") + buff = bytes.NewBuffer([]byte(`// hi + you are + // you are + //`)) + data, err = ScanAnn(buff) + assert.Equal(t, string(data), "\t\t\tyou are", "be the same.") + buff = bytes.NewBuffer([]byte(`// hi + you are + // you are + /`)) + data, err = ScanAnn(buff) + assert.Equal(t, string(data), "\t\t\tyou are\t\t\t/", "be the same.") +} diff --git a/pkg/libol/version.go b/pkg/libol/version.go new file mode 100755 index 0000000..5125a6e --- /dev/null +++ b/pkg/libol/version.go @@ -0,0 +1,12 @@ +package libol + +var ( + Date string + Version string + Commit string +) + +func init() { + Debug("version is %s", Version) + Debug("built on %s", Date) +} diff --git a/pkg/libol/wait.go b/pkg/libol/wait.go new file mode 100755 index 0000000..6041294 --- /dev/null +++ b/pkg/libol/wait.go @@ -0,0 +1,19 @@ +package libol + +type WaitOne struct { + done chan bool +} + +func NewWaitOne(n int) *WaitOne { + return &WaitOne{ + done: make(chan bool, n), + } +} + +func (w *WaitOne) Done() { + w.done <- true +} + +func (w *WaitOne) Wait() { + <-w.done +} diff --git a/pkg/libol/websocket.go b/pkg/libol/websocket.go new file mode 100755 index 0000000..b78fb59 --- /dev/null +++ b/pkg/libol/websocket.go @@ -0,0 +1,241 @@ +package libol + +import ( + "crypto/tls" + "crypto/x509" + "github.com/xtaci/kcp-go/v5" + "golang.org/x/net/websocket" + "io/ioutil" + "net" + "net/http" + "time" +) + +type wsConn struct { + *websocket.Conn +} + +func (ws *wsConn) RemoteAddr() net.Addr { + req := ws.Request() + if req == nil { + return ws.RemoteAddr() + } + addr := req.RemoteAddr + if ret, err := net.ResolveTCPAddr("tcp", addr); err == nil { + return ret + } + return nil +} + +type WebCert struct { + Key string + Crt string + RootCa string + Insecure bool +} + +type WebConfig struct { + Cert *WebCert + Block kcp.BlockCrypt + Timeout time.Duration // ns + RdQus int // per frames + WrQus int // per frames +} + +// Server Implement + +type WebServer struct { + *SocketServerImpl + webCfg *WebConfig + listener *http.Server +} + +func NewWebServer(listen string, cfg *WebConfig) *WebServer { + t := &WebServer{ + webCfg: cfg, + SocketServerImpl: NewSocketServer(listen), + } + t.WrQus = cfg.WrQus + t.close = t.Close + return t +} + +func (t *WebServer) Listen() (err error) { + if t.webCfg.Cert != nil { + Info("WebServer.Listen: wss://%s", t.address) + } else { + Info("WebServer.Listen: ws://%s", t.address) + } + t.listener = &http.Server{ + Addr: t.address, + } + return nil +} + +func (t *WebServer) Close() { + if t.listener != nil { + _ = t.listener.Close() + Info("WebServer.Close: %s", t.address) + t.listener = nil + } +} + +func (t *WebServer) Accept() { + Debug("WebServer.Accept") + + _ = t.Listen() + defer t.Close() + t.listener.Handler = websocket.Handler(func(ws *websocket.Conn) { + if t.preAccept(ws, nil) != nil { + return + } + defer ws.Close() + ws.PayloadType = websocket.BinaryFrame + wws := &wsConn{ws} + client := NewWebClientFromConn(wws, t.webCfg) + t.onClients <- client + <-client.done + Info("WebServer.Accept: %s exit", ws.RemoteAddr()) + }) + promise := Promise{ + First: 2 * time.Second, + MinInt: 5 * time.Second, + MaxInt: 30 * time.Second, + } + promise.Done(func() error { + if t.webCfg.Cert == nil { + if err := 
t.listener.ListenAndServe(); err != nil { + Error("WebServer.Accept on %s: %s", t.address, err) + return err + } + } else { + ca := t.webCfg.Cert + if err := t.listener.ListenAndServeTLS(ca.Crt, ca.Key); err != nil { + Error("WebServer.Accept on %s: %s", t.address, err) + return err + } + } + return nil + }) +} + +// Client Implement + +type WebClient struct { + *SocketClientImpl + webCfg *WebConfig + done chan bool + RdBuf int // per frames + WrBuf int // per frames +} + +func NewWebClient(addr string, cfg *WebConfig) *WebClient { + t := &WebClient{ + webCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + block: cfg.Block, + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + done: make(chan bool, 2), + } + return t +} + +func NewWebClientFromConn(conn net.Conn, cfg *WebConfig) *WebClient { + addr := conn.RemoteAddr().String() + t := &WebClient{ + webCfg: cfg, + SocketClientImpl: NewSocketClient(addr, &StreamMessagerImpl{ + block: cfg.Block, + timeout: cfg.Timeout, + bufSize: cfg.RdQus * MaxFrame, + }), + done: make(chan bool, 2), + } + t.updateConn(conn) + return t +} + +func (t *WebClient) GetCertPool(ca string) *x509.CertPool { + caCert, err := ioutil.ReadFile(ca) + if err != nil { + Error("WebClient.GetCertPool: %s", err) + return nil + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(caCert) { + Warn("WebClient.GetCertPool: invalid cert") + } + return pool +} + +func (t *WebClient) Connect() error { + if !t.Retry() { + return nil + } + var err error + var config *websocket.Config + if t.webCfg.Cert != nil { + t.out.Info("WebClient.Connect: wss://%s", t.address) + url := "wss://" + t.address + if config, err = websocket.NewConfig(url, url); err != nil { + return err + } + config.TlsConfig = &tls.Config{ + InsecureSkipVerify: t.webCfg.Cert.Insecure, + RootCAs: t.GetCertPool(t.webCfg.Cert.RootCa), + } + } else { + t.out.Info("WebClient.Connect: ws://%s", t.address) + url := "ws://" + t.address + if config, err = websocket.NewConfig(url, url); err != nil { + return err + } + } + conn, err := websocket.DialConfig(config) + if err != nil { + return err + } + t.SetConnection(conn) + if t.listener.OnConnected != nil { + _ = t.listener.OnConnected(t) + } + return nil +} + +func (t *WebClient) Close() { + t.out.Debug("WebClient.Close: %v", t.IsOk()) + t.lock.Lock() + if t.connection != nil { + if t.status != ClTerminal { + t.status = ClClosed + } + t.updateConn(nil) + t.done <- true + t.private = nil + t.lock.Unlock() + if t.listener.OnClose != nil { + _ = t.listener.OnClose(t) + } + t.out.Debug("WebClient.Close: %d", t.status) + } else { + t.lock.Unlock() + } +} + +func (t *WebClient) Terminal() { + t.SetStatus(ClTerminal) + t.Close() +} + +func (t *WebClient) SetStatus(v SocketStatus) { + t.lock.Lock() + defer t.lock.Unlock() + if t.status != v { + if t.listener.OnStatus != nil { + t.listener.OnStatus(t, t.status, v) + } + t.status = v + } +} diff --git a/pkg/libol/wsclient.go b/pkg/libol/wsclient.go new file mode 100755 index 0000000..f3eab52 --- /dev/null +++ b/pkg/libol/wsclient.go @@ -0,0 +1,58 @@ +package libol + +import ( + "crypto/tls" + "encoding/base64" + "golang.org/x/net/websocket" + "net/http" + "net/url" +) + +type Auth struct { + Type string + Username string + Password string +} + +func BasicAuth(username, password string) string { + auth := username + ":" + if password != "" { + auth += password + } + return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) +} + +type WsClient struct { + Auth Auth + Url string + TlsConfig 
*tls.Config + Protocol string +} + +func (w *WsClient) Initialize() { + u, _ := url.Parse(w.Url) + if u.Scheme == "http" { + u.Scheme = "ws" + } else if u.Scheme == "https" { + u.Scheme = "wss" + } + w.Url = u.String() + w.TlsConfig = &tls.Config{InsecureSkipVerify: true} +} + +func (w *WsClient) Dial() (ws *websocket.Conn, err error) { + config, err := websocket.NewConfig(w.Url, w.Url) + if err != nil { + return nil, err + } + if w.Protocol != "" { + config.Protocol = []string{w.Protocol} + } + config.TlsConfig = w.TlsConfig + if w.Auth.Type == "basic" { + config.Header = http.Header{ + "Authorization": {BasicAuth(w.Auth.Username, w.Auth.Password)}, + } + } + return websocket.DialConfig(config) +} diff --git a/pkg/libol/xdp.go b/pkg/libol/xdp.go new file mode 100755 index 0000000..7bdd1f6 --- /dev/null +++ b/pkg/libol/xdp.go @@ -0,0 +1,216 @@ +package libol + +import ( + "net" + "sync" + "time" +) + +type XDP struct { + lock sync.RWMutex + bufSize int + connection *net.UDPConn + address *net.UDPAddr + sessions *SafeStrMap + accept chan *XDPConn +} + +func XDPListen(addr string, clients, bufSize int) (net.Listener, error) { + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + if bufSize == 0 { + bufSize = MaxBuf + } + Debug("bufSize: %d", bufSize) + x := &XDP{ + address: udpAddr, + sessions: NewSafeStrMap(clients), + accept: make(chan *XDPConn, 2), + bufSize: bufSize, + } + conn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, err + } + x.connection = conn + Go(x.Loop) + return x, nil +} + +func (x *XDP) Recv(udpAddr *net.UDPAddr, data []byte) error { + // dispatch to XDPConn and new accept + addr := udpAddr.String() + if obj, ok := x.sessions.GetEx(addr); ok { + conn := obj.(*XDPConn) + conn.toQueue(data) + return nil + } + conn := &XDPConn{ + connection: x.connection, + remoteAddr: udpAddr, + localAddr: x.address, + readQueue: make(chan []byte, 1024), + closed: false, + onClose: func(conn *XDPConn) { + Info("XDP.Recv: onClose %s", conn) + x.sessions.Del(addr) + }, + } + if err := x.sessions.Set(addr, conn); err != nil { + return NewErr("session.Set: %s", err) + } + x.accept <- conn + conn.toQueue(data) + return nil +} + +// Loop forever +func (x *XDP) Loop() { + for { + data := make([]byte, x.bufSize) + n, udpAddr, err := x.connection.ReadFromUDP(data) + if err != nil { + Error("XDP.Loop %s", err) + break + } + if err := x.Recv(udpAddr, data[:n]); err != nil { + Warn("XDP.Loop: %s", err) + } + } +} + +// Accept waits for and returns the next connection to the listener. +func (x *XDP) Accept() (net.Conn, error) { + return <-x.accept, nil +} + +// Close closes the listener. +// Any blocked Accept operations will be unblocked and return errors. +func (x *XDP) Close() error { + x.lock.Lock() + defer x.lock.Unlock() + + _ = x.connection.Close() + return nil +} + +// returns the listener's network address. 
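+ // Together with Accept and Close above, Addr lets XDP satisfy the
+ // net.Listener interface that XDPListen returns.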
+func (x *XDP) Addr() net.Addr { + return x.address +} + +type XDPConn struct { + lock sync.RWMutex + connection *net.UDPConn + remoteAddr *net.UDPAddr + localAddr *net.UDPAddr + readQueue chan []byte + closed bool + readDead time.Time + writeDead time.Time + onClose func(conn *XDPConn) +} + +func (c *XDPConn) toQueue(b []byte) { + c.lock.RLock() + if c.closed { + c.lock.RUnlock() + return + } else { + c.lock.RUnlock() + } + c.readQueue <- b +} + +func (c *XDPConn) Read(b []byte) (n int, err error) { + c.lock.RLock() + if c.closed { + c.lock.RUnlock() + return 0, NewErr("read on closed") + } + var timeout *time.Timer + outChan := make(<-chan time.Time) + if !c.readDead.IsZero() { + if time.Now().After(c.readDead) { + c.lock.RUnlock() + return 0, NewErr("read timeout") + } + delay := c.readDead.Sub(time.Now()) + timeout = time.NewTimer(delay) + outChan = timeout.C + } + c.lock.RUnlock() + + // wait for read event or timeout or error + select { + case <-outChan: + return 0, NewErr("read timeout") + case d := <-c.readQueue: + if timeout != nil { + timeout.Stop() + } + return copy(b, d), nil + } +} + +func (c *XDPConn) Write(b []byte) (n int, err error) { + c.lock.RLock() + if c.closed { + c.lock.RUnlock() + return 0, NewErr("write to closed") + } else { + c.lock.RUnlock() + } + return c.connection.WriteToUDP(b, c.remoteAddr) +} + +func (c *XDPConn) Close() error { + c.lock.Lock() + defer c.lock.Unlock() + if c.closed { + return nil + } + if c.onClose != nil { + c.onClose(c) + } + c.connection = nil + c.closed = true + + return nil +} + +func (c *XDPConn) LocalAddr() net.Addr { + return c.localAddr +} + +func (c *XDPConn) RemoteAddr() net.Addr { + return c.remoteAddr +} + +func (c *XDPConn) SetDeadline(t time.Time) error { + c.lock.Lock() + defer c.lock.Unlock() + c.readDead = t + c.writeDead = t + return nil +} + +func (c *XDPConn) SetReadDeadline(t time.Time) error { + c.lock.Lock() + defer c.lock.Unlock() + c.readDead = t + return nil +} + +func (c *XDPConn) SetWriteDeadline(t time.Time) error { + c.lock.Lock() + defer c.lock.Unlock() + c.writeDead = t + return nil +} + +func (c *XDPConn) String() string { + return c.remoteAddr.String() +} diff --git a/pkg/models/esp.go b/pkg/models/esp.go new file mode 100755 index 0000000..c59f020 --- /dev/null +++ b/pkg/models/esp.go @@ -0,0 +1,108 @@ +// +build linux + +package models + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" + nl "github.com/vishvananda/netlink" + "time" +) + +type Esp struct { + Name string + Address string + NewTime int64 +} + +func (l *Esp) Update() { +} + +func (l *Esp) ID() string { + return l.Name +} + +func NewEspSchema(e *Esp) schema.Esp { + e.Update() + se := schema.Esp{ + Name: e.Name, + Address: e.Address, + } + return se +} + +type EspState struct { + *schema.EspState + NewTime int64 + In *nl.XfrmState + Out *nl.XfrmState +} + +func (l *EspState) Update() { + used := int64(0) + if xss, err := nl.XfrmStateGet(l.In); xss != nil { + l.TxBytes = int64(xss.Statistics.Bytes) + l.TxPackages = int64(xss.Statistics.Packets) + used = int64(xss.Statistics.UseTime) + } else { + libol.Debug("EspState.Update %s", err) + } + if xss, err := nl.XfrmStateGet(l.Out); xss != nil { + l.RxBytes = int64(xss.Statistics.Bytes) + l.RxPackages = int64(xss.Statistics.Packets) + } else { + libol.Debug("EspState.Update %s", err) + } + if used > 0 { + l.AliveTime = time.Now().Unix() - used + } +} + +func (l *EspState) ID() string { + return fmt.Sprintf("%d-%s-%s", l.Spi, l.Local, l.Remote) +} + 
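+ // UpTime returns the number of seconds elapsed since NewTime.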
+func (l *EspState) UpTime() int64 { + return time.Now().Unix() - l.NewTime +} + +func NewEspStateSchema(e *EspState) schema.EspState { + e.Update() + se := schema.EspState{ + Name: e.Name, + Spi: e.Spi, + Local: e.Local, + Remote: e.Remote, + TxBytes: e.TxBytes, + TxPackages: e.TxPackages, + RxBytes: e.RxBytes, + RxPackages: e.RxPackages, + AliveTime: e.AliveTime, + } + return se +} + +type EspPolicy struct { + *schema.EspPolicy + In *nl.XfrmPolicy + Fwd *nl.XfrmPolicy + Out *nl.XfrmPolicy +} + +func (l *EspPolicy) Update() { +} + +func (l *EspPolicy) ID() string { + return fmt.Sprintf("%d-%s-%s", l.Spi, l.Source, l.Dest) +} + +func NewEspPolicySchema(e *EspPolicy) schema.EspPolicy { + e.Update() + se := schema.EspPolicy{ + Name: e.Name, + Source: e.Source, + Dest: e.Dest, + } + return se +} diff --git a/pkg/models/line.go b/pkg/models/line.go new file mode 100755 index 0000000..efc498a --- /dev/null +++ b/pkg/models/line.go @@ -0,0 +1,45 @@ +package models + +import ( + "net" + "strconv" + "time" +) + +type Line struct { + EthType uint16 + IpSource net.IP + IpDest net.IP + IpProtocol uint8 + PortDest uint16 + PortSource uint16 + NewTime int64 + HitTime int64 +} + +func NewLine(t uint16) *Line { + l := &Line{ + EthType: t, + NewTime: time.Now().Unix(), + HitTime: time.Now().Unix(), + } + return l +} + +func (l *Line) String() string { + str := strconv.FormatUint(uint64(l.EthType), 10) + str += ":" + l.IpSource.String() + str += ":" + l.IpDest.String() + str += ":" + strconv.FormatUint(uint64(l.IpProtocol), 10) + str += ":" + strconv.FormatUint(uint64(l.PortSource), 10) + str += ":" + strconv.FormatUint(uint64(l.PortDest), 10) + return str +} + +func (l *Line) UpTime() int64 { + return time.Now().Unix() - l.NewTime +} + +func (l *Line) LastTime() int64 { + return time.Now().Unix() - l.HitTime +} diff --git a/pkg/models/link.go b/pkg/models/link.go new file mode 100755 index 0000000..eeda874 --- /dev/null +++ b/pkg/models/link.go @@ -0,0 +1,23 @@ +package models + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" +) + +type Link struct { + User string + Network string + Protocol string + StatusFile string +} + +func (l *Link) reload() *schema.Point { + status := &schema.Point{} + _ = libol.UnmarshalLoad(status, l.StatusFile) + return status +} + +func (l *Link) Status() *schema.Point { + return l.reload() +} diff --git a/pkg/models/neigbor.go b/pkg/models/neigbor.go new file mode 100755 index 0000000..3c7c656 --- /dev/null +++ b/pkg/models/neigbor.go @@ -0,0 +1,55 @@ +package models + +import ( + "github.com/luscis/openlan/pkg/libol" + "net" + "time" +) + +type Neighbor struct { + Network string `json:"network"` + Device string `json:"device"` + Client string `json:"client"` + HwAddr net.HardwareAddr `json:"hwAddr"` + IpAddr net.IP `json:"ipAddr"` + NewTime int64 `json:"newTime"` + HitTime int64 `json:"hitTime"` +} + +func (e *Neighbor) String() string { + str := e.HwAddr.String() + str += ":" + e.IpAddr.String() + str += ":" + e.Client + return str +} + +func NewNeighbor(hwAddr net.HardwareAddr, ipAddr net.IP, client libol.SocketClient) (e *Neighbor) { + e = &Neighbor{ + HwAddr: hwAddr, + IpAddr: ipAddr, + Client: client.String(), + NewTime: time.Now().Unix(), + HitTime: time.Now().Unix(), + } + e.Update(client) + return +} + +func (e *Neighbor) UpTime() int64 { + return time.Now().Unix() - e.HitTime +} + +func (e *Neighbor) Update(client libol.SocketClient) { + if client == nil { + return + } + private := client.Private() + if private == nil { + 
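+ // no Point has been attached to this client yet; keep the current fields.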
return + } + if point, ok := private.(*Point); ok { + e.Network = point.Network + e.Device = point.IfName + e.Client = client.String() + } +} diff --git a/pkg/models/network.go b/pkg/models/network.go new file mode 100755 index 0000000..cd8d514 --- /dev/null +++ b/pkg/models/network.go @@ -0,0 +1,102 @@ +package models + +import ( + "fmt" + "net" + "sort" + "strings" +) + +type Route struct { + Prefix string `json:"prefix"` + NextHop string `json:"nexthop"` + Metric int `json:"metric"` + Mode string `json:"mode"` +} + +func NewRoute(prefix string, nexthop, mode string) (this *Route) { + this = &Route{ + Prefix: prefix, + NextHop: nexthop, + Metric: 250, + Mode: mode, + } + return +} + +func (u *Route) String() string { + return fmt.Sprintf("%s, %s", u.Prefix, u.NextHop) +} + +func (u *Route) SetMetric(value int) { + u.Metric = value +} + +type Network struct { + Name string `json:"name"` + Tenant string `json:"tenant,omitempty"` + IfAddr string `json:"ifAddr"` + IpStart string `json:"ipStart"` + IpEnd string `json:"ipEnd"` + Netmask string `json:"netmask"` + Routes []*Route `json:"routes"` +} + +func NewNetwork(name string, ifAddr string) (this *Network) { + address := ifAddr + netmask := "255.255.255.255" + s := strings.SplitN(ifAddr, "/", 2) + if len(s) == 2 { + address = s[0] + _, n, err := net.ParseCIDR(ifAddr) + if err == nil { + netmask = net.IP(n.Mask).String() + } else { + netmask = s[1] + } + } + this = &Network{ + Name: name, + IfAddr: address, + Netmask: netmask, + } + return +} + +func (u *Network) String() string { + return fmt.Sprintf("%s, %s, %s, %s, %s, %s", + u.Name, u.IfAddr, u.IpStart, u.IpEnd, u.Netmask, u.Routes) +} + +func (u *Network) ParseIP(s string) { +} + +func NetworkEqual(o *Network, n *Network) bool { + if o == n { + return true + } else if o == nil || n == nil { + return false + } else if o.IfAddr != n.IfAddr || o.Netmask != n.Netmask { + return false + } else { + ors := make([]string, 0, 32) + nrs := make([]string, 0, 32) + for _, rt := range o.Routes { + ors = append(ors, rt.String()) + } + for _, rt := range n.Routes { + nrs = append(nrs, rt.String()) + } + if len(ors) != len(nrs) { + return false + } + sort.Strings(ors) + sort.Strings(nrs) + for i := range ors { + if ors[i] != nrs[i] { + return false + } + } + return true + } +} diff --git a/pkg/models/network_test.go b/pkg/models/network_test.go new file mode 100755 index 0000000..a2070f1 --- /dev/null +++ b/pkg/models/network_test.go @@ -0,0 +1,63 @@ +package models + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestNetworkEqual(t *testing.T) { + assert.Equal(t, true, NetworkEqual(nil, nil), "be the same.") + o := &Network{} + assert.Equal(t, false, NetworkEqual(o, nil), "be the same.") + n := &Network{} + assert.Equal(t, false, NetworkEqual(nil, n), "be the same.") + assert.Equal(t, true, NetworkEqual(n, n), "be the same.") + o = &Network{ + IfAddr: "192.168.1.1", + Netmask: "255.255.0.0", + Routes: []*Route{ + {Prefix: "0.0.0.0/24", NextHop: "1.1.1.1."}, + }, + } + n = &Network{ + IfAddr: "192.168.1.1", + Netmask: "255.255.0.0", + Routes: []*Route{ + {Prefix: "0.0.0.0/24", NextHop: "1.1.1.1."}, + }, + } + assert.Equal(t, true, NetworkEqual(o, n), "be the same.") + o = &Network{ + IfAddr: "192.168.1.1", + Netmask: "255.255.0.0", + Routes: []*Route{}, + } + assert.Equal(t, false, NetworkEqual(o, n), "be the same.") + assert.Equal(t, false, NetworkEqual(n, o), "be the same.") + o = &Network{ + IfAddr: "192.168.1.1", + Netmask: "255.255.0.0", + Routes: []*Route{ + {Prefix: 
"0.0.0.0/24", NextHop: "1.1.1.1."}, + {Prefix: "0.0.0.1/24", NextHop: "1.1.1.1."}, + }, + } + assert.Equal(t, false, NetworkEqual(o, n), "be the same.") + assert.Equal(t, false, NetworkEqual(n, o), "be the same.") + o = &Network{ + IfAddr: "192.168.1.1", + Netmask: "255.255.0.0", + Routes: []*Route{ + {Prefix: "0.0.0.0/24", NextHop: "1.1.1.1."}, + }, + } + assert.Equal(t, true, NetworkEqual(o, n), "be the same.") + assert.Equal(t, true, NetworkEqual(n, o), "be the same.") + o.IfAddr = "182.168.1.1" + assert.Equal(t, false, NetworkEqual(o, n), "be the same.") + assert.Equal(t, false, NetworkEqual(n, o), "be the same.") + o.IfAddr = "192.168.1.1" + assert.Equal(t, true, NetworkEqual(o, n), "be the same.") + o.IfAddr = "255.255.255.0" + assert.Equal(t, false, NetworkEqual(n, o), "be the same.") +} diff --git a/pkg/models/point.go b/pkg/models/point.go new file mode 100755 index 0000000..6ba41ed --- /dev/null +++ b/pkg/models/point.go @@ -0,0 +1,56 @@ +package models + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/network" +) + +type Point struct { + UUID string `json:"uuid"` + Alias string `json:"alias"` + Network string `json:"network"` + User string `json:"user"` + Protocol string `json:"protocol"` + Server string `json:"server"` + Uptime int64 `json:"uptime"` + Status string `json:"status"` + IfName string `json:"device"` + Client libol.SocketClient `json:"-"` + Device network.Taper `json:"-"` + System string `json:"system"` +} + +func NewPoint(c libol.SocketClient, d network.Taper, proto string) (w *Point) { + return &Point{ + Alias: "", + Server: c.LocalAddr(), + Client: c, + Device: d, + Protocol: proto, + } +} + +func (p *Point) Update() *Point { + client := p.Client + if client != nil { + p.Uptime = client.UpTime() + p.Status = client.Status().String() + } + device := p.Device + if device != nil { + p.IfName = device.Name() + } + return p +} + +func (p *Point) SetUser(user *User) { + p.User = user.Name + p.UUID = user.UUID + if len(p.UUID) > 13 { + // too long and using short uuid. 
+ p.UUID = p.UUID[:13] + } + p.Network = user.Network + p.System = user.System + p.Alias = user.Alias +} diff --git a/pkg/models/schema.go b/pkg/models/schema.go new file mode 100755 index 0000000..cf4e973 --- /dev/null +++ b/pkg/models/schema.go @@ -0,0 +1,115 @@ +package models + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/schema" +) + +func NewPointSchema(p *Point) schema.Point { + client, dev := p.Client, p.Device + sts := client.Statistics() + return schema.Point{ + Uptime: p.Uptime, + UUID: p.UUID, + Alias: p.Alias, + User: p.User, + Protocol: p.Protocol, + Remote: client.String(), + Device: dev.Name(), + RxBytes: sts[libol.CsRecvOkay], + TxBytes: sts[libol.CsSendOkay], + ErrPkt: sts[libol.CsSendError], + State: client.Status().String(), + Network: p.Network, + AliveTime: client.AliveTime(), + System: p.System, + } +} + +func NewLinkSchema(l *Link) schema.Link { + sts := l.Status() + return schema.Link{ + UUID: sts.UUID, + User: sts.User, + Uptime: sts.Uptime, + Device: sts.Device, + Protocol: sts.Protocol, + Server: sts.Remote, + State: sts.State, + RxBytes: sts.RxBytes, + TxBytes: sts.TxBytes, + ErrPkt: sts.ErrPkt, + Network: sts.Network, + AliveTime: sts.AliveTime, + } +} + +func NewNeighborSchema(n *Neighbor) schema.Neighbor { + return schema.Neighbor{ + Uptime: n.UpTime(), + HwAddr: n.HwAddr.String(), + IpAddr: n.IpAddr.String(), + Client: n.Client, + Network: n.Network, + Device: n.Device, + } +} + +func NewOnLineSchema(l *Line) schema.OnLine { + return schema.OnLine{ + HitTime: l.LastTime(), + UpTime: l.UpTime(), + EthType: l.EthType, + IpSource: l.IpSource.String(), + IpDest: l.IpDest.String(), + IpProto: libol.IpProto2Str(l.IpProtocol), + PortSource: l.PortSource, + PortDest: l.PortDest, + } +} + +func NewUserSchema(u *User) schema.User { + return schema.User{ + Name: u.Name, + Password: u.Password, + Alias: u.Alias, + Network: u.Network, + Role: u.Role, + Lease: u.Lease.Format(libol.LeaseTime), + } +} + +func SchemaToUserModel(user *schema.User) *User { + lease, _ := libol.GetLocalTime(libol.LeaseTime, user.Lease) + obj := &User{ + Alias: user.Alias, + Password: user.Password, + Name: user.Name, + Network: user.Network, + Role: user.Role, + Lease: lease, + } + obj.Update() + return obj +} + +func NewNetworkSchema(n *Network) schema.Network { + sn := schema.Network{ + Name: n.Name, + IfAddr: n.IfAddr, + IpStart: n.IpStart, + IpEnd: n.IpEnd, + Netmask: n.Netmask, + Routes: make([]schema.PrefixRoute, 0, 32), + } + for _, route := range n.Routes { + sn.Routes = append(sn.Routes, + schema.PrefixRoute{ + NextHop: route.NextHop, + Prefix: route.Prefix, + Metric: route.Metric, + Mode: route.Mode, + }) + } + return sn +} diff --git a/pkg/models/user.go b/pkg/models/user.go new file mode 100755 index 0000000..01e8c06 --- /dev/null +++ b/pkg/models/user.go @@ -0,0 +1,60 @@ +package models + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "runtime" + "strings" + "time" +) + +type User struct { + Alias string `json:"alias"` + Name string `json:"name"` + Network string `json:"network"` + Password string `json:"password"` + UUID string `json:"uuid"` + System string `json:"system"` + Role string `json:"type"` // admin , guest or ldap + Last libol.SocketClient `json:"last"` // lastly accessed by this. 
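+ // Lease is (de)serialized with the libol.LeaseTime layout;
+ // see NewUserSchema and SchemaToUserModel in schema.go.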
+ Lease time.Time `json:"leastTime"` + UpdateAt int64 +} + +func NewUser(name, network, password string) *User { + return &User{ + Name: name, + Password: password, + Network: network, + System: runtime.GOOS, + Role: "guest", + } +} + +func (u *User) String() string { + return fmt.Sprintf("%s, %s, %s", u.Id(), u.Password, u.Role) +} + +func (u *User) Update() { + // to support lower version + if u.Network == "" { + if strings.Contains(u.Name, "@") { + u.Network = strings.SplitN(u.Name, "@", 2)[1] + } + } + if u.Network == "" { + u.Network = "default" + } + if strings.Contains(u.Name, "@") { + u.Name = strings.SplitN(u.Name, "@", 2)[0] + } + u.Alias = strings.ToLower(u.Alias) + if u.UUID == "" { + u.UUID = u.Alias + } + u.UpdateAt = time.Now().Unix() +} + +func (u *User) Id() string { + return u.Name + "@" + u.Network +} diff --git a/pkg/network/brctl_linux.go b/pkg/network/brctl_linux.go new file mode 100755 index 0000000..f08ed66 --- /dev/null +++ b/pkg/network/brctl_linux.go @@ -0,0 +1,142 @@ +package network + +import ( + "fmt" + "github.com/luscis/openlan/pkg/libol" + "github.com/vishvananda/netlink" + "os" + "strconv" +) + +type BrCtl struct { + Name string + Path string + Mtu int +} + +func NewBrCtl(name string, mtu int) (b *BrCtl) { + if mtu == 0 { + mtu = 1500 + } + return &BrCtl{ + Name: name, + Mtu: mtu, + } +} + +func (b *BrCtl) Has() bool { + if _, err := netlink.LinkByName(b.Name); err == nil { + return true + } + return false +} + +func (b *BrCtl) SysPath(fun string) string { + if b.Path == "" { + b.Path = fmt.Sprintf("/sys/devices/virtual/net/%s/bridge", b.Name) + } + return fmt.Sprintf("%s/%s", b.Path, fun) +} + +func (b *BrCtl) Stp(on bool) error { + file := b.SysPath("stp_state") + fp, err := os.OpenFile(file, os.O_RDWR, 0600) + if err != nil { + return err + } + defer fp.Close() + if on { + if _, err := fp.Write([]byte("1")); err != nil { + return err + } + } else { + if _, err := fp.Write([]byte("0")); err != nil { + return err + } + } + return nil +} + +func (b *BrCtl) Delay(delay int) error { // by second + file := b.SysPath("forward_delay") + fp, err := os.OpenFile(file, os.O_RDWR, 0600) + if err != nil { + return err + } + defer fp.Close() + if _, err := fp.Write([]byte(strconv.Itoa(delay * 100))); err != nil { + return err + } + return nil +} + +func (b *BrCtl) AddPort(port string) error { + link, err := netlink.LinkByName(port) + if err != nil { + return libol.NewErr("LinkByName " + err.Error()) + } + if err := netlink.LinkSetUp(link); err != nil { + return libol.NewErr("LinkSetUp " + err.Error()) + } + la := netlink.LinkAttrs{TxQLen: -1, Name: b.Name} + bridge := &netlink.Bridge{LinkAttrs: la} + if err := netlink.LinkSetMaster(link, bridge); err != nil { + return libol.NewErr("LinkSetMaster " + err.Error()) + } + return nil +} + +func (b *BrCtl) DelPort(port string) error { + link, err := netlink.LinkByName(port) + if err != nil { + return err + } + if err := netlink.LinkSetNoMaster(link); err != nil { + return err + } + return nil +} + +func (b *BrCtl) CallIptables(value int) error { + file := b.SysPath("nf_call_iptables") + fp, err := os.OpenFile(file, os.O_RDWR, 0600) + if err != nil { + return err + } + defer fp.Close() + if _, err := fp.Write([]byte(strconv.Itoa(value))); err != nil { + return err + } + return nil +} + +type BrPort struct { + Name string + Path string +} + +func NewBrPort(name string) (p *BrPort) { + return &BrPort{ + Name: name, + } +} + +func (p *BrPort) SysPath(fun string) string { + if p.Path == "" { + p.Path = 
fmt.Sprintf("/sys/devices/virtual/net/%s/brport/", p.Name) + } + return fmt.Sprintf("%s/%s", p.Path, fun) +} + +func (p *BrPort) Cost(cost int) error { + file := p.SysPath("path_cost") + fp, err := os.OpenFile(file, os.O_RDWR, 0600) + if err != nil { + return err + } + defer fp.Close() + if _, err := fp.Write([]byte(strconv.Itoa(cost))); err != nil { + return err + } + return nil +} diff --git a/pkg/network/bridge_linux.go b/pkg/network/bridge_linux.go new file mode 100755 index 0000000..5ca622a --- /dev/null +++ b/pkg/network/bridge_linux.go @@ -0,0 +1,151 @@ +package network + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/vishvananda/netlink" +) + +type LinuxBridge struct { + sts DeviceStats + address *netlink.Addr + ipMtu int + name string + device netlink.Link + ctl *BrCtl + out *libol.SubLogger +} + +func NewLinuxBridge(name string, mtu int) *LinuxBridge { + if mtu == 0 { + mtu = 1500 + } + b := &LinuxBridge{ + name: name, + ipMtu: mtu, + ctl: NewBrCtl(name, mtu), + out: libol.NewSubLogger(name), + } + Bridges.Add(b) + return b +} + +func (b *LinuxBridge) Kernel() string { + return b.name +} + +func (b *LinuxBridge) Open(addr string) { + b.out.Debug("LinuxBridge.Open") + link, _ := netlink.LinkByName(b.name) + if link == nil { + br := &netlink.Bridge{ + LinkAttrs: netlink.LinkAttrs{ + TxQLen: -1, + Name: b.name, + MTU: b.ipMtu, + }, + } + err := netlink.LinkAdd(br) + if err != nil { + b.out.Error("LinuxBridge.Open: %s", err) + return + } + link, err = netlink.LinkByName(b.name) + if link == nil { + b.out.Error("LinuxBridge.Open: %s", err) + return + } + } + if err := netlink.LinkSetUp(link); err != nil { + libol.Error("LinuxBridge.Open: %s", err) + } + b.out.Info("LinuxBridge.Open success") + if addr != "" { + ipAddr, err := netlink.ParseAddr(addr) + if err != nil { + b.out.Error("LinuxBridge.Open: ParseCIDR %s", err) + } + if err := netlink.AddrAdd(link, ipAddr); err != nil { + b.out.Error("LinuxBridge.Open: SetLinkIp: %s", err) + } + b.address = ipAddr + } + b.device = link +} + +func (b *LinuxBridge) Close() error { + var err error + if b.device != nil && b.address != nil { + if err = netlink.AddrDel(b.device, b.address); err != nil { + b.out.Error("LinuxBridge.Close: UnsetLinkIp %s", err) + } + } + return err +} + +func (b *LinuxBridge) AddSlave(name string) error { + if err := b.ctl.AddPort(name); err != nil { + b.out.Error("LinuxBridge.AddSlave: %s", name) + return err + } + b.out.Info("LinuxBridge.AddSlave: %s", name) + return nil +} + +func (b *LinuxBridge) DelSlave(name string) error { + if err := b.ctl.DelPort(name); err != nil { + b.out.Error("LinuxBridge.DelSlave: %s", name) + return err + } + b.out.Info("LinuxBridge.DelSlave: %s", name) + return nil +} + +func (b *LinuxBridge) ListSlave() <-chan Taper { + data := make(chan Taper, 32) + go func() { + data <- nil + }() + b.out.Warn("LinuxBridge.ListSlave: notSupport") + return data +} + +func (b *LinuxBridge) Type() string { + return ProviderLin +} + +func (b *LinuxBridge) String() string { + return b.name +} + +func (b *LinuxBridge) Name() string { + return b.name +} + +func (b *LinuxBridge) Mtu() int { + return b.ipMtu +} + +func (b *LinuxBridge) Stp(enable bool) error { + return b.ctl.Stp(enable) +} + +func (b *LinuxBridge) Delay(value int) error { + return b.ctl.Delay(value) +} + +func (b *LinuxBridge) ListMac() <-chan *MacFdb { + data := make(chan *MacFdb, 32) + go func() { + data <- nil + }() + b.out.Warn("LinuxBridge.ListMac: notSupport") + return data +} + +func (b *LinuxBridge) Stats() DeviceStats 
{ + return b.sts +} + +func (b *LinuxBridge) CallIptables(value int) error { + return b.ctl.CallIptables(value) +} diff --git a/pkg/network/bridge_virtual.go b/pkg/network/bridge_virtual.go new file mode 100755 index 0000000..33c9b39 --- /dev/null +++ b/pkg/network/bridge_virtual.go @@ -0,0 +1,334 @@ +package network + +import ( + "errors" + "github.com/luscis/openlan/pkg/libol" + "net" + "sync" + "time" +) + +type VirtualBridge struct { + ipMtu int + name string + lock sync.RWMutex + ports map[string]Taper + macs map[string]*MacFdb + done chan bool + ticker *time.Ticker + timeout int + address string + kernel Taper + out *libol.SubLogger + sts DeviceStats +} + +func NewVirtualBridge(name string, mtu int) *VirtualBridge { + b := &VirtualBridge{ + name: name, + ipMtu: mtu, + ports: make(map[string]Taper, 1024), + macs: make(map[string]*MacFdb, 1024), + done: make(chan bool), + ticker: time.NewTicker(5 * time.Second), + timeout: 5 * 60, + out: libol.NewSubLogger(name), + } + Bridges.Add(b) + return b +} + +func (b *VirtualBridge) Open(addr string) { + tapCfg := TapConfig{ + Type: TAP, + Mtu: b.ipMtu, + } + b.out.Info("VirtualBridge.Open %s", addr) + libol.Go(b.Start) + if tap, err := NewKernelTap("", tapCfg); err != nil { + b.out.Error("VirtualBridge.Open new kernel %s", err) + } else { + out, err := libol.IpLinkUp(tap.Name()) + if err != nil { + b.out.Error("VirtualBridge.Open IpAddr %s:%s", err, out) + } + b.kernel = tap + b.out.Info("VirtualBridge.Open %s", tap.Name()) + _ = b.AddSlave(tap.name) + } + if addr != "" && b.kernel != nil { + b.address = addr + if out, err := libol.IpAddrAdd(b.kernel.Name(), b.address); err != nil { + b.out.Error("VirtualBridge.Open IpAddr %s:%s", err, out) + } + } +} + +func (b *VirtualBridge) Kernel() string { + if b.kernel == nil { + return "" + } + return b.kernel.Name() +} + +func (b *VirtualBridge) Close() error { + if b.kernel != nil { + if b.address != "" { + out, err := libol.IpAddrDel(b.kernel.Name(), b.address) + if err != nil { + b.out.Error("VirtualBridge.Close: IpAddr %s:%s", err, out) + } + } + _ = b.kernel.Close() + } + b.ticker.Stop() + b.done <- true + return nil +} + +func (b *VirtualBridge) AddSlave(name string) error { + tap := Taps.Get(name) + if tap == nil { + return libol.NewErr("%s notFound", name) + } + _ = tap.SetMaster(b) + b.lock.Lock() + b.ports[name] = tap + b.lock.Unlock() + b.out.Info("VirtualBridge.AddSlave: %s", name) + libol.Go(func() { + for { + data := make([]byte, b.ipMtu) + n, err := tap.Recv(data) + if err != nil || n == 0 { + break + } + if libol.HasLog(libol.DEBUG) { + libol.Debug("VirtualBridge.KernelTap: %s % x", tap.Name(), data[:20]) + } + m := &Framer{Data: data[:n], Source: tap} + _ = b.Input(m) + } + }) + return nil +} + +func (b *VirtualBridge) DelSlave(name string) error { + b.lock.Lock() + defer b.lock.Unlock() + if _, ok := b.ports[name]; ok { + delete(b.ports, name) + } + b.out.Info("VirtualBridge.DelSlave: %s", name) + return nil +} + +func (b *VirtualBridge) ListSlave() <-chan Taper { + data := make(chan Taper, 32) + go func() { + b.lock.RLock() + defer b.lock.RUnlock() + for _, obj := range b.ports { + data <- obj + } + data <- nil + }() + return data +} + +func (b *VirtualBridge) Type() string { + return ProviderVir +} + +func (b *VirtualBridge) String() string { + return b.name +} + +func (b *VirtualBridge) Name() string { + return b.name +} + +func (b *VirtualBridge) Forward(m *Framer) error { + if err := b.UniCast(m); err != nil { + _ = b.Flood(m) + } + return nil +} + +func (b *VirtualBridge) 
Expire() error { + deletes := make([]string, 0, 1024) + //collect need deleted. + b.lock.RLock() + for index, learn := range b.macs { + now := time.Now().Unix() + if now-learn.Uptime > int64(b.timeout) { + deletes = append(deletes, index) + } + } + b.lock.RUnlock() + b.out.Debug("VirtualBridge.Expire delete %d", len(deletes)) + //execute delete. + b.lock.Lock() + for _, d := range deletes { + if _, ok := b.macs[d]; ok { + delete(b.macs, d) + b.out.Event("VirtualBridge.Expire: delete %s", d) + } + } + b.lock.Unlock() + return nil +} + +func (b *VirtualBridge) Start() { + libol.Go(func() { + for { + select { + case <-b.done: + return + case t := <-b.ticker.C: + b.out.Log("VirtualBridge.Start: Tick at %s", t) + _ = b.Expire() + } + } + }) +} + +func (b *VirtualBridge) Input(m *Framer) error { + b.sts.Recv++ + b.Learn(m) + return b.Forward(m) +} + +func (b *VirtualBridge) Eth2Str(addr []byte) string { + if len(addr) < 6 { + return "" + } + return net.HardwareAddr(addr).String() +} + +func (b *VirtualBridge) Learn(m *Framer) { + mac := m.Data[6:12] + if mac[0]&0x01 == 0x01 { + return + } + key := b.Eth2Str(mac) + if l := b.GetMac(key); l != nil { + b.UpdateMac(key, m.Source) + return + } + learn := &MacFdb{ + Device: m.Source, + Uptime: time.Now().Unix(), + NewTime: time.Now().Unix(), + Address: make([]byte, 6), + } + copy(learn.Address, mac) + b.out.Event("VirtualBridge.Learn: %s on %s", key, m.Source) + b.AddMac(key, learn) +} + +func (b *VirtualBridge) GetMac(mac string) *MacFdb { + b.lock.RLock() + defer b.lock.RUnlock() + if l, ok := b.macs[mac]; ok { + return l + } + return nil +} + +func (b *VirtualBridge) AddMac(mac string, fdb *MacFdb) { + b.lock.Lock() + defer b.lock.Unlock() + b.macs[mac] = fdb +} + +func (b *VirtualBridge) UpdateMac(mac string, device Taper) { + b.lock.RLock() + defer b.lock.RUnlock() + if fdb, ok := b.macs[mac]; ok { + fdb.Uptime = time.Now().Unix() + fdb.Device = device + } +} + +func (b *VirtualBridge) ListMac() <-chan *MacFdb { + data := make(chan *MacFdb, 32) + go func() { + b.lock.RLock() + defer b.lock.RUnlock() + for _, obj := range b.macs { + data <- obj + } + data <- nil + }() + return data +} + +func (b *VirtualBridge) Flood(m *Framer) error { + data := m.Data + from := m.Source + if b.out.Has(libol.FLOW) { + b.out.Flow("VirtualBridge.Flood: % x", data[:20]) + } + outs := make([]Taper, 0, 32) + b.lock.RLock() + for _, port := range b.ports { + if from != port { + outs = append(outs, port) + } + } + b.lock.RUnlock() + for _, port := range outs { + if b.out.Has(libol.FLOW) { + b.out.Flow("VirtualBridge.Flood: %s % x", port, data[:20]) + } + b.sts.Send++ + if _, err := port.Send(data); err != nil { + b.out.Error("VirtualBridge.Flood: %s %s", port, err) + } + } + return nil +} + +func (b *VirtualBridge) UniCast(m *Framer) error { + data := m.Data + from := m.Source + dest := b.Eth2Str(data[:6]) + learn := b.GetMac(dest) + if learn == nil { + return errors.New(dest + " notFound") + } + out := learn.Device + if out != from && out.Has(UsUp) { // out should running + b.sts.Send++ + if _, err := out.Send(data); err != nil { + b.out.Warn("VirtualBridge.UniCast: %s %s", out, err) + } + } else { + b.sts.Drop++ + } + if b.out.Has(libol.FLOW) { + b.out.Flow("VirtualBridge.UniCast: %s to %s % x", from, out, data[:20]) + } + return nil +} + +func (b *VirtualBridge) Mtu() int { + return b.ipMtu +} + +func (b *VirtualBridge) Stp(enable bool) error { + return libol.NewErr("operation notSupport") +} + +func (b *VirtualBridge) Delay(value int) error { + return 
libol.NewErr("operation notSupport") +} + +func (b *VirtualBridge) Stats() DeviceStats { + return b.sts +} + +func (b *VirtualBridge) CallIptables(value int) error { + return libol.NewErr("operation notSupport") +} diff --git a/pkg/network/bridger.go b/pkg/network/bridger.go new file mode 100755 index 0000000..6332186 --- /dev/null +++ b/pkg/network/bridger.go @@ -0,0 +1,89 @@ +package network + +import ( + "sync" +) + +const ( + ProviderVir = "virtual" + ProviderKer = "kernel" + ProviderLin = "linux" +) + +type MacFdb struct { + Address []byte + Device Taper + Uptime int64 + NewTime int64 +} + +type Bridger interface { + Type() string + Name() string + Open(addr string) + Close() error + AddSlave(name string) error + DelSlave(name string) error + ListSlave() <-chan Taper + Mtu() int + Stp(enable bool) error + Delay(value int) error + Kernel() string // name in kernel. + ListMac() <-chan *MacFdb + String() string + Stats() DeviceStats + CallIptables(value int) error +} + +type bridger struct { + lock sync.RWMutex + index int + devices map[string]Bridger +} + +func (t *bridger) Add(br Bridger) { + t.lock.Lock() + defer t.lock.Unlock() + if t.devices == nil { + t.devices = make(map[string]Bridger, 1024) + } + t.devices[br.Name()] = br +} + +func (t *bridger) Get(name string) Bridger { + t.lock.RLock() + defer t.lock.RUnlock() + if t.devices == nil { + return nil + } + if t, ok := t.devices[name]; ok { + return t + } + return nil +} + +func (t *bridger) Del(name string) { + t.lock.Lock() + defer t.lock.Unlock() + if t.devices == nil { + return + } + if _, ok := t.devices[name]; ok { + delete(t.devices, name) + } +} + +func (t *bridger) List() <-chan Bridger { + data := make(chan Bridger, 32) + go func() { + t.lock.RLock() + defer t.lock.RUnlock() + for _, obj := range t.devices { + data <- obj + } + data <- nil + }() + return data +} + +var Bridges = &bridger{} diff --git a/pkg/network/bridger_linux.go b/pkg/network/bridger_linux.go new file mode 100755 index 0000000..2c727a4 --- /dev/null +++ b/pkg/network/bridger_linux.go @@ -0,0 +1,8 @@ +package network + +func NewBridger(provider, name string, ifMtu int) Bridger { + if provider == ProviderVir { + return NewVirtualBridge(name, ifMtu) + } + return NewLinuxBridge(name, ifMtu) +} diff --git a/pkg/network/bridger_others.go b/pkg/network/bridger_others.go new file mode 100755 index 0000000..d6e599b --- /dev/null +++ b/pkg/network/bridger_others.go @@ -0,0 +1,8 @@ +// +build !linux + +package network + +func NewBridger(provider, name string, ifMtu int) Bridger { + // others platform not support linux bridge. + return NewVirtualBridge(name, ifMtu) +} diff --git a/pkg/network/bridger_test.go b/pkg/network/bridger_test.go new file mode 100755 index 0000000..7210348 --- /dev/null +++ b/pkg/network/bridger_test.go @@ -0,0 +1,64 @@ +package network + +import ( + "sync" + "testing" +) + +func TestBridgeWriteAndReadByTap(t *testing.T) { + var wg sync.WaitGroup + + //open bridge. 
+ br := NewBridger("linux", "br-test", 1500) + br.Open("") + + //open tap kernel + dev01, err := NewKernelTap("true", TapConfig{Type: TAP}) + if err != nil { + t.Errorf("Tap.Open %s", err) + return + } + dev02, err := NewKernelTap("true", TapConfig{Type: TAP}) + if err != nil { + t.Errorf("Tap.Open %s", err) + return + } + _ = br.AddSlave(dev01.name) + _ = br.AddSlave(dev02.name) + + wg.Add(1) + go func() { + //t.Logf("Tap.write: %s\n", dev01.Name()) + + frame := make([]byte, 65) + for i := 0; i < 64; i++ { + frame[i] = uint8(i) + } + //t.Logf("Tap.write: %x", frame) + n, err := dev01.Write(frame) + if err != nil { + t.Errorf("Tap.write: %s", err) + } + if n != len(frame) { + t.Errorf("Tap.write: %d", n) + } + wg.Done() + }() + + wg.Add(1) + go func() { + frame := make([]byte, 65) + t.Logf("Tap.read: %s\n", dev02.Name()) + + n, err := dev02.Read(frame) + if err != nil { + t.Errorf("Tap.read: %s", err) + } + if n != len(frame) { + t.Errorf("Tap.read: %d", n) + } + wg.Done() + }() + + wg.Wait() +} diff --git a/pkg/network/firewall.go b/pkg/network/firewall.go new file mode 100755 index 0000000..87a7499 --- /dev/null +++ b/pkg/network/firewall.go @@ -0,0 +1,180 @@ +package network + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/moby/libnetwork/iptables" + "sync" +) + +const ( + OLCInput = "OPENLAN-IN" + OLCForward = "OPENLAN-FWD" + OLCOutput = "OPENLAN-OUT" + OLCPre = "OPENLAN-PRE" + OLCPost = "OPENLAN-POST" +) + +type FireWall struct { + lock sync.Mutex + chains IpChains + rules IpRules +} + +func NewFireWall(flows []config.FlowRule) *FireWall { + f := &FireWall{ + chains: make(IpChains, 0, 8), + rules: make(IpRules, 0, 32), + } + // Load custom rules. + for _, rule := range flows { + f.rules = f.rules.Add(IpRule{ + Table: rule.Table, + Chain: rule.Chain, + Source: rule.Source, + Dest: rule.Dest, + Jump: rule.Jump, + ToSource: rule.ToSource, + ToDest: rule.ToDest, + Comment: rule.Comment, + Proto: rule.Proto, + Match: rule.Match, + DstPort: rule.DstPort, + SrcPort: rule.SrcPort, + Input: rule.Input, + Output: rule.Output, + }) + } + return f +} + +func (f *FireWall) addOLC() { + f.AddChain(IpChain{Table: TFilter, Name: OLCInput}) + f.AddChain(IpChain{Table: TFilter, Name: OLCForward}) + f.AddChain(IpChain{Table: TFilter, Name: OLCOutput}) + f.AddChain(IpChain{Table: TNat, Name: OLCPre}) + f.AddChain(IpChain{Table: TNat, Name: OLCInput}) + f.AddChain(IpChain{Table: TNat, Name: OLCPost}) + f.AddChain(IpChain{Table: TNat, Name: OLCOutput}) + f.AddChain(IpChain{Table: TMangle, Name: OLCPre}) + f.AddChain(IpChain{Table: TMangle, Name: OLCInput}) + f.AddChain(IpChain{Table: TMangle, Name: OLCForward}) + f.AddChain(IpChain{Table: TMangle, Name: OLCPost}) + f.AddChain(IpChain{Table: TMangle, Name: OLCOutput}) + f.AddChain(IpChain{Table: TRaw, Name: OLCPre}) + f.AddChain(IpChain{Table: TRaw, Name: OLCOutput}) +} + +func (f *FireWall) jumpOLC() { + f.AddRule(IpRule{Order: "-I", Table: TFilter, Chain: CInput, Jump: OLCInput}) + f.AddRule(IpRule{Order: "-I", Table: TFilter, Chain: CForward, Jump: OLCForward}) + f.AddRule(IpRule{Order: "-I", Table: TFilter, Chain: COutput, Jump: OLCOutput}) + f.AddRule(IpRule{Order: "-I", Table: TNat, Chain: CPre, Jump: OLCPre}) + f.AddRule(IpRule{Order: "-I", Table: TNat, Chain: CInput, Jump: OLCInput}) + f.AddRule(IpRule{Order: "-I", Table: TNat, Chain: CPost, Jump: OLCPost}) + f.AddRule(IpRule{Order: "-I", Table: TNat, Chain: COutput, Jump: OLCOutput}) + f.AddRule(IpRule{Order: "-I", Table: TMangle, Chain: 
CPre, Jump: OLCPre}) + f.AddRule(IpRule{Order: "-I", Table: TMangle, Chain: CInput, Jump: OLCInput}) + f.AddRule(IpRule{Order: "-I", Table: TMangle, Chain: CForward, Jump: OLCForward}) + f.AddRule(IpRule{Order: "-I", Table: TMangle, Chain: CPost, Jump: OLCPost}) + f.AddRule(IpRule{Order: "-I", Table: TMangle, Chain: COutput, Jump: OLCOutput}) + f.AddRule(IpRule{Order: "-I", Table: TRaw, Chain: CPre, Jump: OLCPre}) + f.AddRule(IpRule{Order: "-I", Table: TRaw, Chain: COutput, Jump: OLCOutput}) +} + +func (f *FireWall) Initialize() { + // Init chains + f.addOLC() + f.jumpOLC() +} + +func (f *FireWall) AddChain(chain IpChain) { + f.chains = f.chains.Add(chain) +} + +func (f *FireWall) AddRule(rule IpRule) { + f.rules = f.rules.Add(rule) +} + +func (f *FireWall) InstallRule(rule IpRule) error { + f.lock.Lock() + defer f.lock.Unlock() + order := rule.Order + if order == "" { + order = "-A" + } + if _, err := rule.Opr(order); err != nil { + return err + } + f.rules = f.rules.Add(rule) + return nil +} + +func (f *FireWall) install() { + for _, c := range f.chains { + if _, err := c.Opr("-N"); err != nil { + libol.Error("FireWall.install %s", err) + } + } + for _, r := range f.rules { + order := r.Order + if order == "" { + order = "-A" + } + if _, err := r.Opr(order); err != nil { + libol.Error("FireWall.install %s", err) + } + } +} + +func (f *FireWall) Start() { + f.lock.Lock() + defer f.lock.Unlock() + libol.Info("FireWall.Start") + f.install() + iptables.OnReloaded(func() { + libol.Info("FireWall.Start OnReloaded") + f.lock.Lock() + defer f.lock.Unlock() + f.install() + }) +} + +func (f *FireWall) cancel() { + for _, r := range f.rules { + if _, err := r.Opr("-D"); err != nil { + libol.Warn("FireWall.cancel %s", err) + } + } + for _, c := range f.chains { + if _, err := c.Opr("-X"); err != nil { + libol.Warn("FireWall.cancel %s", err) + } + } +} + +func (f *FireWall) CancelRule(rule IpRule) error { + f.lock.Lock() + defer f.lock.Unlock() + if _, err := rule.Opr("-D"); err != nil { + return err + } + f.rules = f.rules.Remove(rule) + return nil +} + +func (f *FireWall) Stop() { + f.lock.Lock() + defer f.lock.Unlock() + libol.Info("FireWall.Stop") + f.cancel() +} + +func (f *FireWall) Refresh() { + f.cancel() + f.install() +} + +func init() { + IpInit() +} diff --git a/pkg/network/framer.go b/pkg/network/framer.go new file mode 100755 index 0000000..d07640f --- /dev/null +++ b/pkg/network/framer.go @@ -0,0 +1,7 @@ +package network + +type Framer struct { + Data []byte + Source Taper + Output Taper +} diff --git a/pkg/network/iptables.go b/pkg/network/iptables.go new file mode 100755 index 0000000..2e935fc --- /dev/null +++ b/pkg/network/iptables.go @@ -0,0 +1,245 @@ +package network + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/moby/libnetwork/iptables" + "runtime" + "strconv" + "strings" +) + +const ( + TNat = "nat" + TRaw = "raw" + TMangle = "mangle" + TFilter = "filter" + CInput = "INPUT" + CForward = "FORWARD" + COutput = "OUTPUT" + CPost = "POSTROUTING" + CPre = "PREROUTING" + CMasq = "MASQUERADE" + CNoTrk = "NOTRACK" + CSnat = "SNAT" +) + +type IpRule struct { + Table string + Chain string + Source string + ToSource string + NoSource string + Dest string + ToDest string + NoDest string + Proto string + DstPort string + SrcPort string + Input string + Output string + Comment string + Jump string + SetMss int + Order string + Match string + TcpFlag []string +} + +type IpRules []IpRule + +func (ru IpRule) Itoa(value int) string { + return strconv.Itoa(value) +} + +func (ru 
IpRule) Args() []string { + var args []string + + if ru.Source != "" { + args = append(args, "-s", ru.Source) + } else if ru.NoSource != "" { + args = append(args, "!") + args = append(args, "-s", ru.NoSource) + } + if ru.Dest != "" { + args = append(args, "-d", ru.Dest) + } else if ru.NoDest != "" { + args = append(args, "!") + args = append(args, "-d", ru.NoDest) + } + if ru.Proto != "" { + args = append(args, "-p", ru.Proto) + } + if ru.Match != "" { + args = append(args, "-m", ru.Match) + } + if len(ru.TcpFlag) > 0 { + args = append(args, "--tcp-flags", ru.TcpFlag[0], ru.TcpFlag[1]) + } + if len(ru.SrcPort) > 0 { + args = append(args, "--sport", ru.SrcPort) + } + if len(ru.DstPort) > 0 { + if ru.Match == "multiport" { + args = append(args, "--dports", ru.DstPort) + } else { + args = append(args, "--dport", ru.DstPort) + } + } + if ru.Input != "" { + args = append(args, "-i", ru.Input) + } + if ru.Output != "" { + args = append(args, "-o", ru.Output) + } + if ru.Jump != "" { + jump := strings.ToUpper(ru.Jump) + if jump == "DROP" || jump == "ACCEPT" { + args = append(args, "-j", jump) + } else { + args = append(args, "-j", ru.Jump) + } + if ru.SetMss > 0 { + args = append(args, "--set-mss", ru.Itoa(ru.SetMss)) + } + } else { + args = append(args, "-j", "ACCEPT") + } + if ru.ToSource != "" { + args = append(args, "--to-source", ru.ToSource) + } + if ru.ToDest != "" { + args = append(args, "--to-destination", ru.ToDest) + } + return args +} + +func (ru IpRule) Exist() bool { + table := iptables.Table(ru.Table) + chain := ru.Chain + args := ru.Args() + return iptables.Exists(table, chain, args...) +} + +func (ru IpRule) String() string { + return ru.Table + " " + ru.Chain + " " + strings.Join(ru.Args(), " ") +} + +func (ru IpRule) Eq(obj IpRule) bool { + return ru.String() == obj.String() +} + +func (ru IpRule) Opr(opr string) ([]byte, error) { + libol.Debug("IpRuleOpr: %s, %v", opr, ru) + table := iptables.Table(ru.Table) + chain := ru.Chain + switch runtime.GOOS { + case "linux": + args := ru.Args() + fullArgs := append([]string{"-t", ru.Table, opr, ru.Chain}, args...) + if opr == "-I" || opr == "-A" { + if iptables.Exists(table, chain, args...) { + return nil, nil + } + } + return iptables.Raw(fullArgs...) 
+ default: + return nil, libol.NewErr("iptables notSupport %s", runtime.GOOS) + } +} + +func (rules IpRules) Add(obj IpRule) IpRules { + if !rules.Has(obj) { + return append(rules, obj) + } + return rules +} + +func (rules IpRules) Has(rule IpRule) bool { + for _, r := range rules { + if r.Eq(rule) { + return true + } + } + return false +} + +func (rules IpRules) Remove(obj IpRule) IpRules { + index := 0 + news := make(IpRules, 0, 32) + find := false + for _, item := range rules { + if !find && item.Eq(obj) { + find = true + continue + } + news[index] = item + index++ + } + return news[:index] +} + +type IpChain struct { + Table string + Name string +} + +type IpChains []IpChain + +func (ch IpChain) Opr(opr string) ([]byte, error) { + libol.Debug("IpChainOpr: %s, %v", opr, ch) + table := iptables.Table(ch.Table) + name := ch.Name + switch runtime.GOOS { + case "linux": + if opr == "-N" { + if iptables.ExistChain(name, table) { + return nil, nil + } + if _, err := iptables.NewChain(name, table, true); err != nil { + return nil, err + } + } else if opr == "-X" { + if err := iptables.RemoveExistingChain(name, table); err != nil { + return nil, err + } + } + default: + return nil, libol.NewErr("iptables notSupport %s", runtime.GOOS) + } + return nil, nil +} + +func (ch IpChain) Eq(obj IpChain) bool { + if ch.Table != obj.Table { + return false + } + if ch.Name != obj.Name { + return false + } + return true +} + +func (chains IpChains) Add(obj IpChain) IpChains { + return append(chains, obj) +} + +func (chains IpChains) Pop(obj IpChain) IpChains { + index := 0 + news := make(IpChains, 0, 32) + find := false + for _, item := range chains { + if !find && item.Eq(obj) { + find = true + continue + } + news[index] = item + index++ + } + return news[:index] +} + +func IpInit() { + if err := iptables.FirewalldInit(); err != nil { + libol.Error("IpInit %s", err) + } +} diff --git a/pkg/network/tap_kernel.go b/pkg/network/tap_kernel.go new file mode 100755 index 0000000..ee7936f --- /dev/null +++ b/pkg/network/tap_kernel.go @@ -0,0 +1,154 @@ +package network + +import ( + "github.com/luscis/openlan/pkg/libol" + "github.com/songgao/water" + "sync" +) + +type KernelTap struct { + lock sync.Mutex + device *water.Interface + master Bridger + tenant string + name string + config TapConfig + ipMtu int +} + +func NewKernelTap(tenant string, c TapConfig) (*KernelTap, error) { + if c.Mtu == 0 { + c.Mtu = 1500 + } + if c.Name == "auto" { + c.Name = Taps.GenName() + } + device, err := WaterNew(c) + if err != nil { + return nil, err + } + tap := &KernelTap{ + tenant: tenant, + device: device, + name: device.Name(), + config: c, + ipMtu: c.Mtu, + } + Taps.Add(tap) + return tap, nil +} + +func (t *KernelTap) Has(v uint) bool { + if v == UsClose { + return t.device == nil + } else if v == UsUp { + return t.device != nil + } + return false +} + +func (t *KernelTap) Type() string { + return ProviderKer +} + +func (t *KernelTap) Tenant() string { + return t.tenant +} + +func (t *KernelTap) IsTun() bool { + return t.config.Type == TUN +} + +func (t *KernelTap) Name() string { + return t.name +} + +func (t *KernelTap) Read(p []byte) (int, error) { + t.lock.Lock() + if t.device == nil { + t.lock.Unlock() + return 0, libol.NewErr("Closed") + } + t.lock.Unlock() + if n, err := t.device.Read(p); err == nil { + return n, nil + } else { + return 0, err + } +} + +func (t *KernelTap) Write(p []byte) (int, error) { + t.lock.Lock() + if t.device == nil { + t.lock.Unlock() + return 0, libol.NewErr("Closed") + } + t.lock.Unlock() + 
return t.device.Write(p) +} + +func (t *KernelTap) Recv(p []byte) (int, error) { + return t.Read(p) +} + +func (t *KernelTap) Send(p []byte) (int, error) { + return t.Write(p) +} + +func (t *KernelTap) Close() error { + t.lock.Lock() + defer t.lock.Unlock() + libol.Debug("KernelTap.Close %s", t.name) + if t.device == nil { + return nil + } + if t.master != nil { + _ = t.master.DelSlave(t.name) + t.master = nil + } + err := t.device.Close() + Taps.Del(t.name) + t.device = nil + return err +} + +func (t *KernelTap) Master() Bridger { + t.lock.Lock() + defer t.lock.Unlock() + return t.master +} + +func (t *KernelTap) SetMaster(dev Bridger) error { + t.lock.Lock() + defer t.lock.Unlock() + if t.master == nil { + t.master = dev + } + return libol.NewErr("already to %s", t.master) +} + +func (t *KernelTap) Up() { + t.lock.Lock() + defer t.lock.Unlock() + libol.Debug("KernelTap.Up %s", t.name) + if _, err := libol.IpLinkUp(t.name); err != nil { + libol.Warn("KernelTap.Up %s: %s", t.name, err) + } +} + +func (t *KernelTap) Down() { + t.lock.Lock() + defer t.lock.Unlock() + libol.Debug("KernelTap.Down %s", t.name) + if _, err := libol.IpLinkDown(t.name); err != nil { + libol.Warn("KernelTap.Down %s: %s", t.name, err) + } +} + +func (t *KernelTap) String() string { + return t.name +} + +func (t *KernelTap) Mtu() int { + return t.ipMtu +} diff --git a/pkg/network/tap_virtual.go b/pkg/network/tap_virtual.go new file mode 100755 index 0000000..8a528bd --- /dev/null +++ b/pkg/network/tap_virtual.go @@ -0,0 +1,200 @@ +package network + +import ( + "github.com/luscis/openlan/pkg/libol" + "sync" +) + +type VirtualTap struct { + lock sync.RWMutex + kernC int + kernQ chan []byte + virtC int + virtQ chan []byte + master Bridger + tenant string + flags uint + cfg TapConfig + name string + ifMtu int + sts DeviceStats +} + +func NewVirtualTap(tenant string, c TapConfig) (*VirtualTap, error) { + name := c.Name + if name == "" { + name = Taps.GenName() + } + tap := &VirtualTap{ + cfg: c, + tenant: tenant, + name: name, + ifMtu: 1514, + } + Taps.Add(tap) + return tap, nil +} + +func (t *VirtualTap) Type() string { + return ProviderVir +} + +func (t *VirtualTap) Tenant() string { + return t.tenant +} + +func (t *VirtualTap) IsTun() bool { + return t.cfg.Type == TUN +} + +func (t *VirtualTap) Name() string { + return t.name +} + +func (t *VirtualTap) hasFlags(v uint) bool { + return t.flags&v == v +} + +func (t *VirtualTap) setFlags(v uint) { + t.flags |= v +} + +func (t *VirtualTap) clearFlags(v uint) { + t.flags &= ^v +} + +func (t *VirtualTap) Has(v uint) bool { + t.lock.RLock() + defer t.lock.RUnlock() + return t.hasFlags(v) +} + +func (t *VirtualTap) Write(p []byte) (int, error) { + if libol.HasLog(libol.DEBUG) { + libol.Debug("VirtualTap.Write: %s % x", t, p[:20]) + } + t.lock.Lock() + defer t.lock.Unlock() + if !t.hasFlags(UsUp) { + return 0, libol.NewErr("notUp") + } + if t.virtC >= t.cfg.VirBuf { + libol.Warn("VirtualTap.Write: buffer fully") + t.sts.Drop++ + return 0, nil + } + t.virtC++ + t.virtQ <- p + return len(p), nil +} + +func (t *VirtualTap) Read(p []byte) (int, error) { + t.lock.Lock() + if !t.hasFlags(UsUp) { + t.lock.Unlock() + return 0, libol.NewErr("notUp") + } + t.lock.Unlock() + data := <-t.kernQ + t.lock.Lock() + t.kernC-- + t.lock.Unlock() + return copy(p, data), nil +} + +func (t *VirtualTap) Recv(p []byte) (int, error) { + t.lock.Lock() + if !t.hasFlags(UsUp) { + t.lock.Unlock() + return 0, libol.NewErr("notUp") + } + t.lock.Unlock() + data := <-t.virtQ + t.lock.Lock() + t.virtC-- + 
t.lock.Unlock() + return copy(p, data), nil +} + +func (t *VirtualTap) Send(p []byte) (int, error) { + if libol.HasLog(libol.DEBUG) { + libol.Debug("VirtualTap.Send: %s % x", t, p[:20]) + } + t.lock.Lock() + defer t.lock.Unlock() + if !t.hasFlags(UsUp) { + return 0, libol.NewErr("notUp") + } + if t.kernC >= t.cfg.KernBuf { + t.sts.Drop++ + libol.Warn("VirtualTap.Send: buffer fully") + return 0, nil + } + t.kernC++ + t.kernQ <- p + return len(p), nil +} + +func (t *VirtualTap) Close() error { + t.Down() + t.lock.Lock() + defer t.lock.Unlock() + if t.hasFlags(UsClose) { + return nil + } + t.setFlags(UsClose) + t.clearFlags(UsUp) + if t.master != nil { + _ = t.master.DelSlave(t.name) + t.master = nil + } + Taps.Del(t.name) + return nil +} + +func (t *VirtualTap) Master() Bridger { + t.lock.RLock() + defer t.lock.RUnlock() + return t.master +} + +func (t *VirtualTap) SetMaster(dev Bridger) error { + t.lock.Lock() + defer t.lock.Unlock() + if t.master == nil { + t.master = dev + } + return libol.NewErr("already to %s", t.master) +} + +func (t *VirtualTap) Up() { + t.lock.Lock() + defer t.lock.Unlock() + if !t.hasFlags(UsUp) { + t.kernC = 0 + t.kernQ = make(chan []byte, t.cfg.KernBuf) + t.virtC = 0 + t.virtQ = make(chan []byte, t.cfg.VirBuf) + t.setFlags(UsUp) + } +} + +func (t *VirtualTap) Down() { + t.lock.Lock() + defer t.lock.Unlock() + if t.hasFlags(UsUp) { + t.clearFlags(UsUp) + close(t.kernQ) + t.kernQ = nil + close(t.virtQ) + t.virtQ = nil + } +} + +func (t *VirtualTap) String() string { + return t.name +} + +func (t *VirtualTap) Mtu() int { + return t.ifMtu +} diff --git a/pkg/network/taper.go b/pkg/network/taper.go new file mode 100755 index 0000000..506dfd0 --- /dev/null +++ b/pkg/network/taper.go @@ -0,0 +1,115 @@ +package network + +import ( + "fmt" + "sync" +) + +const ( + UsClose = uint(0x02) + UsUp = uint(0x04) + TUN = 0x01 + TAP = 0x02 +) + +type DeviceStats struct { + Send int64 `json:"send"` + Recv int64 `json:"recv"` + Drop int64 `json:"drop"` +} + +type Taper interface { + Type() string + IsTun() bool + Name() string + Read([]byte) (int, error) // read data from kernel to user space + Write([]byte) (int, error) // write data from user space to kernel + Send([]byte) (int, error) // send data from virtual bridge to kernel + Recv([]byte) (int, error) // recv data from kernel to virtual bridge + Close() error + Master() Bridger + SetMaster(dev Bridger) error + Up() + Down() + Tenant() string + Mtu() int + String() string + Has(v uint) bool +} + +func NewTaper(tenant string, c TapConfig) (Taper, error) { + if c.Provider == ProviderVir { + return NewVirtualTap(tenant, c) + } + return NewKernelTap(tenant, c) +} + +type tapers struct { + lock sync.RWMutex + index int + devices map[string]Taper +} + +func (t *tapers) GenName() string { + t.lock.Lock() + defer t.lock.Unlock() + t.index++ + return fmt.Sprintf("vir%d", t.index) +} + +func (t *tapers) Add(tap Taper) { + t.lock.Lock() + defer t.lock.Unlock() + if t.devices == nil { + t.devices = make(map[string]Taper, 1024) + } + t.devices[tap.Name()] = tap +} + +func (t *tapers) Get(name string) Taper { + t.lock.RLock() + defer t.lock.RUnlock() + if t.devices == nil { + return nil + } + if t, ok := t.devices[name]; ok { + return t + } + return nil +} + +func (t *tapers) Del(name string) { + t.lock.Lock() + defer t.lock.Unlock() + if t.devices == nil { + return + } + if _, ok := t.devices[name]; ok { + delete(t.devices, name) + } +} + +func (t *tapers) List() <-chan Taper { + data := make(chan Taper, 32) + go func() { + t.lock.RLock() + 
defer t.lock.RUnlock() + for _, obj := range t.devices { + data <- obj + } + data <- nil + }() + return data +} + +var Taps = &tapers{} + +type TapConfig struct { + Provider string + Type int + Network string + Name string + VirBuf int + KernBuf int + Mtu int +} diff --git a/pkg/network/water_others.go b/pkg/network/water_others.go new file mode 100644 index 0000000..7d1c060 --- /dev/null +++ b/pkg/network/water_others.go @@ -0,0 +1,21 @@ +// +build !windows + +package network + +import ( + "github.com/songgao/water" +) + +func WaterNew(c TapConfig) (*water.Interface, error) { + deviceType := water.DeviceType(water.TAP) + if c.Type == TUN { + deviceType = water.TUN + } + cfg := water.Config{DeviceType: deviceType} + if c.Name != "" { + cfg.PlatformSpecificParams = water.PlatformSpecificParams{ + Name: c.Name, + } + } + return water.New(cfg) +} diff --git a/pkg/network/water_windows.go b/pkg/network/water_windows.go new file mode 100755 index 0000000..cb310eb --- /dev/null +++ b/pkg/network/water_windows.go @@ -0,0 +1,34 @@ +package network + +import ( + "github.com/songgao/water" +) + +func WaterNew(c TapConfig) (dev *water.Interface, err error) { + deviceType := water.DeviceType(water.TAP) + if c.Type == TUN { + deviceType = water.TUN + } + cfg := water.Config{DeviceType: deviceType} + if c.Name == "" { + return water.New(cfg) + } + cfg.PlatformSpecificParams = water.PlatformSpecificParams{ + ComponentID: "root\\tap0901", + InterfaceName: c.Name, + Network: c.Network, + } + if dev, err = water.New(cfg); err == nil { + return dev, nil + } + // try again. + cfg.PlatformSpecificParams = water.PlatformSpecificParams{ + ComponentID: "tap0901", + InterfaceName: c.Name, + Network: c.Network, + } + if dev, err = water.New(cfg); err == nil { + return dev, nil + } + return nil, err +} diff --git a/pkg/proxy/http.go b/pkg/proxy/http.go new file mode 100755 index 0000000..e7f8940 --- /dev/null +++ b/pkg/proxy/http.go @@ -0,0 +1,187 @@ +package proxy + +import ( + "encoding/base64" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "io" + "net" + "net/http" + "strings" + "time" +) + +type HttpProxy struct { + users map[string]string + out *libol.SubLogger + server *http.Server + cfg *config.HttpProxy +} + +var ( + connectOkay = []byte("HTTP/1.1 200 Connection established\r\n\r\n") +) + +func parseBasicAuth(auth string) (username, password string, ok bool) { + const prefix = "Basic " + if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) { + return + } + c, err := base64.StdEncoding.DecodeString(auth[len(prefix):]) + if err != nil { + return + } + cs := string(c) + s := strings.IndexByte(cs, ':') + if s < 0 { + return + } + return cs[:s], cs[s+1:], true +} + +func NewHttpProxy(cfg *config.HttpProxy) *HttpProxy { + h := &HttpProxy{ + out: libol.NewSubLogger(cfg.Listen), + cfg: cfg, + } + h.server = &http.Server{ + Addr: cfg.Listen, + Handler: h, + } + auth := cfg.Auth + if len(auth.Username) > 0 { + h.users = make(map[string]string, 1) + h.users[auth.Username] = auth.Password + } + return h +} + +func (t *HttpProxy) isAuth(username, password string) bool { + if p, ok := t.users[username]; ok { + return p == password + } + return false +} + +func (t *HttpProxy) CheckAuth(w http.ResponseWriter, r *http.Request) bool { + if len(t.users) == 0 { + return true + } + auth := r.Header.Get("Proxy-Authorization") + user, password, ok := parseBasicAuth(auth) + if !ok || !t.isAuth(user, password) { + w.Header().Set("Proxy-Authenticate", "Basic") + 
http.Error(w, "Proxy Authentication Required", http.StatusProxyAuthRequired) + return false + } + return true +} + +func (t *HttpProxy) route(w http.ResponseWriter, p *http.Response) { + defer p.Body.Close() + for key, value := range p.Header { + if key == "Proxy-Authorization" { + if len(value) > 0 { // Remove first value for next proxy. + value = value[1:] + } + } + for _, v := range value { + w.Header().Add(key, v) + } + } + w.WriteHeader(p.StatusCode) + _, _ = io.Copy(w, p.Body) +} + +func (t *HttpProxy) tunnel(w http.ResponseWriter, conn net.Conn) { + src, bio, err := w.(http.Hijacker).Hijack() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + defer src.Close() + wait := libol.NewWaitOne(2) + libol.Go(func() { + defer wait.Done() + // The returned bufio.Reader may contain unprocessed buffered data from the client. + // Copy them to dst so we can use src directly. + if n := bio.Reader.Buffered(); n > 0 { + n64, err := io.CopyN(conn, bio, int64(n)) + if n64 != int64(n) || err != nil { + t.out.Warn("HttpProxy.tunnel io.CopyN:", n64, err) + return + } + } + if _, err := io.Copy(conn, src); err != nil { + t.out.Debug("HttpProxy.tunnel from ws %s", err) + } + }) + libol.Go(func() { + defer wait.Done() + if _, err := io.Copy(src, conn); err != nil { + t.out.Debug("HttpProxy.tunnel from target %s", err) + } + }) + wait.Wait() + t.out.Debug("HttpProxy.tunnel %s exit", conn.RemoteAddr()) +} + +func (t *HttpProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { + t.out.Debug("HttpProxy.ServeHTTP %v", r) + t.out.Debug("HttpProxy.ServeHTTP %v", r.URL.Host) + if !t.CheckAuth(w, r) { + t.out.Info("HttpProxy.ServeHTTP Required %v Authentication", r.URL.Host) + return + } + t.out.Info("HttpProxy.ServeHTTP %s %s -> %s", r.Method, r.RemoteAddr, r.URL.Host) + if r.Method == "CONNECT" { //RFC-7231 Tunneling TCP based protocols through Web Proxy servers + conn, err := net.Dial("tcp", r.URL.Host) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + _, _ = w.Write(connectOkay) + t.tunnel(w, conn) + } else { //RFC 7230 - HTTP/1.1: Message Syntax and Routing + transport := &http.Transport{} + p, err := transport.RoundTrip(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + defer transport.CloseIdleConnections() + t.route(w, p) + } +} + +func (t *HttpProxy) Start() { + if t.server == nil || t.cfg == nil { + return + } + crt := t.cfg.Cert + if crt == nil || crt.KeyFile == "" { + t.out.Info("HttpProxy.start http://%s", t.server.Addr) + } else { + t.out.Info("HttpProxy.start https://%s", t.server.Addr) + } + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Go(func() error { + defer t.server.Shutdown(nil) + if crt == nil || crt.KeyFile == "" { + if err := t.server.ListenAndServe(); err != nil { + t.out.Warn("HttpProxy.start %s", err) + return err + } + } else { + if err := t.server.ListenAndServeTLS(crt.CrtFile, crt.KeyFile); err != nil { + t.out.Error("HttpProxy.start %s", err) + return err + } + } + return nil + }) +} diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go new file mode 100755 index 0000000..3174a95 --- /dev/null +++ b/pkg/proxy/proxy.go @@ -0,0 +1,95 @@ +package proxy + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/proxy/ss" + "os" +) + +type Proxy struct { + cfg *config.Proxy + tcp map[string]*TcpProxy + socks map[string]*SocksProxy + http 
map[string]*HttpProxy + shadow map[string]*ss.ShadowSocks +} + +func NewProxy(cfg *config.Proxy) *Proxy { + return &Proxy{ + cfg: cfg, + socks: make(map[string]*SocksProxy, 32), + tcp: make(map[string]*TcpProxy, 32), + http: make(map[string]*HttpProxy, 32), + shadow: make(map[string]*ss.ShadowSocks, 32), + } +} + +func (p *Proxy) Initialize() { + if p.cfg == nil { + return + } + for _, c := range p.cfg.Socks { + s := NewSocksProxy(c) + if s == nil { + continue + } + p.socks[c.Listen] = s + } + for _, c := range p.cfg.Tcp { + p.tcp[c.Listen] = NewTcpProxy(c) + } + for _, c := range p.cfg.Http { + if c == nil || c.Listen == "" { + continue + } + h := NewHttpProxy(c) + p.http[c.Listen] = h + } + for _, c := range p.cfg.Shadow { + if c == nil || c.Server == "" { + continue + } + h := ss.NewShadowSocks(c) + p.shadow[c.Server] = h + } +} + +func (p *Proxy) Start() { + if p.cfg == nil { + return + } + libol.Info("Proxy.Start") + for _, s := range p.socks { + s.Start() + } + for _, t := range p.tcp { + t.Start() + } + for _, h := range p.http { + h.Start() + } + for _, s := range p.shadow { + s.Start() + } +} + +func (p *Proxy) Stop() { + if p.cfg == nil { + return + } + libol.Info("Proxy.Stop") + for _, t := range p.tcp { + t.Stop() + } + for _, s := range p.shadow { + s.Stop() + } +} + +func init() { + // HTTP/2.0 not support upgrade for Hijacker + if err := os.Setenv("GODEBUG", "http2server=0"); err != nil { + libol.Warn("proxy.init %s") + } +} diff --git a/pkg/proxy/socks.go b/pkg/proxy/socks.go new file mode 100755 index 0000000..cbf7237 --- /dev/null +++ b/pkg/proxy/socks.go @@ -0,0 +1,61 @@ +package proxy + +import ( + "github.com/armon/go-socks5" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "time" +) + +type SocksProxy struct { + server *socks5.Server + out *libol.SubLogger + cfg *config.SocksProxy +} + +func NewSocksProxy(cfg *config.SocksProxy) *SocksProxy { + s := &SocksProxy{ + cfg: cfg, + out: libol.NewSubLogger(cfg.Listen), + } + // Create a SOCKS5 server + auth := cfg.Auth + authMethods := make([]socks5.Authenticator, 0, 2) + if len(auth.Username) > 0 { + author := socks5.UserPassAuthenticator{ + Credentials: socks5.StaticCredentials{ + auth.Username: auth.Password, + }, + } + authMethods = append(authMethods, author) + } + conf := &socks5.Config{AuthMethods: authMethods} + server, err := socks5.New(conf) + if err != nil { + s.out.Error("NewSocksProxy %s", err) + return nil + } + s.server = server + return s +} + +func (s *SocksProxy) Start() { + if s.server == nil || s.cfg == nil { + return + } + addr := s.cfg.Listen + s.out.Info("Proxy.startSocks") + + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Go(func() error { + if err := s.server.ListenAndServe("tcp", addr); err != nil { + s.out.Warn("Proxy.startSocks %s", err) + return err + } + return nil + }) +} diff --git a/pkg/proxy/ss/config.go b/pkg/proxy/ss/config.go new file mode 100755 index 0000000..1ef083d --- /dev/null +++ b/pkg/proxy/ss/config.go @@ -0,0 +1,9 @@ +package ss + +import "time" + +var config struct { + Verbose bool + UDPTimeout time.Duration + TCPCork bool +} diff --git a/pkg/proxy/ss/log.go b/pkg/proxy/ss/log.go new file mode 100755 index 0000000..4ff881c --- /dev/null +++ b/pkg/proxy/ss/log.go @@ -0,0 +1,31 @@ +package ss + +import ( + "fmt" + "log" + "os" +) + +var logger = log.New(os.Stderr, "", log.Lshortfile|log.LstdFlags) + +func logf(f string, v ...interface{}) { + if config.Verbose { + _ = 
logger.Output(2, fmt.Sprintf(f, v...)) + } +} + +type logHelper struct { + prefix string +} + +func (l *logHelper) Write(p []byte) (n int, err error) { + if config.Verbose { + logger.Printf("%s%s\n", l.prefix, p) + return len(p), nil + } + return len(p), nil +} + +func newLogHelper(prefix string) *logHelper { + return &logHelper{prefix} +} diff --git a/pkg/proxy/ss/plugin.go b/pkg/proxy/ss/plugin.go new file mode 100755 index 0000000..621068e --- /dev/null +++ b/pkg/proxy/ss/plugin.go @@ -0,0 +1,118 @@ +package ss + +import ( + "fmt" + "net" + "os" + "os/exec" + "path/filepath" + "syscall" + "time" +) + +var pluginCmd *exec.Cmd + +func startPlugin(plugin, pluginOpts, ssAddr string, isServer bool) (newAddr string, err error) { + logf("starting plugin (%s) with option (%s)....", plugin, pluginOpts) + freePort, err := getFreePort() + if err != nil { + return "", fmt.Errorf("failed to fetch an unused port for plugin (%v)", err) + } + localHost := "127.0.0.1" + ssHost, ssPort, err := net.SplitHostPort(ssAddr) + if err != nil { + return "", err + } + newAddr = localHost + ":" + freePort + if isServer { + if ssHost == "" { + ssHost = "0.0.0.0" + } + logf("plugin (%s) will listen on %s:%s", plugin, ssHost, ssPort) + } else { + logf("plugin (%s) will listen on %s:%s", plugin, localHost, freePort) + } + err = execPlugin(plugin, pluginOpts, ssHost, ssPort, localHost, freePort) + return +} + +func killPlugin() { + if pluginCmd != nil { + pluginCmd.Process.Signal(syscall.SIGTERM) + waitCh := make(chan struct{}) + go func() { + pluginCmd.Wait() + close(waitCh) + }() + timeout := time.After(3 * time.Second) + select { + case <-waitCh: + case <-timeout: + pluginCmd.Process.Kill() + } + } +} + +func execPlugin(plugin, pluginOpts, remoteHost, remotePort, localHost, localPort string) (err error) { + pluginFile := plugin + if fileExists(plugin) { + if !filepath.IsAbs(plugin) { + pluginFile = "./" + plugin + } + } else { + pluginFile, err = exec.LookPath(plugin) + if err != nil { + return err + } + } + logH := newLogHelper("[" + plugin + "]: ") + env := append(os.Environ(), + "SS_REMOTE_HOST="+remoteHost, + "SS_REMOTE_PORT="+remotePort, + "SS_LOCAL_HOST="+localHost, + "SS_LOCAL_PORT="+localPort, + "SS_PLUGIN_OPTIONS="+pluginOpts, + ) + cmd := &exec.Cmd{ + Path: pluginFile, + Env: env, + Stdout: logH, + Stderr: logH, + } + if err = cmd.Start(); err != nil { + return err + } + pluginCmd = cmd + go func() { + if err := cmd.Wait(); err != nil { + logf("plugin exited (%v)\n", err) + os.Exit(2) + } + logf("plugin exited\n") + os.Exit(0) + }() + return nil +} + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +func getFreePort() (string, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return "", err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return "", err + } + port := fmt.Sprintf("%d", l.Addr().(*net.TCPAddr).Port) + l.Close() + return port, nil +} diff --git a/pkg/proxy/ss/shadow.go b/pkg/proxy/ss/shadow.go new file mode 100755 index 0000000..c3ba9d6 --- /dev/null +++ b/pkg/proxy/ss/shadow.go @@ -0,0 +1,75 @@ +package ss + +import ( + "encoding/base64" + c "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/shadowsocks/go-shadowsocks2/core" + "log" +) + +type ShadowSocks struct { + Server string + Key string + Cipher string + Password string + Plugin string + PluginOpts string + Protocol string + out *libol.SubLogger +} 
+ +func NewShadowSocks(cfg *c.ShadowProxy) *ShadowSocks { + proto := cfg.Protocol + if proto == "" { + proto = "tcp" + } + return &ShadowSocks{ + Server: cfg.Server, + Key: cfg.Key, + Cipher: cfg.Cipher, + Password: cfg.Password, + Plugin: cfg.Plugin, + PluginOpts: cfg.PluginOpts, + Protocol: proto, + out: libol.NewSubLogger(cfg.Server), + } +} + +func (s *ShadowSocks) Start() { + var key []byte + if s.Key != "" { + k, err := base64.URLEncoding.DecodeString(s.Key) + if err != nil { + log.Fatal(err) + } + key = k + } + addr := s.Server + cipher := s.Cipher + password := s.Password + var err error + + udpAddr := addr + if s.Plugin != "" { + addr, err = startPlugin(s.Plugin, s.PluginOpts, addr, true) + if err != nil { + log.Fatal(err) + } + } + ciph, err := core.PickCipher(cipher, key, password) + if err != nil { + log.Fatal(err) + } + + if s.Protocol == "udp" { + go udpRemote(udpAddr, ciph.PacketConn) + } else { + go tcpRemote(addr, ciph.StreamConn) + } + s.out.Info("ShadowSocks.start %s:%s", s.Protocol, s.Server) +} + +func (s *ShadowSocks) Stop() { + killPlugin() +} diff --git a/pkg/proxy/ss/tcp.go b/pkg/proxy/ss/tcp.go new file mode 100755 index 0000000..c28e135 --- /dev/null +++ b/pkg/proxy/ss/tcp.go @@ -0,0 +1,205 @@ +package ss + +import ( + "bufio" + "errors" + "io" + "io/ioutil" + "net" + "os" + "sync" + "time" + + "github.com/shadowsocks/go-shadowsocks2/socks" +) + +// Create a SOCKS server listening on addr and proxy to server. +func socksLocal(addr, server string, shadow func(net.Conn) net.Conn) { + logf("SOCKS proxy %s <-> %s", addr, server) + tcpLocal(addr, server, shadow, func(c net.Conn) (socks.Addr, error) { return socks.Handshake(c) }) +} + +// Create a TCP tunnel from addr to target via server. +func tcpTun(addr, server, target string, shadow func(net.Conn) net.Conn) { + tgt := socks.ParseAddr(target) + if tgt == nil { + logf("invalid target address %q", target) + return + } + logf("TCP tunnel %s <-> %s <-> %s", addr, server, target) + tcpLocal(addr, server, shadow, func(net.Conn) (socks.Addr, error) { return tgt, nil }) +} + +// Listen on addr and proxy to server to reach target from getAddr. +func tcpLocal(addr, server string, shadow func(net.Conn) net.Conn, getAddr func(net.Conn) (socks.Addr, error)) { + l, err := net.Listen("tcp", addr) + if err != nil { + logf("failed to listen on %s: %v", addr, err) + return + } + + for { + c, err := l.Accept() + if err != nil { + logf("failed to accept: %s", err) + continue + } + + go func() { + defer c.Close() + tgt, err := getAddr(c) + if err != nil { + + // UDP: keep the connection until disconnect then free the UDP socket + if err == socks.InfoUDPAssociate { + buf := make([]byte, 1) + // block here + for { + _, err := c.Read(buf) + if err, ok := err.(net.Error); ok && err.Timeout() { + continue + } + logf("UDP Associate End.") + return + } + } + + logf("failed to get target address: %v", err) + return + } + + rc, err := net.Dial("tcp", server) + if err != nil { + logf("failed to connect to server %v: %v", server, err) + return + } + defer rc.Close() + if config.TCPCork { + rc = timedCork(rc, 10*time.Millisecond, 1280) + } + rc = shadow(rc) + + if _, err = rc.Write(tgt); err != nil { + logf("failed to send target address: %v", err) + return + } + + logf("proxy %s <-> %s <-> %s", c.RemoteAddr(), server, tgt) + if err = relay(rc, c); err != nil { + logf("relay error: %v", err) + } + }() + } +} + +// Listen on addr for incoming connections. 
+func tcpRemote(addr string, shadow func(net.Conn) net.Conn) { + l, err := net.Listen("tcp", addr) + if err != nil { + logf("failed to listen on %s: %v", addr, err) + return + } + + logf("listening TCP on %s", addr) + for { + c, err := l.Accept() + if err != nil { + logf("failed to accept: %v", err) + continue + } + + go func() { + defer c.Close() + if config.TCPCork { + c = timedCork(c, 10*time.Millisecond, 1280) + } + sc := shadow(c) + + tgt, err := socks.ReadAddr(sc) + if err != nil { + logf("failed to get target address from %v: %v", c.RemoteAddr(), err) + // drain c to avoid leaking server behavioral features + // see https://www.ndss-symposium.org/ndss-paper/detecting-probe-resistant-proxies/ + _, err = io.Copy(ioutil.Discard, c) + if err != nil { + logf("discard error: %v", err) + } + return + } + + rc, err := net.Dial("tcp", tgt.String()) + if err != nil { + logf("failed to connect to target: %v", err) + return + } + defer rc.Close() + + logf("proxy %s <-> %s", c.RemoteAddr(), tgt) + if err = relay(sc, rc); err != nil { + logf("relay error: %v", err) + } + }() + } +} + +// relay copies between left and right bidirectionally +func relay(left, right net.Conn) error { + var err, err1 error + var wg sync.WaitGroup + var wait = 5 * time.Second + wg.Add(1) + go func() { + defer wg.Done() + _, err1 = io.Copy(right, left) + right.SetReadDeadline(time.Now().Add(wait)) // unblock read on right + }() + _, err = io.Copy(left, right) + left.SetReadDeadline(time.Now().Add(wait)) // unblock read on left + wg.Wait() + if err1 != nil && !errors.Is(err1, os.ErrDeadlineExceeded) { // requires Go 1.15+ + return err1 + } + if err != nil && !errors.Is(err, os.ErrDeadlineExceeded) { + return err + } + return nil +} + +type corkedConn struct { + net.Conn + bufw *bufio.Writer + corked bool + delay time.Duration + err error + lock sync.Mutex + once sync.Once +} + +func timedCork(c net.Conn, d time.Duration, bufSize int) net.Conn { + return &corkedConn{ + Conn: c, + bufw: bufio.NewWriterSize(c, bufSize), + corked: true, + delay: d, + } +} + +func (w *corkedConn) Write(p []byte) (int, error) { + w.lock.Lock() + defer w.lock.Unlock() + if w.err != nil { + return 0, w.err + } + if w.corked { + w.once.Do(func() { + time.AfterFunc(w.delay, func() { + w.lock.Lock() + defer w.lock.Unlock() + w.corked = false + w.err = w.bufw.Flush() + }) + }) + return w.bufw.Write(p) + } + return w.Conn.Write(p) +} diff --git a/pkg/proxy/ss/tcp_darwin.go b/pkg/proxy/ss/tcp_darwin.go new file mode 100755 index 0000000..77b9343 --- /dev/null +++ b/pkg/proxy/ss/tcp_darwin.go @@ -0,0 +1,24 @@ +package ss + +import ( + "net" + + "github.com/shadowsocks/go-shadowsocks2/pfutil" + "github.com/shadowsocks/go-shadowsocks2/socks" +) + +func redirLocal(addr, server string, shadow func(net.Conn) net.Conn) { + tcpLocal(addr, server, shadow, natLookup) +} + +func redir6Local(addr, server string, shadow func(net.Conn) net.Conn) { + panic("TCP6 redirect not supported") +} + +func natLookup(c net.Conn) (socks.Addr, error) { + if tc, ok := c.(*net.TCPConn); ok { + addr, err := pfutil.NatLookup(tc) + return socks.ParseAddr(addr.String()), err + } + panic("not TCP connection") +} diff --git a/pkg/proxy/ss/tcp_linux.go b/pkg/proxy/ss/tcp_linux.go new file mode 100755 index 0000000..960087b --- /dev/null +++ b/pkg/proxy/ss/tcp_linux.go @@ -0,0 +1,28 @@ +package ss + +import ( + "net" + + "github.com/shadowsocks/go-shadowsocks2/nfutil" + "github.com/shadowsocks/go-shadowsocks2/socks" +) + +func getOrigDst(c net.Conn, ipv6 bool) (socks.Addr, error) { + if 
tc, ok := c.(*net.TCPConn); ok { + addr, err := nfutil.GetOrigDst(tc, ipv6) + return socks.ParseAddr(addr.String()), err + } + panic("not a TCP connection") +} + +// Listen on addr for netfilter redirected TCP connections +func redirLocal(addr, server string, shadow func(net.Conn) net.Conn) { + logf("TCP redirect %s <-> %s", addr, server) + tcpLocal(addr, server, shadow, func(c net.Conn) (socks.Addr, error) { return getOrigDst(c, false) }) +} + +// Listen on addr for netfilter redirected TCP IPv6 connections. +func redir6Local(addr, server string, shadow func(net.Conn) net.Conn) { + logf("TCP6 redirect %s <-> %s", addr, server) + tcpLocal(addr, server, shadow, func(c net.Conn) (socks.Addr, error) { return getOrigDst(c, true) }) +} diff --git a/pkg/proxy/ss/tcp_other.go b/pkg/proxy/ss/tcp_other.go new file mode 100755 index 0000000..e8feea3 --- /dev/null +++ b/pkg/proxy/ss/tcp_other.go @@ -0,0 +1,15 @@ +// +build !linux,!darwin + +package ss + +import ( + "net" +) + +func redirLocal(addr, server string, shadow func(net.Conn) net.Conn) { + logf("TCP redirect not supported") +} + +func redir6Local(addr, server string, shadow func(net.Conn) net.Conn) { + logf("TCP6 redirect not supported") +} diff --git a/pkg/proxy/ss/udp.go b/pkg/proxy/ss/udp.go new file mode 100755 index 0000000..bc6b20a --- /dev/null +++ b/pkg/proxy/ss/udp.go @@ -0,0 +1,253 @@ +package ss + +import ( + "fmt" + "net" + "sync" + "time" + + "github.com/shadowsocks/go-shadowsocks2/socks" +) + +type mode int + +const ( + remoteServer mode = iota + relayClient + socksClient +) + +const udpBufSize = 64 * 1024 + +// Listen on laddr for UDP packets, encrypt and send to server to reach target. +func udpLocal(laddr, server, target string, shadow func(net.PacketConn) net.PacketConn) { + srvAddr, err := net.ResolveUDPAddr("udp", server) + if err != nil { + logf("UDP server address error: %v", err) + return + } + + tgt := socks.ParseAddr(target) + if tgt == nil { + err = fmt.Errorf("invalid target address: %q", target) + logf("UDP target address error: %v", err) + return + } + + c, err := net.ListenPacket("udp", laddr) + if err != nil { + logf("UDP local listen error: %v", err) + return + } + defer c.Close() + + nm := newNATmap(config.UDPTimeout) + buf := make([]byte, udpBufSize) + copy(buf, tgt) + + logf("UDP tunnel %s <-> %s <-> %s", laddr, server, target) + for { + n, raddr, err := c.ReadFrom(buf[len(tgt):]) + if err != nil { + logf("UDP local read error: %v", err) + continue + } + + pc := nm.Get(raddr.String()) + if pc == nil { + pc, err = net.ListenPacket("udp", "") + if err != nil { + logf("UDP local listen error: %v", err) + continue + } + + pc = shadow(pc) + nm.Add(raddr, c, pc, relayClient) + } + + _, err = pc.WriteTo(buf[:len(tgt)+n], srvAddr) + if err != nil { + logf("UDP local write error: %v", err) + continue + } + } +} + +// Listen on laddr for Socks5 UDP packets, encrypt and send to server to reach target. 
+func udpSocksLocal(laddr, server string, shadow func(net.PacketConn) net.PacketConn) { + srvAddr, err := net.ResolveUDPAddr("udp", server) + if err != nil { + logf("UDP server address error: %v", err) + return + } + + c, err := net.ListenPacket("udp", laddr) + if err != nil { + logf("UDP local listen error: %v", err) + return + } + defer c.Close() + + nm := newNATmap(config.UDPTimeout) + buf := make([]byte, udpBufSize) + + for { + n, raddr, err := c.ReadFrom(buf) + if err != nil { + logf("UDP local read error: %v", err) + continue + } + + pc := nm.Get(raddr.String()) + if pc == nil { + pc, err = net.ListenPacket("udp", "") + if err != nil { + logf("UDP local listen error: %v", err) + continue + } + logf("UDP socks tunnel %s <-> %s <-> %s", laddr, server, socks.Addr(buf[3:])) + pc = shadow(pc) + nm.Add(raddr, c, pc, socksClient) + } + + _, err = pc.WriteTo(buf[3:n], srvAddr) + if err != nil { + logf("UDP local write error: %v", err) + continue + } + } +} + +// Listen on addr for encrypted packets and basically do UDP NAT. +func udpRemote(addr string, shadow func(net.PacketConn) net.PacketConn) { + c, err := net.ListenPacket("udp", addr) + if err != nil { + logf("UDP remote listen error: %v", err) + return + } + defer c.Close() + c = shadow(c) + + nm := newNATmap(config.UDPTimeout) + buf := make([]byte, udpBufSize) + + logf("listening UDP on %s", addr) + for { + n, raddr, err := c.ReadFrom(buf) + if err != nil { + logf("UDP remote read error: %v", err) + continue + } + + tgtAddr := socks.SplitAddr(buf[:n]) + if tgtAddr == nil { + logf("failed to split target address from packet: %q", buf[:n]) + continue + } + + tgtUDPAddr, err := net.ResolveUDPAddr("udp", tgtAddr.String()) + if err != nil { + logf("failed to resolve target UDP address: %v", err) + continue + } + + payload := buf[len(tgtAddr):n] + + pc := nm.Get(raddr.String()) + if pc == nil { + pc, err = net.ListenPacket("udp", "") + if err != nil { + logf("UDP remote listen error: %v", err) + continue + } + + nm.Add(raddr, c, pc, remoteServer) + } + + _, err = pc.WriteTo(payload, tgtUDPAddr) // accept only UDPAddr despite the signature + if err != nil { + logf("UDP remote write error: %v", err) + continue + } + } +} + +// Packet NAT table +type natmap struct { + sync.RWMutex + m map[string]net.PacketConn + timeout time.Duration +} + +func newNATmap(timeout time.Duration) *natmap { + m := &natmap{} + m.m = make(map[string]net.PacketConn) + m.timeout = timeout + return m +} + +func (m *natmap) Get(key string) net.PacketConn { + m.RLock() + defer m.RUnlock() + return m.m[key] +} + +func (m *natmap) Set(key string, pc net.PacketConn) { + m.Lock() + defer m.Unlock() + + m.m[key] = pc +} + +func (m *natmap) Del(key string) net.PacketConn { + m.Lock() + defer m.Unlock() + + pc, ok := m.m[key] + if ok { + delete(m.m, key) + return pc + } + return nil +} + +func (m *natmap) Add(peer net.Addr, dst, src net.PacketConn, role mode) { + m.Set(peer.String(), src) + + go func() { + timedCopy(dst, peer, src, m.timeout, role) + if pc := m.Del(peer.String()); pc != nil { + pc.Close() + } + }() +} + +// copy from src to dst at target with read timeout +func timedCopy(dst net.PacketConn, target net.Addr, src net.PacketConn, timeout time.Duration, role mode) error { + buf := make([]byte, udpBufSize) + + for { + src.SetReadDeadline(time.Now().Add(timeout)) + n, raddr, err := src.ReadFrom(buf) + if err != nil { + return err + } + + switch role { + case remoteServer: // server -> client: add original packet source + srcAddr := socks.ParseAddr(raddr.String()) + 
copy(buf[len(srcAddr):], buf[:n]) + copy(buf, srcAddr) + _, err = dst.WriteTo(buf[:len(srcAddr)+n], target) + case relayClient: // client -> user: strip original packet source + srcAddr := socks.SplitAddr(buf[:n]) + _, err = dst.WriteTo(buf[len(srcAddr):n], target) + case socksClient: // client -> socks5 program: just set RSV and FRAG = 0 + _, err = dst.WriteTo(append([]byte{0, 0, 0}, buf[:n]...), target) + } + + if err != nil { + return err + } + } +} diff --git a/pkg/proxy/tcp.go b/pkg/proxy/tcp.go new file mode 100755 index 0000000..755e9c7 --- /dev/null +++ b/pkg/proxy/tcp.go @@ -0,0 +1,112 @@ +package proxy + +import ( + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "io" + "net" + "time" +) + +type TcpProxy struct { + listen string + target []string + listener net.Listener + out *libol.SubLogger + rr uint64 +} + +func NewTcpProxy(cfg *config.TcpProxy) *TcpProxy { + return &TcpProxy{ + listen: cfg.Listen, + target: cfg.Target, + out: libol.NewSubLogger(cfg.Listen), + } +} + +func (t *TcpProxy) tunnel(src net.Conn, dst net.Conn) { + defer dst.Close() + defer src.Close() + t.out.Info("TcpProxy.tunnel %s -> %s", src.RemoteAddr(), dst.RemoteAddr()) + wait := libol.NewWaitOne(2) + libol.Go(func() { + defer wait.Done() + if _, err := io.Copy(dst, src); err != nil { + t.out.Debug("TcpProxy.tunnel from ws %s", err) + } + }) + libol.Go(func() { + defer wait.Done() + if _, err := io.Copy(src, dst); err != nil { + t.out.Debug("TcpProxy.tunnel from target %s", err) + } + }) + wait.Wait() + t.out.Debug("TcpProxy.tunnel %s exit", dst.RemoteAddr()) +} + +func (t *TcpProxy) loadBalance(fail int) string { + size := len(t.target) + if fail < size { + i := t.rr % uint64(size) + t.rr++ + return t.target[i] + } + return "" +} + +func (t *TcpProxy) Start() { + var listen net.Listener + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Done(func() error { + var err error + listen, err = net.Listen("tcp", t.listen) + if err != nil { + t.out.Warn("TcpProxy.Start %s", err) + } + return err + }) + t.listener = listen + t.out.Info("TcpProxy.Start: %s", t.target) + libol.Go(func() { + defer listen.Close() + for { + conn, err := listen.Accept() + if err != nil { + t.out.Error("TcpServer.Accept: %s", err) + break + } + // connect target and pipe it. + fail := 0 + for { + backend := t.loadBalance(fail) + if backend == "" { + break + } + target, err := net.Dial("tcp", backend) + if err != nil { + t.out.Error("TcpProxy.Accept %s", err) + fail++ + continue + } + libol.Go(func() { + t.tunnel(conn, target) + }) + break + } + } + }) + return +} + +func (t *TcpProxy) Stop() { + if t.listener != nil { + _ = t.listener.Close() + } + t.out.Info("TcpProxy.Stop") + t.listener = nil +} diff --git a/pkg/public/favicon.ico b/pkg/public/favicon.ico new file mode 100644 index 0000000..ca21694 Binary files /dev/null and b/pkg/public/favicon.ico differ diff --git a/pkg/public/index.html b/pkg/public/index.html new file mode 100755 index 0000000..e6e6593 --- /dev/null +++ b/pkg/public/index.html @@ -0,0 +1,300 @@ + + + + + {{ .Worker.Alias }} - OpenLAN + + + +
[pkg/public/index.html, HTML markup omitted: a 300-line status page template. It renders a worker summary table (Project: openlan, UUID, UpTime via prettyTime, Protocol, Alias, Version, Built on), a table of accessed clients and points ({{ len .Clients }}+{{ len .Points }} rows with columns UUID, Alive, Device, Alias, Connection, User, r/t|Traffic via prettyBytes, State), a table of discovered neighbors ({{ len .Neighbors }} rows with columns UpTime, Ethernet, IP Address, Network, Connection), and a table of traced online flows ({{ len .OnLines }} rows with columns LastTime, Source, Destination Address, Protocol, Source Port, Destination Port).]
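The template above is presumably rendered server-side with Go's html/template against data shaped like the schema.Index struct introduced later in this patch. A minimal sketch of that rendering, assuming the prettyTime and prettyBytes helpers (whose names come from the template itself) are registered through a FuncMap; their implementations here, and the stand-in data, are illustrative assumptions, not the repository's actual code:

package main

import (
	"fmt"
	"html/template"
	"os"
	"time"
)

// prettyTime formats an uptime in seconds as a duration string (assumed behavior).
func prettyTime(sec int64) string {
	return (time.Duration(sec) * time.Second).String()
}

// prettyBytes formats a byte count with a K/M suffix (assumed behavior).
func prettyBytes(n int64) string {
	switch {
	case n >= 1<<20:
		return fmt.Sprintf("%.1fM", float64(n)/(1<<20))
	case n >= 1<<10:
		return fmt.Sprintf("%.1fK", float64(n)/(1<<10))
	default:
		return fmt.Sprintf("%dB", n)
	}
}

func main() {
	funcs := template.FuncMap{"prettyTime": prettyTime, "prettyBytes": prettyBytes}
	// The template name must match the file's base name when combining New with ParseFiles.
	tmpl := template.Must(template.New("index.html").Funcs(funcs).ParseFiles("pkg/public/index.html"))
	// Stand-in for schema.Index; empty slices keep the {{ len ... }} and {{ range ... }} actions happy.
	data := map[string]interface{}{
		"Worker":    map[string]interface{}{"Alias": "demo", "UUID": "0000", "Uptime": int64(3600), "Protocol": "tcp"},
		"Version":   map[string]interface{}{"Version": "dev", "Date": "unknown"},
		"Clients":   []interface{}{},
		"Points":    []interface{}{},
		"Neighbors": []interface{}{},
		"OnLines":   []interface{}{},
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

In the switch itself the data would come from the running workers rather than literals; this sketch only shows how the helper functions and the template file fit together.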
+ + diff --git a/pkg/public/openlan-point.png b/pkg/public/openlan-point.png new file mode 100644 index 0000000..34f5718 Binary files /dev/null and b/pkg/public/openlan-point.png differ diff --git a/pkg/public/openlan.png b/pkg/public/openlan.png new file mode 100644 index 0000000..ec5a69f Binary files /dev/null and b/pkg/public/openlan.png differ diff --git a/pkg/schema/acl.go b/pkg/schema/acl.go new file mode 100755 index 0000000..c22dc56 --- /dev/null +++ b/pkg/schema/acl.go @@ -0,0 +1,16 @@ +package schema + +type ACL struct { + Name string `json:"name"` + Rules []ACLRule `json:"rules"` +} + +type ACLRule struct { + Name string `json:"name"` + SrcIp string `json:"src"` + DstIp string `json:"dst"` + Proto string `json:"proto"` + SrcPort int `json:"sport"` + DstPort int `json:"dport"` + Action string `json:"action"` +} diff --git a/pkg/schema/device.go b/pkg/schema/device.go new file mode 100755 index 0000000..11ffb0a --- /dev/null +++ b/pkg/schema/device.go @@ -0,0 +1,23 @@ +package schema + +type Device struct { + Name string `json:"name"` + Address string `json:"address,omitempty"` + Mac string `json:"mac,omitempty"` + Type string `json:"type,omitempty"` + Provider string `json:"provider"` + Mtu int `json:"mtu,omitempty"` +} + +type HwMacInfo struct { + Uptime int64 `json:"uptime"` + Address string `json:"address"` + Device string `json:"device"` +} + +type Bridge struct { + Device + Macs []HwMacInfo `json:"macs"` + Slaves []Device `json:"slaves"` + Stats interface{} `json:"stats"` +} diff --git a/pkg/schema/esp.go b/pkg/schema/esp.go new file mode 100644 index 0000000..560bde5 --- /dev/null +++ b/pkg/schema/esp.go @@ -0,0 +1,44 @@ +package schema + +import "net" + +type Esp struct { + Name string `json:"name"` + Address string `json:"address"` + Members []EspMember `json:"members,omitempty"` +} + +type EspState struct { + Name string `json:"name"` + AliveTime int64 `json:"alive"` + Spi int `json:"spi"` + Local net.IP `json:"source"` + Mode uint8 `json:"mode"` + Proto uint8 `json:"proto"` + Remote net.IP `json:"destination"` + Auth string `json:"auth"` + Crypt string `json:"crypt"` + Encap string `json:"encap" ` + RemotePort int `json:"remotePort"` + TxBytes int64 `json:"txBytes"` + TxPackages int64 `json:"txPackages"` + RxBytes int64 `json:"rxBytes"` + RxPackages int64 `json:"rxPackages"` +} + +type EspPolicy struct { + Name string `json:"name"` + Spi int `json:"spi"` + Local net.IP `json:"local"` + Remote net.IP `json:"remote"` + Source string `json:"source"` + Dest string `json:"destination"` +} + +type EspMember struct { + Name string `json:"name"` + Spi uint32 `json:"spi"` + Peer string `json:"peer"` + State EspState `json:"state"` + Policy []EspPolicy `json:"policy"` +} diff --git a/pkg/schema/graph.go b/pkg/schema/graph.go new file mode 100755 index 0000000..d66dfb7 --- /dev/null +++ b/pkg/schema/graph.go @@ -0,0 +1,24 @@ +package schema + +type Category struct { + Name string `json:"name"` +} + +type Label struct { + Show bool `json:"show"` +} + +type GraphNode struct { + Name string `json:"name"` + Value int `json:"value"` + SymbolSize int `json:"symbolSize"` + Category int `json:"category"` + Id int `json:"id"` + Label *Label `json:"label,omitempty"` +} + +type GraphLink struct { + Source int `json:"source"` + Target int `json:"target"` + Weight int `json:"weight"` +} diff --git a/pkg/schema/index.go b/pkg/schema/index.go new file mode 100755 index 0000000..97fbd74 --- /dev/null +++ b/pkg/schema/index.go @@ -0,0 +1,23 @@ +package schema + +type Index struct { + Version 
Version `json:"version"` + Worker Worker `json:"worker"` + Points []Point `json:"points"` + Links []Link `json:"links"` + Neighbors []Neighbor `json:"neighbors"` + OnLines []OnLine `json:"online"` + Network []Network `json:"network"` + Clients []VPNClient `json:"clients"` + States []EspState `json:"states"` +} + +type Ctrl struct { + Url string `json:"url"` + Token string `json:"token"` +} + +type Message struct { + Code int `json:"code"` + Message string `json:"message"` +} diff --git a/pkg/schema/link.go b/pkg/schema/link.go new file mode 100755 index 0000000..dca304f --- /dev/null +++ b/pkg/schema/link.go @@ -0,0 +1,17 @@ +package schema + +type Link struct { + Uptime int64 `json:"uptime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Network string `json:"network"` + User string `json:"user"` + Protocol string `json:"protocol"` + Server string `json:"server"` + Device string `json:"device"` + RxBytes int64 `json:"rxBytes"` + TxBytes int64 `json:"txBytes"` + ErrPkt int64 `json:"errors"` + State string `json:"state"` + AliveTime int64 `json:"aliveTime"` +} diff --git a/pkg/schema/neighbor.go b/pkg/schema/neighbor.go new file mode 100755 index 0000000..6147c14 --- /dev/null +++ b/pkg/schema/neighbor.go @@ -0,0 +1,12 @@ +package schema + +type Neighbor struct { + Uptime int64 `json:"uptime"` + UUID string `json:"uuid"` + HwAddr string `json:"ethernet"` + IpAddr string `json:"address"` + Client string `json:"client"` + Switch string `json:"switch"` + Network string `json:"network"` + Device string `json:"device"` +} diff --git a/pkg/schema/network.go b/pkg/schema/network.go new file mode 100755 index 0000000..dbdae85 --- /dev/null +++ b/pkg/schema/network.go @@ -0,0 +1,26 @@ +package schema + +type Lease struct { + Address string `json:"address"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Client string `json:"client"` + Type string `json:"type"` + Network string `json:"network"` +} + +type PrefixRoute struct { + Prefix string `json:"prefix"` + NextHop string `json:"nexthop"` + Metric int `json:"metric"` + Mode string `json:"mode"` +} + +type Network struct { + Name string `json:"name"` + IfAddr string `json:"ifAddr,omitempty"` + IpStart string `json:"ipStart,omitempty"` + IpEnd string `json:"ipEnd,omitempty"` + Netmask string `json:"netmask"` + Routes []PrefixRoute `json:"routes"` +} diff --git a/pkg/schema/online.go b/pkg/schema/online.go new file mode 100644 index 0000000..4a8cd2d --- /dev/null +++ b/pkg/schema/online.go @@ -0,0 +1,12 @@ +package schema + +type OnLine struct { + HitTime int64 `json:"hittime"` + UpTime int64 `json:"uptime"` + EthType uint16 `json:"ethType"` + IpSource string `json:"ipSource"` + IpDest string `json:"ipDestination"` + IpProto string `json:"ipProtocol"` + PortSource uint16 `json:"portSource"` + PortDest uint16 `json:"portDestination"` +} diff --git a/pkg/schema/openvpn.go b/pkg/schema/openvpn.go new file mode 100755 index 0000000..3d3b566 --- /dev/null +++ b/pkg/schema/openvpn.go @@ -0,0 +1,17 @@ +package schema + +type VPNClient struct { + Uptime int64 `json:"uptime"` + Name string `json:"name"` + UUID string `json:"uuid"` + Network string `json:"network"` + User string `json:"user"` + Remote string `json:"remote"` + Device string `json:"device"` + RxBytes int64 `json:"rxBytes"` + TxBytes int64 `json:"txBytes"` + ErrPkt string `json:"errors"` + State string `json:"state"` + AliveTime int64 `json:"aliveTime"` + Address string `json:"address"` +} diff --git a/pkg/schema/point.go b/pkg/schema/point.go new file mode 100755 index 
0000000..e4c3aff --- /dev/null +++ b/pkg/schema/point.go @@ -0,0 +1,20 @@ +package schema + +type Point struct { + Uptime int64 `json:"uptime"` + UUID string `json:"uuid"` + Network string `json:"network"` + User string `json:"user"` + Alias string `json:"alias"` + Protocol string `json:"protocol"` + Remote string `json:"remote"` + Switch string `json:"switch,omitempty"` + Device string `json:"device"` + RxBytes int64 `json:"rxBytes"` + TxBytes int64 `json:"txBytes"` + ErrPkt int64 `json:"errors"` + State string `json:"state"` + AliveTime int64 `json:"aliveTime"` + System string `json:"system"` + Address Network `json:"address"` +} diff --git a/pkg/schema/pprof.go b/pkg/schema/pprof.go new file mode 100755 index 0000000..7dffe07 --- /dev/null +++ b/pkg/schema/pprof.go @@ -0,0 +1,5 @@ +package schema + +type PProf struct { + Listen string `json:"listen"` +} diff --git a/pkg/schema/switch.go b/pkg/schema/switch.go new file mode 100644 index 0000000..579ca07 --- /dev/null +++ b/pkg/schema/switch.go @@ -0,0 +1,8 @@ +package schema + +type Switch struct { + Uptime int64 `json:"uptime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Address string `json:"address"` +} diff --git a/pkg/schema/user.go b/pkg/schema/user.go new file mode 100755 index 0000000..bc41553 --- /dev/null +++ b/pkg/schema/user.go @@ -0,0 +1,10 @@ +package schema + +type User struct { + Alias string `json:"alias,omitempty"` + Role string `json:"role,omitempty"` // admin, guest or other + Name string `json:"name"` + Password string `json:"password"` + Network string `json:"network"` + Lease string `json:"leaseTime"` +} diff --git a/pkg/schema/version.go b/pkg/schema/version.go new file mode 100755 index 0000000..2887353 --- /dev/null +++ b/pkg/schema/version.go @@ -0,0 +1,15 @@ +package schema + +import "github.com/luscis/openlan/pkg/libol" + +type Version struct { + Version string `json:"version"` + Date string `json:"date"` +} + +func NewVersionSchema() Version { + return Version{ + Version: libol.Version, + Date: libol.Date, + } +} diff --git a/pkg/schema/vxlan.go b/pkg/schema/vxlan.go new file mode 100755 index 0000000..7ef83d5 --- /dev/null +++ b/pkg/schema/vxlan.go @@ -0,0 +1,13 @@ +package schema + +type VxLAN struct { + Name string `json:"name"` + Bridge string `json:"bridge"` + Members []VxLANMember `json:"members"` +} + +type VxLANMember struct { + Vni int `json:"vni"` + Local string `json:"local"` + Remote string `json:"remote"` +} diff --git a/pkg/schema/worker.go b/pkg/schema/worker.go new file mode 100755 index 0000000..778937d --- /dev/null +++ b/pkg/schema/worker.go @@ -0,0 +1,8 @@ +package schema + +type Worker struct { + Uptime int64 `json:"uptime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Protocol string `json:"protocol"` +} diff --git a/pkg/switch/confd.go b/pkg/switch/confd.go new file mode 100755 index 0000000..a485152 --- /dev/null +++ b/pkg/switch/confd.go @@ -0,0 +1,386 @@ +package _switch + +import ( + "github.com/luscis/openlan/pkg/api" + "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/database" + "github.com/luscis/openlan/pkg/libol" + "github.com/ovn-org/libovsdb/cache" + "github.com/ovn-org/libovsdb/model" + "strconv" + "strings" +) + +type ConfD struct { + stop chan struct{} + out *libol.SubLogger + api api.Switcher +} + +func NewConfd(api api.Switcher) *ConfD { + c := &ConfD{ + out: libol.NewSubLogger("confd"), + stop: make(chan struct{}), + api: api, + } + return c +} + +func (c *ConfD) Initialize() { +} + +func (c *ConfD) Start() { + 
handler := &cache.EventHandlerFuncs{ + AddFunc: c.Add, + DeleteFunc: c.Delete, + UpdateFunc: c.Update, + } + if _, err := database.NewDBClient(handler); err != nil { + c.out.Error("Confd.Start open db with %s", err) + return + } +} + +func (c *ConfD) Stop() { +} + +func (c *ConfD) Add(table string, model model.Model) { + c.out.Cmd("ConfD.Add %s %v", table, model) + if obj, ok := model.(*database.Switch); ok { + c.out.Info("ConfD.Add switch %d", obj.Listen) + } + + if obj, ok := model.(*database.VirtualNetwork); ok { + c.out.Info("ConfD.Add virtual network %s:%s", obj.Name, obj.Address) + } + + if obj, ok := model.(*database.VirtualLink); ok { + c.out.Info("ConfD.Add virtual link %s:%s", obj.Network, obj.Connection) + c.AddLink(obj) + } + + if obj, ok := model.(*database.NameCache); ok { + c.out.Info("ConfD.Add name cache %s", obj.Name) + c.UpdateName(obj) + } + + if obj, ok := model.(*database.PrefixRoute); ok { + c.out.Info("ConfD.Add prefix route %s:%s", obj.Network, obj.Prefix) + c.AddRoute(obj) + } +} + +func (c *ConfD) Delete(table string, model model.Model) { + c.out.Cmd("ConfD.Delete %s %v", table, model) + if obj, ok := model.(*database.VirtualNetwork); ok { + c.out.Info("ConfD.Delete virtual network %s:%s", obj.Name, obj.Address) + } + + if obj, ok := model.(*database.VirtualLink); ok { + c.out.Info("ConfD.Delete virtual link %s:%s", obj.Network, obj.Connection) + c.DelLink(obj) + } + + if obj, ok := model.(*database.PrefixRoute); ok { + c.out.Info("ConfD.Delete prefix route %s:%s", obj.Network, obj.Prefix) + c.DelRoute(obj) + } +} + +func (c *ConfD) Update(table string, old model.Model, new model.Model) { + c.out.Cmd("ConfD.Update %s %v", table, new) + if obj, ok := new.(*database.VirtualNetwork); ok { + c.out.Info("ConfD.Update virtual network %s:%s", obj.Name, obj.Address) + } + + if obj, ok := new.(*database.VirtualLink); ok { + c.out.Info("ConfD.Update virtual link %s:%s", obj.Network, obj.Connection) + c.AddLink(obj) + } + + if obj, ok := new.(*database.NameCache); ok { + c.out.Info("ConfD.Update name cache %s", obj.Name) + c.UpdateName(obj) + } +} + +func GetAddrPort(conn string) (string, int) { + values := strings.SplitN(conn, ":", 2) + if len(values) == 2 { + port, _ := strconv.Atoi(values[1]) + return values[0], port + } + return values[0], 0 +} + +func GetRoutes(result *[]database.PrefixRoute, device string) error { + if err := database.Client.WhereList( + func(l *database.PrefixRoute) bool { + return l.Gateway == device + }, result); err != nil { + return err + } + return nil +} + +func (c *ConfD) AddLink(obj *database.VirtualLink) { + worker := GetWorker(obj.Network) + if worker == nil { + c.out.Warn("ConfD.AddLink network %s not found.", obj.Network) + return + } + cfg := worker.Config() + if cfg == nil || cfg.Specifies == nil { + c.out.Warn("ConfD.AddLink config %s not found.", obj.Network) + return + } + if cfg.Provider == "esp" { + link := &MemberLink{ + LinkImpl{ + api: c.api, + out: c.out, + worker: worker, + }, + } + link.Add(obj) + } else if cfg.Provider == "fabric" { + link := &TunnelLink{ + LinkImpl{ + api: c.api, + out: c.out, + worker: worker, + }, + } + link.Add(obj) + } +} + +func (c *ConfD) DelLink(obj *database.VirtualLink) { + worker := GetWorker(obj.Network) + if worker == nil { + c.out.Warn("ConfD.DelLink network %s not found.", obj.Network) + return + } + cfg := worker.Config() + if cfg == nil || cfg.Specifies == nil { + c.out.Warn("ConfD.DelLink config %s not found.", obj.Network) + return + } + if cfg.Provider == "esp" { + link := &MemberLink{ + 
LinkImpl{ + api: c.api, + out: c.out, + worker: worker, + }, + } + link.Del(obj) + } else if cfg.Provider == "fabric" { + link := &TunnelLink{ + LinkImpl{ + api: c.api, + out: c.out, + worker: worker, + }, + } + link.Del(obj) + } +} + +func (c *ConfD) UpdateName(obj *database.NameCache) { + if obj.Address == "" { + return + } + c.out.Info("ConfD.UpdateName %s %s", obj.Name, obj.Address) + ListWorker(func(w Networker) { + cfg := w.Config() + spec := cfg.Specifies + if spec == nil { + return + } + if specObj, ok := spec.(*config.ESPSpecifies); ok { + if specObj.HasRemote(obj.Name, obj.Address) { + cfg.Correct() + w.Reload(c.api) + } + } + }) +} + +func (c *ConfD) AddRoute(obj *database.PrefixRoute) { + if obj.Prefix == "" { + return + } + c.out.Cmd("ConfD.DelRoute %v", obj.Network) + worker := GetWorker(obj.Network) + if worker == nil { + c.out.Warn("ConfD.DelRoute network %s not found.", obj.Network) + return + } + netCfg := worker.Config() + if netCfg == nil || netCfg.Specifies == nil { + c.out.Warn("ConfD.DelRoute config %s not found.", obj.Network) + return + } + spec := netCfg.Specifies + poCfg := &config.ESPPolicy{ + Source: obj.Source, + Dest: obj.Prefix, + } + if specObj, ok := spec.(*config.ESPSpecifies); ok { + var mem *config.ESPMember + if mem = specObj.GetMember(obj.Gateway); mem != nil { + mem.AddPolicy(poCfg) + } else if libol.GetPrefix(obj.Gateway, 4) == "spi:" { + mem = &config.ESPMember{ + Name: obj.Gateway, + } + specObj.AddMember(mem) + } + if mem != nil { + mem.AddPolicy(poCfg) + specObj.Correct() + worker.Reload(c.api) + } + } +} + +func (c *ConfD) DelRoute(obj *database.PrefixRoute) { + if obj.Prefix == "" { + return + } + c.out.Cmd("ConfD.DelRoute %v", obj.Network) + worker := GetWorker(obj.Network) + if worker == nil { + c.out.Warn("ConfD.DelRoute network %s not found.", obj.Network) + return + } + netCfg := worker.Config() + if netCfg == nil || netCfg.Specifies == nil { + c.out.Warn("ConfD.DelRoute config %s not found.", obj.Network) + return + } + spec := netCfg.Specifies + if specObj, ok := spec.(*config.ESPSpecifies); ok { + if mem := specObj.GetMember(obj.Gateway); mem != nil { + if mem.RemovePolicy(obj.Prefix) { + specObj.Correct() + worker.Reload(c.api) + } + } + } +} + +type LinkImpl struct { + api api.Switcher + out *libol.SubLogger + worker Networker +} + +func (l *LinkImpl) Add(obj *database.VirtualLink) { + l.out.Info("LinkImpl.Add TODO") +} + +func (l *LinkImpl) Update(obj *database.VirtualLink) { + l.out.Info("LinkImpl.Update TODO") +} + +func (l *LinkImpl) Del(obj *database.VirtualLink) { + l.out.Info("LinkImpl.Del TODO") +} + +type MemberLink struct { + LinkImpl +} + +func (l *MemberLink) Add(obj *database.VirtualLink) { + var port int + var remote string + + conn := obj.Connection + if conn == "any" { + remoteConn := obj.Status["remote_connection"] + if libol.GetPrefix(remoteConn, 4) == "udp:" { + remote, port = GetAddrPort(remoteConn[4:]) + } else { + l.out.Warn("MemberLink.Add %s remote not found.", conn) + return + } + } else if libol.GetPrefix(conn, 4) == "udp:" { + remoteConn := obj.Connection + remote, port = GetAddrPort(remoteConn[4:]) + } else { + return + } + l.out.Info("MemberLink.Add remote link %s:%d", remote, port) + memCfg := &config.ESPMember{ + Name: obj.Device, + Address: obj.OtherConfig["local_address"], + Peer: obj.OtherConfig["remote_address"], + State: config.EspState{ + Remote: remote, + RemotePort: port, + Auth: obj.Authentication["password"], + Crypt: obj.Authentication["username"], + }, + } + var routes []database.PrefixRoute 
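// A standalone sketch of how the "udp:<host>:<port>" connection strings used
// above are parsed. getAddrPort mirrors GetAddrPort from this file; the
// connection value is made up for illustration.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// getAddrPort splits "host:port" once; a missing port falls back to 0.
func getAddrPort(conn string) (string, int) {
	values := strings.SplitN(conn, ":", 2)
	if len(values) == 2 {
		port, _ := strconv.Atoi(values[1])
		return values[0], port
	}
	return values[0], 0
}

func main() {
	conn := "udp:192.0.2.10:4500" // hypothetical remote_connection value
	if strings.HasPrefix(conn, "udp:") {
		remote, port := getAddrPort(conn[4:]) // strip the "udp:" prefix
		fmt.Println(remote, port)             // 192.0.2.10 4500
	}
}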
+ _ = GetRoutes(&routes, obj.Device) + for _, route := range routes { + l.out.Info("MemberLink.Add %s via %s", route.Prefix, obj.Device) + memCfg.AddPolicy(&config.ESPPolicy{ + Source: route.Source, + Dest: route.Prefix, + }) + } + l.out.Cmd("MemberLink.Add %v", memCfg) + spec := l.worker.Config().Specifies + if specObj, ok := spec.(*config.ESPSpecifies); ok { + specObj.AddMember(memCfg) + specObj.Correct() + l.worker.Reload(l.api) + } +} + +func (l *MemberLink) Update(obj *database.VirtualLink) { + +} + +func (l *MemberLink) Del(obj *database.VirtualLink) { + l.out.Info("MemberLink.Del remote link %s", obj.Device) + spec := l.worker.Config().Specifies + if specObj, ok := spec.(*config.ESPSpecifies); ok { + if specObj.DelMember(obj.Device) { + specObj.Correct() + l.worker.Reload(l.api) + } + } +} + +type TunnelLink struct { + LinkImpl +} + +func (l *TunnelLink) Add(obj *database.VirtualLink) { + tunCfg := &config.FabricTunnel{ + Remote: obj.Connection, + } + l.out.Cmd("TunnelLink.Add %v", tunCfg) + spec := l.worker.Config().Specifies + if specObj, ok := spec.(*config.FabricSpecifies); ok { + specObj.AddTunnel(tunCfg) + specObj.Correct() + l.worker.Reload(l.api) + } +} + +func (l *TunnelLink) Del(obj *database.VirtualLink) { + l.out.Info("TunnelLink.Del remote link %s", obj.Connection) + spec := l.worker.Config().Specifies + if specObj, ok := spec.(*config.FabricSpecifies); ok { + if specObj.DelTunnel(obj.Connection) { + specObj.Correct() + l.worker.Reload(l.api) + } + } +} diff --git a/pkg/switch/dhcp.go b/pkg/switch/dhcp.go new file mode 100755 index 0000000..ff74f48 --- /dev/null +++ b/pkg/switch/dhcp.go @@ -0,0 +1,130 @@ +package _switch + +import ( + "fmt" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" +) + +const ( + DhcpBin = "dnsmasq" + DhcpDir = "/var/openlan/dhcp" +) + +type Dhcp struct { + cfg *co.Dhcp + out *libol.SubLogger + uuid string +} + +func NewDhcp(cfg *co.Dhcp) *Dhcp { + return &Dhcp{ + uuid: cfg.Name, + cfg: cfg, + out: libol.NewSubLogger(cfg.Name), + } +} + +func (d *Dhcp) Initialize() { +} + +func (d *Dhcp) Conf() *co.Dhcp { + return d.cfg +} + +func (d *Dhcp) UUID() string { + return d.uuid +} + +func (d *Dhcp) Path() string { + return DhcpBin +} + +func (d *Dhcp) PidFile() string { + return filepath.Join(DhcpDir, d.uuid+".pid") +} + +func (d *Dhcp) LeaseFile() string { + return filepath.Join(DhcpDir, d.uuid+".leases") +} + +func (d *Dhcp) ConfFile() string { + return filepath.Join(DhcpDir, d.uuid+".conf") +} + +func (d *Dhcp) LogFile() string { + return filepath.Join(DhcpDir, d.uuid+".log") +} + +const tmpl = `#Generate by OpenLAN +strict-order +except-interface=lo +bind-interfaces +interface=%s +dhcp-range=%s,%s,12h +dhcp-leasefile=%s +` + +func (d *Dhcp) SaveConf() { + cfg := d.cfg + data := fmt.Sprintf(tmpl, + cfg.Bridge.Name, + cfg.Subnet.Start, + cfg.Subnet.End, + d.LeaseFile(), + ) + _ = ioutil.WriteFile(d.ConfFile(), []byte(data), 0600) +} + +func (d *Dhcp) Start() { + log, err := libol.CreateFile(d.LogFile()) + if err != nil { + d.out.Warn("Dhcp.Start %s", err) + return + } + d.SaveConf() + libol.Go(func() { + args := []string{ + "--conf-file=" + d.ConfFile(), + "--pid-file=" + d.PidFile(), + } + d.out.Debug("Dhcp.Start %s %v", d.Path(), args) + cmd := exec.Command(d.Path(), args...) 
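// A standalone sketch of the dnsmasq configuration that SaveConf above
// renders; it reuses the same format string, and the bridge name, address
// pool and lease path are made-up examples.
package main

import "fmt"

const dhcpTmpl = `#Generate by OpenLAN
strict-order
except-interface=lo
bind-interfaces
interface=%s
dhcp-range=%s,%s,12h
dhcp-leasefile=%s
`

func main() {
	conf := fmt.Sprintf(dhcpTmpl,
		"br-example",                       // bridge the DHCP server binds to
		"172.32.10.100", "172.32.10.150",   // dhcp-range start and end
		"/var/openlan/dhcp/example.leases", // per-network lease file
	)
	fmt.Print(conf)
}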
+ cmd.Stdout = log + cmd.Stderr = log + if err := cmd.Run(); err != nil { + d.out.Error("Dhcp.Start %s: %s", d.uuid, err) + } + }) +} + +func (d *Dhcp) Clean() { + files := []string{ + d.LogFile(), d.PidFile(), d.ConfFile(), + } + for _, file := range files { + if err := libol.FileExist(file); err == nil { + if err := os.Remove(file); err != nil { + d.out.Warn("Dhcp.Clean %s", err) + } + } + } +} + +func (d *Dhcp) Stop() { + if data, err := ioutil.ReadFile(d.PidFile()); err != nil { + d.out.Info("Dhcp.Stop %s", err) + } else { + pid := strings.TrimSpace(string(data)) + cmd := exec.Command("/usr/bin/kill", pid) + if err := cmd.Run(); err != nil { + d.out.Warn("Dhcp.Stop %s: %s", pid, err) + } + } + d.Clean() +} diff --git a/pkg/switch/esp.go b/pkg/switch/esp.go new file mode 100755 index 0000000..62ebd43 --- /dev/null +++ b/pkg/switch/esp.go @@ -0,0 +1,404 @@ +package _switch + +import ( + "github.com/luscis/openlan/pkg/api" + "github.com/luscis/openlan/pkg/cache" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + nl "github.com/vishvananda/netlink" + "net" + "os/exec" + "strconv" +) + +const ( + UDPBin = "openudp" +) + +func GetStateEncap(mode string, sport, dport int) *nl.XfrmStateEncap { + if mode == "udp" { + return &nl.XfrmStateEncap{ + Type: nl.XFRM_ENCAP_ESPINUDP, + SrcPort: sport, + DstPort: dport, + OriginalAddress: net.ParseIP("0.0.0.0"), + } + } + return nil +} + +type EspWorker struct { + *WorkerImpl + proto nl.Proto + mode nl.Mode + states []*models.EspState + policies []*models.EspPolicy + spec *co.ESPSpecifies +} + +func NewESPWorker(c *co.Network) *EspWorker { + w := &EspWorker{ + WorkerImpl: NewWorkerApi(c), + proto: nl.XFRM_PROTO_ESP, + mode: nl.XFRM_MODE_TUNNEL, + } + w.spec, _ = c.Specifies.(*co.ESPSpecifies) + return w +} + +type StateParameters struct { + spi int + local, remote net.IP + auth, crypt string +} + +func (w *EspWorker) newState(args StateParameters) *nl.XfrmState { + state := &nl.XfrmState{ + Src: args.local, + Dst: args.remote, + Proto: w.proto, + Mode: w.mode, + Spi: args.spi, + Auth: &nl.XfrmStateAlgo{ + Name: "hmac(sha256)", + Key: []byte(args.auth), + }, + Crypt: &nl.XfrmStateAlgo{ + Name: "cbc(aes)", + Key: []byte(args.crypt), + }, + } + return state +} + +type PolicyParameter struct { + spi int + local, remote net.IP + src, dst *net.IPNet + dir nl.Dir +} + +func (w *EspWorker) newPolicy(args PolicyParameter) *nl.XfrmPolicy { + policy := &nl.XfrmPolicy{ + Src: args.src, + Dst: args.dst, + Dir: args.dir, + } + tmpl := nl.XfrmPolicyTmpl{ + Src: args.local, + Dst: args.remote, + Proto: w.proto, + Mode: w.mode, + Spi: args.spi, + } + policy.Tmpls = append(policy.Tmpls, tmpl) + return policy +} + +func (w *EspWorker) addState(ms *models.EspState) { + spi := ms.Spi + w.out.Info("EspWorker.addState %s-%s", ms.Local, ms.Remote) + if st := w.newState(StateParameters{ + spi, ms.Local, ms.Remote, ms.Auth, ms.Crypt, + }); st != nil { + st.Encap = GetStateEncap(ms.Encap, co.EspLocalUdp, ms.RemotePort) + ms.In = st + } else { + return + } + if st := w.newState(StateParameters{ + spi, ms.Remote, ms.Local, ms.Auth, ms.Crypt, + }); st != nil { + st.Encap = GetStateEncap(ms.Encap, ms.RemotePort, co.EspLocalUdp) + ms.Out = st + } else { + return + } + w.states = append(w.states, ms) + cache.EspState.Add(ms) +} + +func (w *EspWorker) delState(ms *models.EspState) { + w.out.Info("EspWorker.delState %s-%s", ms.Local, ms.Remote) + cache.EspState.Del(ms.ID()) 
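// Note: delState only drops the cache entry. The paired in/out kernel XFRM
// states created by addState (same SPI, mirrored src/dst, plus ESP-in-UDP
// encapsulation when the member's encap mode is "udp") are removed later by
// delXfrm through nl.XfrmStateDel.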
+} + +func (w *EspWorker) addPolicy(mp *models.EspPolicy) { + spi := mp.Spi + src, err := libol.ParseNet(mp.Source) + if err != nil { + w.out.Error("EspWorker.addPolicy %s %s", mp.Source, err) + return + } + dst, err := libol.ParseNet(mp.Dest) + if err != nil { + w.out.Error("EspWorker.addPolicy %s %s", mp.Dest, err) + return + } + w.out.Info("EspWorker.addPolicy %s-%s", mp.Source, mp.Dest) + if po := w.newPolicy(PolicyParameter{ + spi, mp.Local, mp.Remote, src, dst, nl.XFRM_DIR_OUT, + }); po != nil { + mp.Out = po + } else { + return + } + if po := w.newPolicy(PolicyParameter{ + spi, mp.Remote, mp.Local, dst, src, nl.XFRM_DIR_FWD, + }); po != nil { + mp.Fwd = po + } else { + return + } + if po := w.newPolicy(PolicyParameter{ + spi, mp.Remote, mp.Local, dst, src, nl.XFRM_DIR_IN, + }); po != nil { + mp.In = po + } else { + return + } + w.policies = append(w.policies, mp) + cache.EspPolicy.Add(mp) +} + +func (w *EspWorker) delPolicy(mp *models.EspPolicy) { + w.out.Info("EspWorker.delPolicy %s-%s", mp.Source, mp.Dest) + cache.EspPolicy.Del(mp.ID()) +} + +func (w *EspWorker) updateXfrm() { + for _, mem := range w.spec.Members { + if mem == nil { + continue + } + state := mem.State + if state.LocalIp == nil || state.RemoteIp == nil { + continue + } + + ms := &models.EspState{ + EspState: &schema.EspState{ + Name: w.spec.Name, + Spi: mem.Spi, + Local: state.LocalIp, + Remote: state.RemoteIp, + Proto: uint8(w.proto), + Mode: uint8(w.mode), + Encap: state.Encap, + Auth: state.Auth, + Crypt: state.Crypt, + RemotePort: state.RemotePort, + }, + } + w.addState(ms) + for _, pol := range mem.Policies { + if pol == nil || pol.Dest == "" { + continue + } + mp := &models.EspPolicy{ + EspPolicy: &schema.EspPolicy{ + Name: w.spec.Name, + Spi: mem.Spi, + Local: state.LocalIp, + Remote: state.RemoteIp, + Source: pol.Source, + Dest: pol.Dest, + }, + } + w.addPolicy(mp) + } + } +} + +func (w *EspWorker) Initialize() { + w.WorkerImpl.Initialize() + w.updateXfrm() +} + +func (w *EspWorker) AddRoute(device, src, remote string) error { + link, err := nl.LinkByName(device) + if link == nil { + return err + } + // add peer routes. 
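// The RouteReplace below is roughly equivalent to:
//   ip route replace <remote> via <src> dev <device> metric 600
// so the policy destinations of a peer are reached through the ESP dummy
// device, and repeated reloads stay idempotent.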
+ dst, err := libol.ParseNet(remote) + if err != nil { + return libol.NewErr("%s %s.", err, remote) + } + gw := libol.ParseAddr(src) + rte := &nl.Route{ + Dst: dst, + Gw: gw, + LinkIndex: link.Attrs().Index, + Priority: 600, + } + w.out.Debug("EspWorker.AddRoute: %s", rte) + if err := nl.RouteReplace(rte); err != nil { + return libol.NewErr("%s %s.", err, remote) + } + return nil +} + +func (w *EspWorker) UpDummy(name, addr, peer string) error { + link, _ := nl.LinkByName(name) + if link == nil { + port := &nl.Dummy{ + LinkAttrs: nl.LinkAttrs{ + TxQLen: -1, + Name: name, + }, + } + if err := nl.LinkAdd(port); err != nil { + return err + } + link, _ = nl.LinkByName(name) + } + if err := nl.LinkSetUp(link); err != nil { + w.out.Error("EspWorker.UpDummy: %s", err) + } + if addr != "" { + ipAddr, err := nl.ParseAddr(addr) + if err != nil { + return libol.NewErr("%s %s.", err, addr) + } + if err := nl.AddrReplace(link, ipAddr); err != nil { + w.out.Warn("EspWorker.UpDummy: %s", err) + } + } + w.out.Info("EspWorker.Open %s success", name) + return nil +} + +func (w *EspWorker) addXfrm() { + for _, state := range w.states { + w.out.Debug("EspWorker.AddXfrm State %s", state.In.Spi) + if err := nl.XfrmStateAdd(state.In); err != nil { + w.out.Error("EspWorker.Start.in State.in %s", err) + } + if err := nl.XfrmStateAdd(state.Out); err != nil { + w.out.Error("EspWorker.Start.out State.out %s", err) + } + } + for _, policy := range w.policies { + w.out.Debug("EspWorker.AddXfrm Policy %s", policy.Out.Dst) + if err := nl.XfrmPolicyAdd(policy.In); err != nil { + w.out.Error("EspWorker.addXfrm.in Policy %s", err) + } + if err := nl.XfrmPolicyAdd(policy.Fwd); err != nil { + w.out.Error("EspWorker.addXfrm.fwd Policy %s", err) + } + if err := nl.XfrmPolicyAdd(policy.Out); err != nil { + w.out.Error("EspWorker.addXfrm.out Policy %s", err) + } + } +} + +func (w *EspWorker) Start(v api.Switcher) { + w.uuid = v.UUID() + w.upMember() + w.addXfrm() + cache.Esp.Add(&models.Esp{ + Name: w.cfg.Name, + Address: w.spec.Address, + }) + w.WorkerImpl.Start(v) +} + +func (w *EspWorker) DownDummy(name string) error { + link, _ := nl.LinkByName(name) + if link == nil { + return nil + } + port := &nl.Dummy{ + LinkAttrs: nl.LinkAttrs{ + TxQLen: -1, + Name: name, + }, + } + if err := nl.LinkDel(port); err != nil { + return err + } + return nil +} + +func (w *EspWorker) delXfrm() { + for _, mp := range w.policies { + w.delPolicy(mp) + if err := nl.XfrmPolicyDel(mp.In); err != nil { + w.out.Warn("EspWorker.delXfrm Policy %s-%s: %s", mp.Source, mp.Dest, err) + } + if err := nl.XfrmPolicyDel(mp.Fwd); err != nil { + w.out.Warn("EspWorker.delXfrm Policy %s-%s: %s", mp.Source, mp.Dest, err) + } + if err := nl.XfrmPolicyDel(mp.Out); err != nil { + w.out.Warn("EspWorker.delXfrm Policy %s-%s: %s", mp.Source, mp.Dest, err) + } + } + w.policies = nil + for _, ms := range w.states { + w.delState(ms) + if err := nl.XfrmStateDel(ms.In); err != nil { + w.out.Warn("EspWorker.delXfrm State %s-%s: %s", ms.Local, ms.Remote, err) + } + if err := nl.XfrmStateDel(ms.Out); err != nil { + w.out.Warn("EspWorker.delXfrm State %s-%s: %s", ms.Local, ms.Remote, err) + } + } + w.states = nil +} + +func (w *EspWorker) Stop() { + w.WorkerImpl.Stop() + cache.Esp.Del(w.cfg.Name) + w.downMember() + w.delXfrm() +} + +func (w *EspWorker) Reload(v api.Switcher) { + w.Stop() + w.Initialize() + w.Start(v) +} + +func (w *EspWorker) upMember() { + for _, mem := range w.spec.Members { + if mem.Peer == "" { + continue + } + if err := w.UpDummy(w.spec.Name, mem.Address, 
mem.Peer); err != nil { + w.out.Warn("EspWorker.UpDummy %d %s", mem.Spi, err) + } + for _, po := range mem.Policies { + if err := w.AddRoute(w.spec.Name, mem.Address, po.Dest); err != nil { + w.out.Warn("EspWorker.AddRoute %d %s", mem.Spi, err) + } + } + } +} + +func (w *EspWorker) downMember() { + for _, mem := range w.spec.Members { + if mem.Peer == "" { + continue + } + if err := w.DownDummy(w.spec.Name); err != nil { + w.out.Error("EspWorker.downMember %d %s", mem.Spi, err) + } + } +} + +func OpenUDP() { + libol.Go(func() { + args := []string{ + "-p", strconv.Itoa(co.EspLocalUdp), + "-vconsole:emer", + "--log-file=/var/openlan/openudp.log", + } + cmd := exec.Command(UDPBin, args...) + if err := cmd.Run(); err != nil { + libol.Error("esp.init %s", err) + } + }) +} diff --git a/pkg/switch/esp_test.go b/pkg/switch/esp_test.go new file mode 100755 index 0000000..4e62cce --- /dev/null +++ b/pkg/switch/esp_test.go @@ -0,0 +1,14 @@ +package _switch + +import ( + "fmt" + "net" + "testing" +) + +func TestDNS_lookup(t *testing.T) { + addr, err := net.LookupHost("nj.openlan.net") + fmt.Println(addr, err) + addr, err = net.LookupHost("114.221.197.118") + fmt.Println(addr, err) +} diff --git a/pkg/switch/fabric.go b/pkg/switch/fabric.go new file mode 100644 index 0000000..5e0b9fb --- /dev/null +++ b/pkg/switch/fabric.go @@ -0,0 +1,507 @@ +package _switch + +import ( + "fmt" + "github.com/danieldin95/go-openvswitch/ovs" + "github.com/luscis/openlan/pkg/api" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + cn "github.com/luscis/openlan/pkg/network" + "github.com/vishvananda/netlink" + "strings" +) + +type Fabricer interface { + AddNetwork(cfg *co.Network) + DelNetwork(bridge string, vni uint32) + TcpMss() int +} + +var fabrics = make(map[string]Fabricer) + +func GetFabricer(name string) Fabricer { + return fabrics[name] +} + +type OvsBridge struct { + name string + cli *ovs.Client + out *libol.SubLogger +} + +func NewOvsBridge(name string) *OvsBridge { + return &OvsBridge{ + name: name, + cli: ovs.New(), + out: libol.NewSubLogger(name), + } +} + +func (o *OvsBridge) delFlow(flow *ovs.MatchFlow) error { + if err := o.cli.OpenFlow.DelFlows(o.name, flow); err != nil { + o.out.Warn("OvsBridge.addFlow %s", err) + return err + } + return nil +} + +func (o *OvsBridge) addFlow(flow *ovs.Flow) error { + if err := o.cli.OpenFlow.AddFlow(o.name, flow); err != nil { + o.out.Warn("OvsBridge.addFlow %s", err) + return err + } + return nil +} + +func (o *OvsBridge) setDown() error { + if err := o.cli.VSwitch.DeleteBridge(o.name); err != nil { + o.out.Error("OvsBridge.DeleteBridge %s %s", o.name, err) + return err + } + return nil +} + +func (o *OvsBridge) setUp() error { + if err := o.cli.VSwitch.AddBridge(o.name); err != nil { + o.out.Error("OvsBridge.AddBridge %s %s", o.name, err) + return err + } + return nil +} + +func (o *OvsBridge) setMode(mode ovs.FailMode) error { + if err := o.cli.VSwitch.SetFailMode(o.name, mode); err != nil { + o.out.Warn("OvsBridge.setMode %s %s", mode, err) + return err + } + return nil +} + +func (o *OvsBridge) addPort(name string, options *ovs.InterfaceOptions) error { + if err := o.cli.VSwitch.AddPort(o.name, name); err != nil { + o.out.Warn("OvsBridge.addPort %s %s", name, err) + return err + } + if options == nil { + return nil + } + if err := o.cli.VSwitch.Set.Interface(name, *options); err != nil { + o.out.Warn("OvsBridge.addPort %s %s", name, err) + return err + } + return nil +} + +func (o *OvsBridge) delPort(name string) error { + if err 
:= o.cli.VSwitch.DeletePort(o.name, name); err != nil { + o.out.Warn("OvsBridge.delPort %s %s", name, err) + return err + } + return nil +} + +func (o *OvsBridge) setPort(name string, options ovs.InterfaceOptions) error { + if err := o.cli.VSwitch.Set.Interface(name, options); err != nil { + o.out.Warn("OvsBridge.setPort %s %s", name, err) + return err + } + return nil +} + +func (o *OvsBridge) dumpPort(name string) *ovs.PortStats { + if port, err := o.cli.OpenFlow.DumpPort(o.name, name); err != nil { + o.out.Warn("OvsBridge.dumpPort %s %s", name, err) + return nil + } else { + return port + } +} + +const ( + TLsToTun = 2 // From a switch include border to tunnels. + TTunToLs = 4 // From tunnels to a switch. + TSourceLearn = 10 // Learning source mac. + TUcastToTun = 20 // Forwarding by fdb. + TFloodToTun = 30 // Flooding to tunnels or patch by flags. + TFloodToBor = 31 // Flooding to border in a switch. + TFloodLoop = 32 // Flooding to patch in a switch from border. +) + +const ( + FFromLs = 2 // In a logical switch. + FFromTun = 4 // From peer tunnels. +) + +const ( + MatchRegFlag = "reg10" + NxmRegFlag = "NXM_NX_REG10[0..31]" + NxmRegEthDst = "NXM_OF_ETH_DST[]" + NxmRegEthSrc = "NXM_OF_ETH_SRC[]" + NxmRegTunId = "NXM_NX_TUN_ID[0..31]" + NxmRegInPort = "NXM_OF_IN_PORT[]" +) + +type OvsPort struct { + name string + portId int + options ovs.InterfaceOptions +} + +type FabricWorker struct { + *WorkerImpl + spec *co.FabricSpecifies + ovs *OvsBridge + cookie uint64 + tunnels map[string]*OvsPort + borders map[string]*OvsPort + bridge map[string]*cn.LinuxBridge +} + +func NewFabricWorker(c *co.Network) *FabricWorker { + w := &FabricWorker{ + WorkerImpl: NewWorkerApi(c), + ovs: NewOvsBridge(c.Bridge.Name), + tunnels: make(map[string]*OvsPort, 1024), + borders: make(map[string]*OvsPort, 1024), + bridge: make(map[string]*cn.LinuxBridge, 1024), + } + w.spec, _ = c.Specifies.(*co.FabricSpecifies) + fabrics[c.Name] = w + return w +} + +func (w *FabricWorker) upTables() { + _ = w.ovs.addFlow(&ovs.Flow{ + Actions: []ovs.Action{ovs.Drop()}, + }) + // Table 2: set flags from logical switch. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TLsToTun, + Priority: 1, + Actions: []ovs.Action{ + ovs.Load(libol.Uint2S(FFromLs), NxmRegFlag), + ovs.Resubmit(0, TSourceLearn), + }, + }) + // Table 4: set flags from tunnels. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TTunToLs, + Priority: 1, + Actions: []ovs.Action{ + ovs.Load(libol.Uint2S(FFromTun), NxmRegFlag), + ovs.Resubmit(0, TSourceLearn), + }, + }) + // Table 10: source learning + w.addLearning() + // Table 20: default to flood 30 + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TUcastToTun, + Actions: []ovs.Action{ + ovs.Resubmit(0, TFloodToTun), + }, + }) + // Table 30: default drop. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToTun, + Actions: []ovs.Action{ovs.Drop()}, + }) + // Table 31: default drop. 
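// (The flow added below is that table's default-drop entry.)
//
// Pipeline overview, using the table and flag constants above:
//   0  classify by in_port, load the tunnel id, resubmit to 2 or 4
//   2  TLsToTun      mark traffic from the logical switch (FFromLs), goto 10
//   4  TTunToLs      mark traffic from peer tunnels (FFromTun), goto 10
//   10 TSourceLearn  learn source MACs into table 20, goto 20
//   20 TUcastToTun   known unicast via learned flows, otherwise goto 30
//   30 TFloodToTun   flood to the patch port or tunnels by flag, goto 31
//   31 TFloodToBor   flood to border tunnels
//   32 TFloodLoop    flood back to the patch port for border traffic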
+ _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToBor, + Actions: []ovs.Action{ovs.Drop()}, + }) +} + +func (w *FabricWorker) Initialize() { + w.WorkerImpl.Initialize() + if err := w.ovs.setUp(); err != nil { + return + } + _ = w.ovs.setMode("secure") + w.upTables() + ListWorker(func(n Networker) { + if w.IsSlave(n) { + n.Initialize() + } + }) +} + +func (w *FabricWorker) vni2peer(vni uint32) (string, string) { + tunPort := fmt.Sprintf("vb-%08d", vni) + brPort := fmt.Sprintf("vt-%08d", vni) + return brPort, tunPort +} + +func (w *FabricWorker) UpLink(bridge string, vni uint32, addr string) *ovs.PortStats { + brPort, tunPort := w.vni2peer(vni) + link := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: tunPort}, + PeerName: brPort, + } + if err := netlink.LinkAdd(link); err != nil { + w.out.Warn("FabricWorker.addLink %s", err) + } + if err := netlink.LinkSetUp(link); err != nil { + w.out.Warn("FabricWorker.setLinkUp %s", err) + } + // Setup linux bridge for outputs + br := cn.NewLinuxBridge(bridge, 0) + br.Open(addr) + _ = br.AddSlave(brPort) + if err := br.CallIptables(1); err != nil { + w.out.Warn("FabricWorker.IpTables %s", err) + } + w.bridge[bridge] = br + // Add port to OvS tunnel bridge + _ = w.ovs.addPort(tunPort, nil) + if port := w.ovs.dumpPort(tunPort); port != nil { + return port + } + return nil +} + +func (w *FabricWorker) addLearning() { + // Table 10: source mac learning + learnSpecs := []ovs.Match{ + ovs.FieldMatch(NxmRegTunId, NxmRegTunId), + ovs.FieldMatch(NxmRegEthDst, NxmRegEthSrc), + } + learnActions := []ovs.Action{ + ovs.OutputField(NxmRegInPort), + } + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TSourceLearn, + Actions: []ovs.Action{ + ovs.Learn(&ovs.LearnedFlow{ + Table: TUcastToTun, + Matches: learnSpecs, + Priority: 1, + HardTimeout: 300, + Actions: learnActions, + }), + ovs.Resubmit(0, TUcastToTun), + }, + }) +} + +func (w *FabricWorker) AddNetwork(cfg *co.Network) { + spec, _ := cfg.Specifies.(*co.VxLANSpecifies) + libol.Info("Fabric.AddNetwork %d", spec.Vni) + patch := w.UpLink(cfg.Bridge.Name, spec.Vni, cfg.Bridge.Address) + // Table 0: load tunnel id from patch port. + _ = w.ovs.addFlow(&ovs.Flow{ + InPort: patch.PortID, + Priority: 1, + Actions: []ovs.Action{ + ovs.Load(libol.Uint2S(spec.Vni), NxmRegTunId), + ovs.Resubmit(0, TLsToTun), + }, + }) + // Table 30: flooding to patch from tunnels. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToTun, + Priority: 2, + Matches: []ovs.Match{ + ovs.FieldMatch(NxmRegTunId, libol.Uint2S(spec.Vni)), + ovs.FieldMatch(MatchRegFlag, libol.Uint2S(FFromTun)), + }, + Actions: []ovs.Action{ + ovs.Output(patch.PortID), + ovs.Resubmit(0, TFloodToBor), + }, + }) + // Table 32: flooding to patch from border. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodLoop, + Priority: 2, + Matches: []ovs.Match{ + ovs.FieldMatch(NxmRegTunId, libol.Uint2S(spec.Vni)), + ovs.FieldMatch(MatchRegFlag, libol.Uint2S(FFromLs)), + }, + Actions: []ovs.Action{ + ovs.Output(patch.PortID), + }, + }) +} + +func (w *FabricWorker) Addr2Port(addr, pre string) string { + name := pre + strings.ReplaceAll(addr, ".", "") + return libol.IfName(name) +} + +func (w *FabricWorker) flood2Tunnel() { + var actions []ovs.Action + for _, tun := range w.tunnels { + actions = append(actions, ovs.Output(tun.portId)) + } + actions = append(actions, ovs.Resubmit(0, TFloodToBor)) + // Table 30: Flooding to tunnels from patch. 
+ _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToTun, + Priority: 1, + Matches: []ovs.Match{ + ovs.FieldMatch(MatchRegFlag, libol.Uint2S(FFromLs)), + }, + Actions: actions, + }) +} + +func (w *FabricWorker) flood2Border() { + var actions []ovs.Action + for _, port := range w.borders { + actions = append(actions, ovs.Output(port.portId)) + } + // Table 31: flooding to border from tunnels. + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToBor, + Priority: 1, + Matches: []ovs.Match{ + ovs.FieldMatch(MatchRegFlag, libol.Uint2S(FFromTun)), + }, + Actions: actions, + }) + // Table 32: flooding to border from a border. + actions = append(actions, ovs.Resubmit(0, TFloodLoop)) + _ = w.ovs.addFlow(&ovs.Flow{ + Table: TFloodToBor, + Priority: 1, + Matches: []ovs.Match{ + ovs.FieldMatch(MatchRegFlag, libol.Uint2S(FFromLs)), + }, + Actions: actions, + }) +} + +func (w *FabricWorker) tunnelType() ovs.InterfaceType { + if w.spec.Driver == "stt" { + return ovs.InterfaceTypeSTT + } + return ovs.InterfaceTypeVXLAN +} + +func (w *FabricWorker) AddTunnel(cfg co.FabricTunnel) { + name := w.Addr2Port(cfg.Remote, "vx-") + options := ovs.InterfaceOptions{ + Type: w.tunnelType(), + BfdEnable: true, + RemoteIP: cfg.Remote, + Key: "flow", + DstPort: cfg.DstPort, + } + if w.spec.Fragment { + options.DfDefault = "false" + } else { + options.DfDefault = "true" + } + if err := w.ovs.addPort(name, &options); err != nil { + return + } + port := w.ovs.dumpPort(name) + if port == nil { + return + } + if cfg.Mode == "border" { + _ = w.ovs.addFlow(&ovs.Flow{ + InPort: port.PortID, + Priority: 1, + Actions: []ovs.Action{ + ovs.Resubmit(0, TLsToTun), + }, + }) + w.borders[name] = &OvsPort{ + name: name, + portId: port.PortID, + options: options, + } + // Update flow for flooding to border. + w.flood2Border() + } else { + _ = w.ovs.addFlow(&ovs.Flow{ + InPort: port.PortID, + Priority: 1, + Actions: []ovs.Action{ + ovs.Resubmit(0, TTunToLs), + }, + }) + w.tunnels[name] = &OvsPort{ + name: name, + portId: port.PortID, + options: options, + } + // Update flow for flooding to tunnels. 
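// (That update is the flood2Tunnel() call right after this sketch.)
//
// A standalone sketch of how Addr2Port above names tunnel ports: the dots of
// the remote address are stripped and a prefix is added. The real code then
// runs the result through libol.IfName, which is assumed here to clamp the
// name to Linux's 15-character interface-name limit; the address is made up.
package main

import (
	"fmt"
	"strings"
)

func addr2Port(addr, pre string) string {
	name := pre + strings.ReplaceAll(addr, ".", "")
	if len(name) > 15 { // assumed IfName behaviour, not taken from the source
		name = name[:15]
	}
	return name
}

func main() {
	fmt.Println(addr2Port("192.0.2.21", "vx-")) // vx-1920221
}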
+ w.flood2Tunnel() + } +} + +func (w *FabricWorker) Start(v api.Switcher) { + w.out.Info("FabricWorker.Start") + for _, tunnel := range w.spec.Tunnels { + w.AddTunnel(*tunnel) + } + w.WorkerImpl.Start(v) + ListWorker(func(n Networker) { + if w.IsSlave(n) { + n.Start(v) + } + }) +} + +func (w *FabricWorker) downTables() { + _ = w.ovs.delFlow(nil) +} + +func (w *FabricWorker) DelNetwork(bridge string, vni uint32) { + brPort, tunPort := w.vni2peer(vni) + if err := w.ovs.delPort(tunPort); err != nil { + libol.Warn("FabricWorker.downNetwork %s", err) + } + link := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: tunPort}, + PeerName: brPort, + } + _ = netlink.LinkDel(link) + if br, ok := w.bridge[bridge]; ok { + _ = br.Close() + } +} + +func (w *FabricWorker) DelTunnel(name string) { + _ = w.ovs.delPort(name) +} + +func (w *FabricWorker) IsSlave(n Networker) bool { + cfg := n.Config() + if cfg == nil || cfg.Specifies == nil { + return false + } + spec, ok := cfg.Specifies.(*co.VxLANSpecifies) + if !ok || spec.Fabric != w.cfg.Name { + return false + } + return true +} + +func (w *FabricWorker) Stop() { + w.out.Info("FabricWorker.Stop") + ListWorker(func(n Networker) { + if w.IsSlave(n) { + n.Stop() + } + }) + w.WorkerImpl.Stop() + w.downTables() + for _, tunnel := range w.tunnels { + w.DelTunnel(tunnel.name) + } +} + +func (w *FabricWorker) Reload(v api.Switcher) { + w.Stop() + w.Initialize() + w.Start(v) +} + +func (w *FabricWorker) TcpMss() int { + return w.cfg.Bridge.Mss +} diff --git a/pkg/switch/http.go b/pkg/switch/http.go new file mode 100755 index 0000000..b2521ec --- /dev/null +++ b/pkg/switch/http.go @@ -0,0 +1,334 @@ +package _switch + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "github.com/gorilla/mux" + "github.com/luscis/openlan/pkg/api" + "github.com/luscis/openlan/pkg/cache" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "io/ioutil" + "net/http" + "net/http/pprof" + "os" + "path" + "sort" + "strings" + "text/template" + "time" +) + +type Http struct { + switcher api.Switcher + listen string + adminToken string + guestToken string + adminFile string + server *http.Server + crtFile string + keyFile string + pubDir string + router *mux.Router +} + +func NewHttp(switcher api.Switcher) (h *Http) { + c := co.Manager.Switch + h = &Http{ + switcher: switcher, + listen: c.Http.Listen, + adminFile: c.TokenFile, + pubDir: c.Http.Public, + } + if c.Cert != nil { + h.crtFile = c.Cert.CrtFile + h.keyFile = c.Cert.KeyFile + } + return +} + +func (h *Http) Initialize() { + r := h.Router() + if h.server == nil { + h.server = &http.Server{ + Addr: h.listen, + Handler: r, + ReadTimeout: 5 * time.Minute, + WriteTimeout: 10 * time.Minute, + } + } + h.LoadToken() + h.SaveToken() + h.LoadRouter() +} + +func (h *Http) PProf(r *mux.Router) { + if r != nil { + r.HandleFunc("/debug/pprof/", pprof.Index) + r.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) + r.HandleFunc("/debug/pprof/profile", pprof.Profile) + r.HandleFunc("/debug/pprof/symbol", pprof.Symbol) + r.HandleFunc("/debug/pprof/trace", pprof.Trace) + } +} + +func (h *Http) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + libol.Info("Http.Middleware %s %s", r.Method, r.URL.Path) + if h.IsAuth(w, r) { + next.ServeHTTP(w, r) + } else { + w.Header().Set("WWW-Authenticate", "Basic") + http.Error(w, "Authorization Required", 
http.StatusUnauthorized) + } + }) +} + +func (h *Http) Router() *mux.Router { + if h.router == nil { + h.router = mux.NewRouter() + h.router.Use(h.Middleware) + } + + return h.router +} + +func (h *Http) SaveToken() { + f, err := os.OpenFile(h.adminFile, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0600) + if err != nil { + libol.Error("Http.SaveToken: %s", err) + return + } + defer f.Close() + if _, err := f.Write([]byte(h.adminToken)); err != nil { + libol.Error("Http.SaveToken: %s", err) + return + } +} + +func (h *Http) LoadRouter() { + router := h.Router() + + router.HandleFunc("/", h.IndexHtml) + router.HandleFunc("/index.html", h.IndexHtml) + router.HandleFunc("/favicon.ico", h.PubFile) + + h.PProf(router) + router.HandleFunc("/api/index", h.GetIndex).Methods("GET") + api.Add(router, h.switcher) +} + +func (h *Http) LoadToken() { + token := "" + if _, err := os.Stat(h.adminFile); os.IsNotExist(err) { + libol.Info("Http.LoadToken: file:%s does not exist", h.adminFile) + } else { + contents, err := ioutil.ReadFile(h.adminFile) + if err != nil { + libol.Error("Http.LoadToken: file:%s %s", h.adminFile, err) + } else { + token = strings.TrimSpace(string(contents)) + } + } + if token == "" { + token = libol.GenRandom(32) + } + h.SetToken(token) +} + +func (h *Http) SetToken(value string) { + sum := md5.Sum([]byte(value)) + h.adminToken = value + h.guestToken = hex.EncodeToString(sum[:16])[:12] +} + +func (h *Http) Start() { + h.Initialize() + + libol.Info("Http.Start %s", h.listen) + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Done(func() error { + if h.keyFile == "" || h.crtFile == "" { + if err := h.server.ListenAndServe(); err != nil { + libol.Error("Http.Start on %s: %s", h.listen, err) + return err + } + } else { + if err := h.server.ListenAndServeTLS(h.crtFile, h.keyFile); err != nil { + libol.Error("Http.Start on %s: %s", h.listen, err) + return err + } + } + return nil + }) +} + +func (h *Http) Shutdown() { + libol.Info("Http.Shutdown %s", h.listen) + if err := h.server.Shutdown(context.Background()); err != nil { + // Error from closing listeners, or context timeout: + libol.Error("Http.Shutdown: %v", err) + } +} + +func (h *Http) IsAuth(w http.ResponseWriter, r *http.Request) bool { + token, pass, ok := r.BasicAuth() + libol.Debug("Http.IsAuth token: %s, pass: %s", token, pass) + if strings.HasPrefix(r.URL.Path, "/api/") { + if !ok || token != h.adminToken { + return false + } + } else if strings.HasPrefix(r.URL.Path, "/get/") { + if !ok || token != h.guestToken { + return false + } + } + return true +} + +func (h *Http) getFile(name string) string { + return fmt.Sprintf("%s%s", h.pubDir, name) +} + +func (h *Http) PubFile(w http.ResponseWriter, r *http.Request) { + realpath := h.getFile(r.URL.Path) + contents, err := ioutil.ReadFile(realpath) + if err != nil { + _, _ = fmt.Fprintf(w, "404") + return + } + _, _ = fmt.Fprintf(w, "%s\n", contents) +} + +func (h *Http) getIndex(body *schema.Index) *schema.Index { + body.Version = schema.NewVersionSchema() + body.Worker = api.NewWorkerSchema(h.switcher) + + // display accessed point. + for p := range cache.Point.List() { + if p == nil { + break + } + body.Points = append(body.Points, models.NewPointSchema(p)) + } + sort.SliceStable(body.Points, func(i, j int) bool { + ii := body.Points[i] + jj := body.Points[j] + return ii.Network+ii.Remote > jj.Network+jj.Remote + }) + // display neighbor. 
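// (The loops below keep filling the index: neighbors, links, online flows,
// OpenVPN clients and ESP states, each sorted for stable output.)
//
// Aside: a standalone sketch of how the two Basic-auth tokens checked by
// IsAuth relate. The admin token is stored as-is and the guest token is the
// first 12 hex characters of its MD5 digest, as in SetToken above; the
// sample admin value is made up.
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func guestToken(admin string) string {
	sum := md5.Sum([]byte(admin)) // [16]byte digest
	return hex.EncodeToString(sum[:])[:12]
}

func main() {
	admin := "0123456789abcdef0123456789abcdef" // hypothetical admin token
	fmt.Println(guestToken(admin))              // 12-character guest token
}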
+ for n := range cache.Neighbor.List() { + if n == nil { + break + } + body.Neighbors = append(body.Neighbors, models.NewNeighborSchema(n)) + } + sort.SliceStable(body.Neighbors, func(i, j int) bool { + return body.Neighbors[i].IpAddr > body.Neighbors[j].IpAddr + }) + // display links. + for l := range cache.Link.List() { + if l == nil { + break + } + body.Links = append(body.Links, models.NewLinkSchema(l)) + } + sort.SliceStable(body.Links, func(i, j int) bool { + ii := body.Links[i] + jj := body.Links[j] + return ii.Network+ii.Server > jj.Network+jj.Server + }) + // display online flow. + for l := range cache.Online.List() { + if l == nil { + break + } + body.OnLines = append(body.OnLines, models.NewOnLineSchema(l)) + } + sort.SliceStable(body.OnLines, func(i, j int) bool { + return body.OnLines[i].HitTime < body.OnLines[j].HitTime + }) + // display OpenVPN Clients. + for n := range cache.Network.List() { + if n == nil { + break + } + for c := range cache.VPNClient.List(n.Name) { + if c == nil { + break + } + body.Clients = append(body.Clients, *c) + } + sort.SliceStable(body.Clients, func(i, j int) bool { + return body.Clients[i].Name < body.Clients[j].Name + }) + } + // display esp state + for s := range cache.EspState.List("") { + if s == nil { + break + } + body.States = append(body.States, models.NewEspStateSchema(s)) + } + sort.SliceStable(body.States, func(i, j int) bool { + ii := body.States[i] + jj := body.States[j] + return ii.Spi > jj.Spi + }) + return body +} + +func (h *Http) ParseFiles(w http.ResponseWriter, name string, data interface{}) error { + file := path.Base(name) + tmpl, err := template.New(file).Funcs(template.FuncMap{ + "prettyTime": libol.PrettyTime, + "prettyBytes": libol.PrettyBytes, + "getIpAddr": libol.GetIPAddr, + }).ParseFiles(name) + if err != nil { + _, _ = fmt.Fprintf(w, "template.ParseFiles %s", err) + return err + } + if err := tmpl.Execute(w, data); err != nil { + _, _ = fmt.Fprintf(w, "template.ParseFiles %s", err) + return err + } + return nil +} + +func (h *Http) IndexHtml(w http.ResponseWriter, r *http.Request) { + body := schema.Index{ + Points: make([]schema.Point, 0, 128), + Links: make([]schema.Link, 0, 128), + Neighbors: make([]schema.Neighbor, 0, 128), + OnLines: make([]schema.OnLine, 0, 128), + Clients: make([]schema.VPNClient, 0, 128), + States: make([]schema.EspState, 0, 128), + } + h.getIndex(&body) + file := h.getFile("/index.html") + if err := h.ParseFiles(w, file, &body); err != nil { + libol.Error("Http.Index %s", err) + } +} + +func (h *Http) GetIndex(w http.ResponseWriter, r *http.Request) { + body := schema.Index{ + Points: make([]schema.Point, 0, 128), + Links: make([]schema.Link, 0, 128), + Neighbors: make([]schema.Neighbor, 0, 128), + OnLines: make([]schema.OnLine, 0, 128), + Network: make([]schema.Network, 0, 128), + Clients: make([]schema.VPNClient, 0, 128), + } + h.getIndex(&body) + api.ResponseJson(w, body) +} diff --git a/pkg/switch/link.go b/pkg/switch/link.go new file mode 100755 index 0000000..472554f --- /dev/null +++ b/pkg/switch/link.go @@ -0,0 +1,157 @@ +package _switch + +import ( + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/schema" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + OlapBin = "openlan-point" + OlapDir = "/var/openlan/point" +) + +type Link struct { + cfg *co.Point + status *schema.Point + out *libol.SubLogger + uuid string +} + +func NewLink(uuid 
string, cfg *co.Point) *Link { + return &Link{ + uuid: uuid, + cfg: cfg, + out: libol.NewSubLogger(cfg.Network), + } +} + +func (l *Link) Model() *models.Link { + cfg := l.Conf() + return &models.Link{ + User: cfg.Username, + Network: cfg.Network, + Protocol: cfg.Protocol, + StatusFile: l.StatusFile(), + } +} + +func (l *Link) Initialize() { + file := l.ConfFile() + l.cfg.StatusFile = l.StatusFile() + l.cfg.PidFile = l.PidFile() + _ = libol.MarshalSave(l.cfg, file, true) +} + +func (l *Link) Conf() *co.Point { + return l.cfg +} + +func (l *Link) UUID() string { + return l.uuid +} + +func (l *Link) Path() string { + return OlapBin +} + +func (l *Link) ConfFile() string { + return filepath.Join(OlapDir, l.uuid+".json") +} + +func (l *Link) StatusFile() string { + return filepath.Join(OlapDir, l.uuid+".status") +} + +func (l *Link) PidFile() string { + return filepath.Join(OlapDir, l.uuid+".pid") +} + +func (l *Link) LogFile() string { + return filepath.Join(OlapDir, l.uuid+".log") +} + +func (l *Link) Start() { + file := l.ConfFile() + log, err := libol.CreateFile(l.LogFile()) + if err != nil { + l.out.Warn("Link.Start %s", err) + return + } + libol.Go(func() { + args := []string{ + "-alias", l.cfg.Network, + "-conn", l.cfg.Connection, + "-conf", file, + "-terminal", "ww", + } + l.out.Debug("Link.Start %s %v", l.Path(), args) + cmd := exec.Command(l.Path(), args...) + cmd.Stdout = log + cmd.Stderr = log + if err := cmd.Run(); err != nil { + l.out.Error("Link.Start %s: %s", l.uuid, err) + } + }) +} + +func (l *Link) Clean() { + files := []string{ + l.LogFile(), l.StatusFile(), l.PidFile(), l.ConfFile(), + } + for _, file := range files { + if err := libol.FileExist(file); err == nil { + if err := os.Remove(file); err != nil { + l.out.Warn("Link.Clean %s", err) + } + } + } +} + +func (l *Link) Stop() { + if data, err := ioutil.ReadFile(l.PidFile()); err != nil { + l.out.Debug("Link.Stop %s", err) + } else { + pid := strings.TrimSpace(string(data)) + cmd := exec.Command("/usr/bin/kill", pid) + if err := cmd.Run(); err != nil { + l.out.Warn("Link.Stop %s: %s", pid, err) + } + } + l.Clean() +} + +type Links struct { + lock sync.RWMutex + links map[string]*Link +} + +func NewLinks() *Links { + return &Links{ + links: make(map[string]*Link), + } +} + +func (ls *Links) Add(l *Link) { + ls.lock.Lock() + defer ls.lock.Unlock() + ls.links[l.cfg.Connection] = l +} + +func (ls *Links) Remove(addr string) *Link { + ls.lock.Lock() + defer ls.lock.Unlock() + if p, ok := ls.links[addr]; ok { + p.Stop() + delete(ls.links, addr) + return p + } + return nil +} diff --git a/pkg/switch/network.go b/pkg/switch/network.go new file mode 100755 index 0000000..8b8f84f --- /dev/null +++ b/pkg/switch/network.go @@ -0,0 +1,299 @@ +package _switch + +import ( + "fmt" + "github.com/luscis/openlan/pkg/api" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + cn "github.com/luscis/openlan/pkg/network" + "github.com/vishvananda/netlink" + "strconv" + "strings" +) + +type Networker interface { + String() string + ID() string + Initialize() + Start(v api.Switcher) + Stop() + Bridge() cn.Bridger + Config() *co.Network + Subnet() string + Reload(v api.Switcher) + Provider() string +} + +var workers = make(map[string]Networker) + +func NewNetworker(c *co.Network) Networker { + var obj Networker + switch c.Provider { + case "esp": + obj = NewESPWorker(c) + case "vxlan": + obj = NewVxLANWorker(c) + case "fabric": + obj = NewFabricWorker(c) + default: + obj = NewOpenLANWorker(c) + } + workers[c.Name] 
= obj + return obj +} + +func GetWorker(name string) Networker { + return workers[name] +} + +func ListWorker(call func(w Networker)) { + for _, worker := range workers { + call(worker) + } +} + +type LinuxPort struct { + name string // gre:xx, vxlan:xx + vlan int + link string +} + +type WorkerImpl struct { + uuid string + cfg *co.Network + out *libol.SubLogger + dhcp *Dhcp + outputs []*LinuxPort +} + +func NewWorkerApi(c *co.Network) *WorkerImpl { + return &WorkerImpl{ + cfg: c, + out: libol.NewSubLogger(c.Name), + } +} + +func (w *WorkerImpl) Provider() string { + return w.cfg.Provider +} + +func (w *WorkerImpl) Initialize() { + if w.cfg.Dhcp == "enable" { + w.dhcp = NewDhcp(&co.Dhcp{ + Name: w.cfg.Name, + Subnet: w.cfg.Subnet, + Bridge: w.cfg.Bridge, + }) + } +} + +func (w *WorkerImpl) AddPhysical(bridge string, vlan int, output string) { + link, err := netlink.LinkByName(output) + if err != nil { + w.out.Error("WorkerImpl.LinkByName %s %s", output, err) + return + } + slaver := output + if vlan > 0 { + if err := netlink.LinkSetUp(link); err != nil { + w.out.Warn("WorkerImpl.LinkSetUp %s %s", output, err) + } + subLink := &netlink.Vlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: fmt.Sprintf("%s.%d", output, vlan), + ParentIndex: link.Attrs().Index, + }, + VlanId: vlan, + } + if err := netlink.LinkAdd(subLink); err != nil { + w.out.Error("WorkerImpl.LinkAdd %s %s", subLink.Name, err) + return + } + slaver = subLink.Name + } + br := cn.NewBrCtl(bridge, 0) + if err := br.AddPort(slaver); err != nil { + w.out.Warn("WorkerImpl.AddPhysical %s", err) + } +} + +func (w *WorkerImpl) AddOutput(bridge string, port *LinuxPort) { + name := port.name + values := strings.SplitN(name, ":", 6) + if values[0] == "gre" { + if port.link == "" { + port.link = co.GenName("ge-") + } + link := &netlink.Gretap{ + LinkAttrs: netlink.LinkAttrs{ + Name: port.link, + }, + Local: libol.ParseAddr("0.0.0.0"), + Remote: libol.ParseAddr(values[1]), + PMtuDisc: 1, + } + if err := netlink.LinkAdd(link); err != nil { + w.out.Error("WorkerImpl.LinkAdd %s %s", name, err) + return + } + } else if values[0] == "vxlan" { + if len(values) < 3 { + w.out.Error("WorkerImpl.LinkAdd %s wrong", name) + return + } + if port.link == "" { + port.link = co.GenName("vn-") + } + dport := 8472 + if len(values) == 4 { + dport, _ = strconv.Atoi(values[3]) + } + vni, _ := strconv.Atoi(values[2]) + link := &netlink.Vxlan{ + VxlanId: vni, + LinkAttrs: netlink.LinkAttrs{ + TxQLen: -1, + Name: port.link, + }, + Group: libol.ParseAddr(values[1]), + Port: dport, + } + if err := netlink.LinkAdd(link); err != nil { + w.out.Error("WorkerImpl.LinkAdd %s %s", name, err) + return + } + } else { + port.link = name + } + w.out.Info("WorkerImpl.AddOutput %s %s", port.link, port.name) + w.AddPhysical(bridge, port.vlan, port.link) +} + +func (w *WorkerImpl) Start(v api.Switcher) { + cfg := w.cfg + fire := v.Firewall() + + if cfg.Acl != "" { + fire.AddRule(cn.IpRule{ + Table: cn.TRaw, + Chain: cn.OLCPre, + Input: cfg.Bridge.Name, + Jump: cfg.Acl, + }) + } + fire.AddRule(cn.IpRule{ + Table: cn.TFilter, + Chain: cn.OLCForward, + Input: cfg.Bridge.Name, + Output: cfg.Bridge.Name, + }) + if cfg.Bridge.Mss > 0 { + fire.AddRule(cn.IpRule{ + Table: cn.TMangle, + Chain: cn.OLCPost, + Output: cfg.Bridge.Name, + Proto: "tcp", + Match: "tcp", + TcpFlag: []string{"SYN,RST", "SYN"}, + Jump: "TCPMSS", + SetMss: cfg.Bridge.Mss, + }) + } + for _, output := range cfg.Outputs { + port := &LinuxPort{ + name: output.Interface, + vlan: output.Vlan, + } + w.AddOutput(cfg.Bridge.Name, 
port) + w.outputs = append(w.outputs, port) + } + if w.dhcp != nil { + w.dhcp.Start() + fire.AddRule(cn.IpRule{ + Table: cn.TNat, + Chain: cn.OLCPost, + Source: cfg.Bridge.Address, + NoDest: cfg.Bridge.Address, + Jump: cn.CMasq, + }) + } +} + +func (w *WorkerImpl) DelPhysical(bridge string, vlan int, output string) { + if vlan > 0 { + subLink := &netlink.Vlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: fmt.Sprintf("%s.%d", output, vlan), + }, + } + if err := netlink.LinkDel(subLink); err != nil { + w.out.Error("WorkerImpl.DelPhysical.LinkDel %s %s", subLink.Name, err) + return + } + } else { + br := cn.NewBrCtl(bridge, 0) + if err := br.DelPort(output); err != nil { + w.out.Warn("WorkerImpl.DelPhysical %s", err) + } + } +} + +func (w *WorkerImpl) DelOutput(bridge string, port *LinuxPort) { + w.out.Info("WorkerImpl.DelOutput %s %s", port.link, port.name) + w.DelPhysical(bridge, port.vlan, port.link) + values := strings.SplitN(port.name, ":", 6) + if values[0] == "gre" { + link := &netlink.Gretap{ + LinkAttrs: netlink.LinkAttrs{ + Name: port.link, + }, + } + if err := netlink.LinkDel(link); err != nil { + w.out.Error("WorkerImpl.DelOutput.LinkDel %s %s", link.Name, err) + return + } + } else if values[0] == "vxlan" { + link := &netlink.Vxlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: port.link, + }, + } + if err := netlink.LinkDel(link); err != nil { + w.out.Error("WorkerImpl.DelOutput.LinkDel %s %s", link.Name, err) + return + } + } +} + +func (w *WorkerImpl) Stop() { + if w.dhcp != nil { + w.dhcp.Stop() + } + for _, output := range w.outputs { + w.DelOutput(w.cfg.Bridge.Name, output) + } + w.outputs = nil +} + +func (w *WorkerImpl) String() string { + return w.cfg.Name +} + +func (w *WorkerImpl) ID() string { + return w.uuid +} + +func (w *WorkerImpl) Bridge() cn.Bridger { + return nil +} + +func (w *WorkerImpl) Config() *co.Network { + return w.cfg +} + +func (w *WorkerImpl) Subnet() string { + return "" +} + +func (w *WorkerImpl) Reload(v api.Switcher) { +} diff --git a/pkg/switch/openlan.go b/pkg/switch/openlan.go new file mode 100755 index 0000000..5a77830 --- /dev/null +++ b/pkg/switch/openlan.go @@ -0,0 +1,345 @@ +package _switch + +import ( + "fmt" + "github.com/luscis/openlan/pkg/api" + "github.com/luscis/openlan/pkg/cache" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/network" + "github.com/vishvananda/netlink" + "net" + "strings" + "time" +) + +func PeerName(name, prefix string) (string, string) { + return name + prefix + "i", name + prefix + "o" +} + +type OpenLANWorker struct { + *WorkerImpl + alias string + newTime int64 + startTime int64 + links *Links + bridge network.Bridger + openVPN []*OpenVPN +} + +func NewOpenLANWorker(c *co.Network) *OpenLANWorker { + return &OpenLANWorker{ + WorkerImpl: NewWorkerApi(c), + alias: c.Alias, + newTime: time.Now().Unix(), + startTime: 0, + links: NewLinks(), + } +} + +func (w *OpenLANWorker) Initialize() { + brCfg := w.cfg.Bridge + n := models.Network{ + Name: w.cfg.Name, + IpStart: w.cfg.Subnet.Start, + IpEnd: w.cfg.Subnet.End, + Netmask: w.cfg.Subnet.Netmask, + IfAddr: w.cfg.Bridge.Address, + Routes: make([]*models.Route, 0, 2), + } + for _, rt := range w.cfg.Routes { + if rt.NextHop == "" { + w.out.Warn("OpenLANWorker.Initialize: %s noNextHop", rt.Prefix) + continue + } + rte := models.NewRoute(rt.Prefix, rt.NextHop, rt.Mode) + if rt.Metric > 0 { + rte.Metric = rt.Metric + } + n.Routes = append(n.Routes, rte) + } + 
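// The rest of Initialize: configured Hosts become static leases in the
// network cache, the bridge is created through the configured provider with
// its MTU, and one OpenVPN instance is prepared per OpenVPN config plus one
// for every entry in its Breed list.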
cache.Network.Add(&n) + for _, ht := range w.cfg.Hosts { + lease := cache.Network.AddLease(ht.Hostname, ht.Address) + if lease != nil { + lease.Type = "static" + lease.Network = w.cfg.Name + } + } + w.bridge = network.NewBridger(brCfg.Provider, brCfg.Name, brCfg.IPMtu) + vCfg := w.cfg.OpenVPN + if vCfg != nil { + obj := NewOpenVPN(vCfg) + obj.Initialize() + w.openVPN = append(w.openVPN, obj) + for _, _vCfg := range vCfg.Breed { + if _vCfg == nil { + continue + } + obj := NewOpenVPN(_vCfg) + obj.Initialize() + w.openVPN = append(w.openVPN, obj) + } + } + w.WorkerImpl.Initialize() +} + +func (w *OpenLANWorker) LoadLinks() { + if w.cfg.Links != nil { + for _, lin := range w.cfg.Links { + lin.Default() + w.AddLink(&lin) + } + } +} + +func (w *OpenLANWorker) UnLoadLinks() { + w.links.lock.RLock() + defer w.links.lock.RUnlock() + for _, l := range w.links.links { + l.Stop() + } +} + +func (w *OpenLANWorker) LoadRoutes() { + // install routes + cfg := w.cfg + w.out.Debug("OpenLANWorker.LoadRoute: %v", cfg.Routes) + ifAddr := strings.SplitN(cfg.Bridge.Address, "/", 2)[0] + link, err := netlink.LinkByName(w.bridge.Name()) + if err != nil { + return + } + for _, rt := range cfg.Routes { + _, dst, err := net.ParseCIDR(rt.Prefix) + if err != nil { + continue + } + if ifAddr == rt.NextHop && rt.MultiPath == nil { + // route's next-hop is local not install again. + continue + } + nlrt := netlink.Route{Dst: dst} + for _, hop := range rt.MultiPath { + nxhe := &netlink.NexthopInfo{ + Hops: hop.Weight, + Gw: net.ParseIP(hop.NextHop), + } + nlrt.MultiPath = append(nlrt.MultiPath, nxhe) + } + if rt.MultiPath == nil { + nlrt.LinkIndex = link.Attrs().Index + nlrt.Gw = net.ParseIP(rt.NextHop) + nlrt.Priority = rt.Metric + } + w.out.Debug("OpenLANWorker.LoadRoute: %s", nlrt) + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Go(func() error { + if err := netlink.RouteAdd(&nlrt); err != nil { + w.out.Warn("OpenLANWorker.LoadRoute: %s", err) + return err + } + w.out.Info("OpenLANWorker.LoadRoute: %v", rt) + return nil + }) + } +} + +func (w *OpenLANWorker) UnLoadRoutes() { + cfg := w.cfg + link, err := netlink.LinkByName(w.bridge.Name()) + if err != nil { + return + } + for _, rt := range cfg.Routes { + _, dst, err := net.ParseCIDR(rt.Prefix) + if err != nil { + continue + } + nlRt := netlink.Route{Dst: dst} + if rt.MultiPath == nil { + nlRt.LinkIndex = link.Attrs().Index + nlRt.Gw = net.ParseIP(rt.NextHop) + nlRt.Priority = rt.Metric + } + w.out.Debug("OpenLANWorker.UnLoadRoute: %s", nlRt) + if err := netlink.RouteDel(&nlRt); err != nil { + w.out.Warn("OpenLANWorker.UnLoadRoute: %s", err) + continue + } + w.out.Info("OpenLANWorker.UnLoadRoute: %v", rt) + } +} + +func (w *OpenLANWorker) UpBridge(cfg *co.Bridge) { + master := w.bridge + // new it and configure address + master.Open(cfg.Address) + // configure stp + if cfg.Stp == "on" { + if err := master.Stp(true); err != nil { + w.out.Warn("OpenLANWorker.UpBridge: Stp %s", err) + } + } else { + _ = master.Stp(false) + } + // configure forward delay + if err := master.Delay(cfg.Delay); err != nil { + w.out.Warn("OpenLANWorker.UpBridge: Delay %s", err) + } + w.connectPeer(cfg) + call := 1 + if w.cfg.Acl == "" { + call = 0 + } + if err := master.CallIptables(call); err != nil { + w.out.Warn("OpenLANWorker.Start: CallIptables %s", err) + } +} + +func (w *OpenLANWorker) connectPeer(cfg *co.Bridge) { + if cfg.Peer == "" { + return + } + in, ex := PeerName(cfg.Network, "-e") + link := &netlink.Veth{ + 
LinkAttrs: netlink.LinkAttrs{Name: in}, + PeerName: ex, + } + br := network.NewBrCtl(cfg.Peer, cfg.IPMtu) + promise := &libol.Promise{ + First: time.Second * 2, + MaxInt: time.Minute, + MinInt: time.Second * 10, + } + promise.Go(func() error { + if !br.Has() { + w.out.Warn("%s notFound", br.Name) + return libol.NewErr("%s notFound", br.Name) + } + err := netlink.LinkAdd(link) + if err != nil { + w.out.Error("OpenLANWorker.connectPeer: %s", err) + return nil + } + br0 := network.NewBrCtl(cfg.Name, cfg.IPMtu) + if err := br0.AddPort(in); err != nil { + w.out.Error("OpenLANWorker.connectPeer: %s", err) + } + br1 := network.NewBrCtl(cfg.Peer, cfg.IPMtu) + if err := br1.AddPort(ex); err != nil { + w.out.Error("OpenLANWorker.connectPeer: %s", err) + } + return nil + }) +} + +func (w *OpenLANWorker) Start(v api.Switcher) { + w.out.Info("OpenLANWorker.Start") + w.UpBridge(w.cfg.Bridge) + w.uuid = v.UUID() + w.LoadLinks() + w.LoadRoutes() + for _, vpn := range w.openVPN { + vpn.Start() + } + w.startTime = time.Now().Unix() + w.WorkerImpl.Start(v) +} + +func (w *OpenLANWorker) downBridge(cfg *co.Bridge) { + w.closePeer(cfg) + _ = w.bridge.Close() +} + +func (w *OpenLANWorker) closePeer(cfg *co.Bridge) { + if cfg.Peer == "" { + return + } + in, ex := PeerName(cfg.Network, "-e") + link := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: in}, + PeerName: ex, + } + err := netlink.LinkDel(link) + if err != nil { + w.out.Error("OpenLANWorker.closePeer: %s", err) + return + } +} + +func (w *OpenLANWorker) Stop() { + w.out.Info("OpenLANWorker.Close") + w.WorkerImpl.Stop() + for _, vpn := range w.openVPN { + vpn.Stop() + } + w.UnLoadRoutes() + w.UnLoadLinks() + w.startTime = 0 + w.downBridge(w.cfg.Bridge) +} + +func (w *OpenLANWorker) UpTime() int64 { + if w.startTime != 0 { + return time.Now().Unix() - w.startTime + } + return 0 +} + +func (w *OpenLANWorker) AddLink(c *co.Point) { + br := w.cfg.Bridge + uuid := libol.GenRandom(13) + + c.Alias = w.alias + c.Network = w.cfg.Name + c.Interface.Name = network.Taps.GenName() + c.Interface.Bridge = br.Name + c.Interface.Address = br.Address + c.Interface.Provider = br.Provider + c.Interface.IPMtu = br.IPMtu + c.Log.File = "/dev/null" + + l := NewLink(uuid, c) + l.Initialize() + cache.Link.Add(uuid, l.Model()) + w.links.Add(l) + l.Start() +} + +func (w *OpenLANWorker) DelLink(addr string) { + if l := w.links.Remove(addr); l != nil { + cache.Link.Del(l.uuid) + } +} + +func (w *OpenLANWorker) Subnet() string { + cfg := w.cfg + + ipAddr := cfg.Bridge.Address + ipMask := cfg.Subnet.Netmask + if ipAddr == "" { + ipAddr = cfg.Subnet.Start + } + if ipAddr != "" { + addr := ipAddr + if ipMask != "" { + prefix := libol.Netmask2Len(ipMask) + ifAddr := strings.SplitN(ipAddr, "/", 2)[0] + addr = fmt.Sprintf("%s/%d", ifAddr, prefix) + } + if _, inet, err := net.ParseCIDR(addr); err == nil { + return inet.String() + } + } + return "" +} + +func (w *OpenLANWorker) Bridge() network.Bridger { + return w.bridge +} diff --git a/pkg/switch/openvpn.go b/pkg/switch/openvpn.go new file mode 100755 index 0000000..5c4b6e5 --- /dev/null +++ b/pkg/switch/openvpn.go @@ -0,0 +1,504 @@ +package _switch + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "text/template" + + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" +) + +const ( + OpenVPNBin = "openvpn" + DefaultCurDir = "/var/openlan/openvpn/default" +) + +type OpenVPNData struct { + Local string + Port string + CertNot bool + Ca string + Cert string 
+ Key string + DhPem string + TlsAuth string + Cipher string + Server string + Device string + Protocol string + Script string + Routes []string + Renego int + Stats string + IpIp string + Push []string + ClientConfigDir string +} + +const ( + xAuthConfTmpl = `# Generate by OpenLAN +local {{ .Local }} +port {{ .Port }} +proto {{ .Protocol }} +dev {{ .Device }} +reneg-sec {{ .Renego }} +keepalive 10 120 +persist-key +persist-tun +ca {{ .Ca }} +cert {{ .Cert }} +key {{ .Key }} +dh {{ .DhPem }} +server {{ .Server }} +{{- range .Routes }} +push "route {{ . }}" +{{- end }} +{{- range .Push }} +push "{{ . }}" +{{- end }} +ifconfig-pool-persist {{ .Protocol }}{{ .Port }}ipp +tls-auth {{ .TlsAuth }} 0 +cipher {{ .Cipher }} +status {{ .Protocol }}{{ .Port }}server.status 5 +{{- if .CertNot }} +client-cert-not-required +{{- else }} +verify-client-cert none +{{- end }} +script-security 3 +auth-user-pass-verify "{{ .Script }}" via-env +username-as-common-name +client-config-dir {{ .ClientConfigDir }} +verb 3 +` + certConfTmpl = `# Generate by OpenLAN +local {{ .Local }} +port {{ .Port }} +proto {{ .Protocol }} +dev {{ .Device }} +reneg-sec {{ .Renego }} +keepalive 10 120 +persist-key +persist-tun +ca {{ .Ca }} +cert {{ .Cert }} +key {{ .Key }} +dh {{ .DhPem }} +server {{ .Server }} +{{- range .Routes }} +push "route {{ . }}" +{{- end }} +ifconfig-pool-persist {{ .Protocol }}{{ .Port }}ipp +tls-auth {{ .TlsAuth }} 0 +cipher {{ .Cipher }} +status {{ .Protocol }}{{ .Port }}server.status 5 +client-config-dir {{ .ClientConfigDir }} +verb 3 +` +) + +func NewOpenVpnDataFromConf(obj *OpenVPN) *OpenVPNData { + cfg := obj.Cfg + data := &OpenVPNData{ + Local: obj.Local, + Port: obj.Port, + CertNot: true, + Ca: cfg.RootCa, + Cert: cfg.ServerCrt, + Key: cfg.ServerKey, + DhPem: cfg.DhPem, + TlsAuth: cfg.TlsAuth, + Cipher: cfg.Cipher, + Device: cfg.Device, + Protocol: cfg.Protocol, + Script: cfg.Script, + Renego: cfg.Renego, + Push: cfg.Push, + } + if cfg.Version > 23 { + data.CertNot = false + } + addr, _ := libol.IPNetwork(cfg.Subnet) + data.Server = strings.ReplaceAll(addr, "/", " ") + for _, rt := range cfg.Routes { + if addr, err := libol.IPNetwork(rt); err == nil { + r := strings.ReplaceAll(addr, "/", " ") + data.Routes = append(data.Routes, r) + } + } + data.ClientConfigDir = obj.DirectoryClientConfig() + return data +} + +type OpenVPN struct { + Cfg *co.OpenVPN + out *libol.SubLogger + Protocol string + Local string + Port string +} + +func NewOpenVPN(cfg *co.OpenVPN) *OpenVPN { + obj := &OpenVPN{ + Cfg: cfg, + out: libol.NewSubLogger(cfg.Network), + Protocol: cfg.Protocol, + Local: "0.0.0.0", + Port: "4494", + } + obj.Local = strings.SplitN(cfg.Listen, ":", 2)[0] + if strings.Contains(cfg.Listen, ":") { + obj.Port = strings.SplitN(cfg.Listen, ":", 2)[1] + } + return obj +} + +func (o *OpenVPN) ID() string { + return o.Protocol + o.Port +} + +func (o *OpenVPN) Path() string { + return OpenVPNBin +} + +func (o *OpenVPN) Directory() string { + if o.Cfg == nil { + return DefaultCurDir + } + return o.Cfg.Directory +} + +func (o *OpenVPN) FileCfg(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + "server.conf" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) FileClient(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + "client.ovpn" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) FileLog(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + 
"server.log" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) FilePid(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + "server.pid" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) FileStats(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + "server.stats" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) ServerTmpl() string { + tmplStr := xAuthConfTmpl + if o.Cfg.Auth == "cert" { + tmplStr = certConfTmpl + } + cfgTmpl := filepath.Join(o.Cfg.Directory, o.ID()+"server.tmpl") + _ = ioutil.WriteFile(cfgTmpl, []byte(tmplStr), 0600) + return tmplStr +} + +func (o *OpenVPN) FileIpp(full bool) string { + if o.Cfg == nil { + return "" + } + name := o.ID() + "ipp" + if !full { + return name + } + return filepath.Join(o.Cfg.Directory, name) +} + +func (o *OpenVPN) DirectoryClientConfig() string { + if o.Cfg == nil { + return path.Join(DefaultCurDir, "ccd") + } + return path.Join(o.Cfg.Directory, "ccd") +} + +func (o *OpenVPN) WriteConf(path string) error { + fp, err := libol.CreateFile(path) + if err != nil || fp == nil { + return err + } + defer fp.Close() + data := NewOpenVpnDataFromConf(o) + o.out.Debug("OpenVPN.WriteConf %v", data) + if data.ClientConfigDir != "" { + _ = o.writeClientConfig() + } + tmplStr := o.ServerTmpl() + if tmpl, err := template.New("main").Parse(tmplStr); err != nil { + return err + } else { + if err := tmpl.Execute(fp, data); err != nil { + return err + } + } + return nil +} + +func (o *OpenVPN) writeClientConfig() error { + // make client dir and config file + ccd := o.DirectoryClientConfig() + if err := os.Mkdir(ccd, 0600); err != nil { + o.out.Info("OpenVPN.writeClientConfig %s", err) + } + for _, fic := range o.Cfg.Clients { + if fic.Name == "" || fic.Address == "" { + continue + } + ficFile := filepath.Join(ccd, fic.Name) + pushIP := fmt.Sprintf("ifconfig-push %s %s", fic.Address, fic.Netmask) + if err := ioutil.WriteFile(ficFile, []byte(pushIP), 0600); err != nil { + o.out.Warn("OpenVPN.writeClientConfig %s", err) + } + } + + return nil +} + +func (o *OpenVPN) Clean() { + ccd := o.DirectoryClientConfig() + for _, fic := range o.Cfg.Clients { + if fic.Name == "" || fic.Address == "" { + continue + } + file := filepath.Join(ccd, fic.Name) + if err := libol.FileExist(file); err == nil { + if err := os.Remove(file); err != nil { + o.out.Warn("OpenVPN.Clean %s", err) + } + } + } + files := []string{o.FileStats(true), o.FileIpp(true)} + for _, file := range files { + if err := libol.FileExist(file); err == nil { + if err := os.Remove(file); err != nil { + o.out.Warn("OpenVPN.Clean %s", err) + } + } + } +} + +func (o *OpenVPN) Initialize() { + if !o.ValidConf() { + return + } + o.Clean() + if err := os.Mkdir(o.Directory(), 0600); err != nil { + o.out.Info("OpenVPN.Initialize %s", err) + } + if err := o.WriteConf(o.FileCfg(true)); err != nil { + o.out.Warn("OpenVPN.Initialize %s", err) + return + } + if ctx, err := o.Profile(); err == nil { + file := o.FileClient(true) + if err := ioutil.WriteFile(file, ctx, 0600); err != nil { + o.out.Warn("OpenVPN.Initialize %s", err) + } + } else { + o.out.Warn("OpenVPN.Initialize %s", err) + } +} + +func (o *OpenVPN) ValidConf() bool { + if o.Cfg == nil { + return false + } + if o.Cfg.Listen == "" || o.Cfg.Subnet == "" { + return false + } + return true +} + +func (o *OpenVPN) Start() { + if !o.ValidConf() { + return + } + log, err := 
libol.CreateFile(o.FileLog(true)) + if err != nil { + o.out.Warn("OpenVPN.Start %s", err) + return + } + libol.Go(func() { + defer log.Close() + args := []string{ + "--cd", o.Directory(), + "--config", o.FileCfg(false), + "--writepid", o.FilePid(false), + } + cmd := exec.Command(o.Path(), args...) + cmd.Stdout = log + cmd.Stderr = log + if err := cmd.Run(); err != nil { + o.out.Error("OpenVPN.Start %s: %s", o.ID(), err) + } + }) +} + +func (o *OpenVPN) Stop() { + if !o.ValidConf() { + return + } + if data, err := ioutil.ReadFile(o.FilePid(true)); err != nil { + o.out.Debug("OpenVPN.Stop %s", err) + } else { + pid := strings.TrimSpace(string(data)) + cmd := exec.Command("/usr/bin/kill", pid) + if err := cmd.Run(); err != nil { + o.out.Warn("OpenVPN.Stop %s: %s", pid, err) + } + } + o.Clean() +} + +func (o *OpenVPN) ProfileTmpl() string { + tmplStr := xAuthClientProfile + if o.Cfg.Auth == "cert" { + tmplStr = certClientProfile + } + cfgTmpl := filepath.Join(o.Cfg.Directory, o.ID()+"client.tmpl") + _ = ioutil.WriteFile(cfgTmpl, []byte(tmplStr), 0600) + return tmplStr +} + +func (o *OpenVPN) Profile() ([]byte, error) { + data := NewOpenVpnProfileFromConf(o) + tmplStr := o.ProfileTmpl() + tmpl, err := template.New("main").Parse(tmplStr) + if err != nil { + return nil, err + } + var out bytes.Buffer + if err := tmpl.Execute(&out, data); err == nil { + return out.Bytes(), nil + } else { + return nil, err + } +} + +type OpenVPNProfile struct { + Server string + Port string + Ca string + Cert string + Key string + TlsAuth string + Cipher string + Device string + Protocol string + Renego int +} + +const ( + xAuthClientProfile = `# Generate by OpenLAN +client +dev {{ .Device }} +route-metric 300 +proto {{ .Protocol }} +remote {{ .Server }} {{ .Port }} +reneg-sec {{ .Renego }} +resolv-retry infinite +nobind +persist-key +persist-tun + +{{ .Ca -}} + +remote-cert-tls server + +{{ .TlsAuth -}} + +key-direction 1 +cipher {{ .Cipher }} +auth-nocache +verb 4 +auth-user-pass +` + certClientProfile = `# Generate by OpenLAN +client +dev {{ .Device }} +route-metric 300 +proto {{ .Protocol }} +remote {{ .Server }} {{ .Port }} +reneg-sec {{ .Renego }} +resolv-retry infinite +nobind +persist-key +persist-tun + +{{ .Ca -}} + +remote-cert-tls server + +{{ .TlsAuth -}} + +key-direction 1 +cipher {{ .Cipher }} +auth-nocache +verb 4 +` +) + +func NewOpenVpnProfileFromConf(obj *OpenVPN) *OpenVPNProfile { + cfg := obj.Cfg + data := &OpenVPNProfile{ + Server: obj.Local, + Port: obj.Port, + Cipher: cfg.Cipher, + Device: cfg.Device[:3], + Protocol: cfg.Protocol, + Renego: cfg.Renego, + } + if ctx, err := ioutil.ReadFile(cfg.RootCa); err == nil { + data.Ca = string(ctx) + } + if ctx, err := ioutil.ReadFile(cfg.TlsAuth); err == nil { + data.TlsAuth = string(ctx) + } + return data +} diff --git a/pkg/switch/switch.go b/pkg/switch/switch.go new file mode 100755 index 0000000..2965e41 --- /dev/null +++ b/pkg/switch/switch.go @@ -0,0 +1,770 @@ +package _switch + +import ( + "encoding/json" + "github.com/luscis/openlan/pkg/app" + "github.com/luscis/openlan/pkg/cache" + co "github.com/luscis/openlan/pkg/config" + "github.com/luscis/openlan/pkg/libol" + "github.com/luscis/openlan/pkg/models" + "github.com/luscis/openlan/pkg/network" + "net" + "strings" + "sync" + "time" +) + +func GetSocketServer(s *co.Switch) libol.SocketServer { + switch s.Protocol { + case "kcp": + c := libol.NewKcpConfig() + c.Block = co.GetBlock(s.Crypt) + c.Timeout = time.Duration(s.Timeout) * time.Second + return libol.NewKcpServer(s.Listen, c) + case "tcp": 
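Both `WriteConf` and `Profile` above render an OpenVPN configuration by executing a `text/template` over a data struct. A reduced, hedged sketch of that flow with a trimmed field set and template (the real `xAuthConfTmpl` carries many more directives):

```go
package main

import (
	"os"
	"text/template"
)

// vpnData is a trimmed stand-in for OpenVPNData: just enough fields to
// show how a server.conf template is rendered.
type vpnData struct {
	Local    string
	Port     string
	Protocol string
	Device   string
	Server   string
	Routes   []string
}

const serverTmpl = `# Generate by OpenLAN
local {{ .Local }}
port {{ .Port }}
proto {{ .Protocol }}
dev {{ .Device }}
server {{ .Server }}
{{- range .Routes }}
push "route {{ . }}"
{{- end }}
`

func main() {
	data := vpnData{
		Local:    "0.0.0.0",
		Port:     "1194",
		Protocol: "udp",
		Device:   "tun0",
		Server:   "10.8.0.0 255.255.255.0",
		Routes:   []string{"192.168.10.0 255.255.255.0"},
	}
	tmpl := template.Must(template.New("main").Parse(serverTmpl))
	// The switch writes the result to a file under cfg.Directory;
	// stdout keeps this sketch self-contained and runnable.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```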
+ c := &libol.TcpConfig{ + Block: co.GetBlock(s.Crypt), + Timeout: time.Duration(s.Timeout) * time.Second, + RdQus: s.Queue.SockRd, + WrQus: s.Queue.SockWr, + } + return libol.NewTcpServer(s.Listen, c) + case "udp": + c := &libol.UdpConfig{ + Block: co.GetBlock(s.Crypt), + Timeout: time.Duration(s.Timeout) * time.Second, + } + return libol.NewUdpServer(s.Listen, c) + case "ws": + c := &libol.WebConfig{ + Block: co.GetBlock(s.Crypt), + Timeout: time.Duration(s.Timeout) * time.Second, + RdQus: s.Queue.SockRd, + WrQus: s.Queue.SockWr, + } + return libol.NewWebServer(s.Listen, c) + case "wss": + c := &libol.WebConfig{ + Block: co.GetBlock(s.Crypt), + Timeout: time.Duration(s.Timeout) * time.Second, + RdQus: s.Queue.SockRd, + WrQus: s.Queue.SockWr, + } + if s.Cert != nil { + c.Cert = &libol.WebCert{ + Crt: s.Cert.CrtFile, + Key: s.Cert.KeyFile, + } + } + return libol.NewWebServer(s.Listen, c) + default: + c := &libol.TcpConfig{ + Block: co.GetBlock(s.Crypt), + Timeout: time.Duration(s.Timeout) * time.Second, + RdQus: s.Queue.SockRd, + WrQus: s.Queue.SockWr, + } + if s.Cert != nil { + c.Tls = s.Cert.GetTlsCfg() + } + return libol.NewTcpServer(s.Listen, c) + } +} + +type Apps struct { + Auth *app.Access + Request *app.Request + Neighbor *app.Neighbors + OnLines *app.Online +} + +type Hook func(client libol.SocketClient, frame *libol.FrameMessage) error + +type Switch struct { + lock sync.Mutex + cfg *co.Switch + apps Apps + firewall *network.FireWall + hooks []Hook + http *Http + server libol.SocketServer + worker map[string]Networker + uuid string + newTime int64 + out *libol.SubLogger + confd *ConfD +} + +func NewSwitch(c *co.Switch) *Switch { + server := GetSocketServer(c) + v := &Switch{ + cfg: c, + firewall: network.NewFireWall(c.FireWall), + worker: make(map[string]Networker, 32), + server: server, + newTime: time.Now().Unix(), + hooks: make([]Hook, 0, 64), + out: libol.NewSubLogger(c.Alias), + } + v.confd = NewConfd(v) + return v +} + +func (v *Switch) Protocol() string { + if v.cfg == nil { + return "" + } + return v.cfg.Protocol +} + +func (v *Switch) enablePort(protocol, port string) { + v.out.Info("Switch.enablePort %s %s", protocol, port) + // allowed forward between source and prefix. + v.firewall.AddRule(network.IpRule{ + Table: network.TFilter, + Chain: network.OLCInput, + Proto: protocol, + Match: "multiport", + DstPort: port, + }) +} + +func (v *Switch) enableFwd(input, output, source, prefix string) { + v.out.Debug("Switch.enableFwd %s:%s %s:%s", input, output, source, prefix) + // allowed forward between source and prefix. + v.firewall.AddRule(network.IpRule{ + Table: network.TFilter, + Chain: network.OLCForward, + Input: input, + Output: output, + Source: source, + Dest: prefix, + }) + if source != prefix { + v.firewall.AddRule(network.IpRule{ + Table: network.TFilter, + Chain: network.OLCForward, + Output: input, + Input: output, + Source: prefix, + Dest: source, + }) + } +} + +func (v *Switch) enableMasq(input, output, source, prefix string) { + if source == prefix { + return + } + // enable masquerade from source to prefix. 
+ if prefix == "" || prefix == "0.0.0.0/0" { + v.firewall.AddRule(network.IpRule{ + Table: network.TNat, + Chain: network.OLCPost, + Source: source, + NoDest: source, + Jump: network.CMasq, + }) + } else { + v.firewall.AddRule(network.IpRule{ + Table: network.TNat, + Chain: network.OLCPost, + Source: source, + Dest: prefix, + Jump: network.CMasq, + }) + } +} + +func (v *Switch) enableSnat(input, output, source, prefix string) { + if source == prefix { + return + } + // enable masquerade from source to prefix. + v.firewall.AddRule(network.IpRule{ + Table: network.TNat, + Chain: network.OLCPost, + ToSource: source, + Dest: prefix, + Jump: network.CSnat, + }) +} + +func (v *Switch) preWorkerVPN(w Networker, vCfg *co.OpenVPN) { + if w == nil || vCfg == nil { + return + } + cfg := w.Config() + routes := vCfg.Routes + routes = append(routes, vCfg.Subnet) + if addr := w.Subnet(); addr != "" { + libol.Info("Switch.preWorkerVPN %s subnet %s", cfg.Name, addr) + routes = append(routes, addr) + } + for _, rt := range cfg.Routes { + addr := rt.Prefix + if addr == "0.0.0.0/0" { + vCfg.Push = append(vCfg.Push, "redirect-gateway def1") + continue + } + if _, inet, err := net.ParseCIDR(addr); err == nil { + routes = append(routes, inet.String()) + } + } + vCfg.Routes = routes + for _, _vCfg := range vCfg.Breed { + v.preWorkerVPN(w, _vCfg) + } +} + +func (v *Switch) preWorker(w Networker) { + cfg := w.Config() + if cfg.OpenVPN != nil { + v.preWorkerVPN(w, cfg.OpenVPN) + } + br := cfg.Bridge + if br.Mss > 0 { + v.firewall.AddRule(network.IpRule{ + Table: network.TMangle, + Chain: network.OLCPost, + Output: br.Name, + Proto: "tcp", + Match: "tcp", + TcpFlag: []string{"SYN,RST", "SYN"}, + Jump: "TCPMSS", + SetMss: br.Mss, + }) + } +} + +func (v *Switch) enableAcl(acl, input string) { + if input == "" { + return + } + if acl != "" { + v.firewall.AddRule(network.IpRule{ + Table: network.TRaw, + Chain: network.OLCPre, + Input: input, + Jump: acl, + }) + } +} + +func (v *Switch) preNetVPN0(nCfg *co.Network, vCfg *co.OpenVPN) { + if nCfg == nil || vCfg == nil { + return + } + devName := vCfg.Device + v.enableAcl(nCfg.Acl, devName) + for _, rt := range vCfg.Routes { + v.enableFwd(devName, "", vCfg.Subnet, rt) + v.enableMasq(devName, "", vCfg.Subnet, rt) + } + for _, _vCfg := range vCfg.Breed { + v.preNetVPN0(nCfg, _vCfg) + } +} + +func (v *Switch) preNetVPN1(bridge, prefix string, vCfg *co.OpenVPN) { + if vCfg == nil { + return + } + // Enable MASQUERADE, and allowed forward. + v.enableFwd("", bridge, vCfg.Subnet, prefix) + v.enableMasq("", bridge, vCfg.Subnet, prefix) + for _, _vCfg := range vCfg.Breed { + v.preNetVPN1(bridge, prefix, _vCfg) + } +} + +func (v *Switch) preNets() { + for _, nCfg := range v.cfg.Network { + name := nCfg.Name + w := NewNetworker(nCfg) + v.worker[name] = w + brCfg := nCfg.Bridge + if brCfg == nil { + continue + } + + v.preWorker(w) + brName := brCfg.Name + vCfg := nCfg.OpenVPN + + ifAddr := strings.SplitN(brCfg.Address, "/", 2)[0] + // Enable MASQUERADE for OpenVPN + if vCfg != nil { + v.preNetVPN0(nCfg, vCfg) + } + if ifAddr == "" { + continue + } + subnet := w.Subnet() + // Enable MASQUERADE, and allowed forward. 
+ for _, rt := range nCfg.Routes { + v.preNetVPN1(brName, rt.Prefix, vCfg) + if rt.NextHop != ifAddr { + continue + } + v.enableFwd(brName, "", subnet, rt.Prefix) + if rt.MultiPath != nil { + v.enableSnat(brName, "", ifAddr, rt.Prefix) + } else if rt.Mode == "snat" { + v.enableMasq(brName, "", subnet, rt.Prefix) + } + } + } +} + +func (v *Switch) preApps() { + // Append accessed auth for point + v.apps.Auth = app.NewAccess(v) + v.hooks = append(v.hooks, v.apps.Auth.OnFrame) + // Append request process + v.apps.Request = app.NewRequest(v) + v.hooks = append(v.hooks, v.apps.Request.OnFrame) + + inspect := "" + for _, v := range v.cfg.Inspect { + inspect += v + } + // Check whether inspect neighbor + if strings.Contains(inspect, "neighbor") { + v.apps.Neighbor = app.NewNeighbors(v) + v.hooks = append(v.hooks, v.apps.Neighbor.OnFrame) + } + // Check whether inspect online flow by five-tuple. + if strings.Contains(inspect, "online") { + v.apps.OnLines = app.NewOnline(v) + v.hooks = append(v.hooks, v.apps.OnLines.OnFrame) + } + for i, h := range v.hooks { + v.out.Debug("Switch.preApps: id %d, func %s", i, libol.FunName(h)) + } +} + +func (v *Switch) preAcl() { + for _, acl := range v.cfg.Acl { + if acl.Name == "" { + continue + } + v.firewall.AddChain(network.IpChain{ + Table: network.TRaw, + Name: acl.Name, + }) + for _, rule := range acl.Rules { + v.firewall.AddRule(network.IpRule{ + Table: network.TRaw, + Chain: acl.Name, + Source: rule.SrcIp, + Dest: rule.DstIp, + Proto: rule.Proto, + SrcPort: rule.SrcPort, + DstPort: rule.DstPort, + Jump: rule.Action, + }) + } + } +} + +func (v *Switch) GetPort(listen string) string { + _, port := libol.GetHostPort(listen) + return port +} + +func (v *Switch) preAllowVPN(cfg *co.OpenVPN) { + if cfg == nil { + return + } + port := v.GetPort(cfg.Listen) + if cfg.Protocol == "udp" { + v.enablePort("udp", port) + } else { + v.enablePort("tcp", port) + } + for _, _cfg := range cfg.Breed { + v.preAllowVPN(_cfg) + } +} + +func (v *Switch) preAllow() { + port := v.GetPort(v.cfg.Listen) + UdpPorts := []string{"4500", "4500", "8472", "4789", port} + TcpPorts := []string{"7471", port} + if v.cfg.Http != nil { + TcpPorts = append(TcpPorts, v.GetPort(v.cfg.Http.Listen)) + } + v.enablePort("udp", strings.Join(UdpPorts, ",")) + v.enablePort("tcp", strings.Join(TcpPorts, ",")) + for _, nCfg := range v.cfg.Network { + if nCfg.OpenVPN == nil { + continue + } + v.preAllowVPN(nCfg.OpenVPN) + } +} + +func (v *Switch) SetLdap(ldap *co.LDAP) { + if ldap == nil || ldap.Server == "" { + return + } + cfg := libol.LDAPConfig{ + Server: ldap.Server, + BindDN: ldap.BindDN, + Password: ldap.Password, + BaseDN: ldap.BaseDN, + Attr: ldap.Attribute, + Filter: ldap.Filter, + EnableTls: ldap.EnableTls, + } + cache.User.SetLdap(&cfg) +} + +func (v *Switch) SetPass(file string) { + cache.User.SetFile(file) +} + +func (v *Switch) LoadPass() { + cache.User.Load() +} + +func (v *Switch) Initialize() { + v.lock.Lock() + defer v.lock.Unlock() + + v.preAcl() + v.preAllow() + v.preApps() + if v.cfg.Http != nil { + v.http = NewHttp(v) + } + v.preNets() + // FireWall + v.firewall.Initialize() + for _, w := range v.worker { + if w.Provider() == "vxlan" { + continue + } + w.Initialize() + } + // Load password for guest access + v.SetPass(v.cfg.PassFile) + v.LoadPass() + v.SetLdap(v.cfg.Ldap) + // Start confd monitor + v.confd.Initialize() +} + +func (v *Switch) onFrame(client libol.SocketClient, frame *libol.FrameMessage) error { + for _, h := range v.hooks { + if v.out.Has(libol.LOG) { + 
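`onFrame`, which continues just below, walks the registered hooks in order and drops the frame on the first error. A self-contained sketch of the same chain-of-hooks idea, with the socket client and frame reduced to a plain string:

```go
package main

import (
	"errors"
	"fmt"
)

// hook mirrors the Hook signature in spirit: inspect a frame and return an
// error to stop further processing.
type hook func(frame string) error

// runHooks applies each hook in registration order and stops at the first
// error, the same way Switch.onFrame treats its hook slice.
func runHooks(hooks []hook, frame string) error {
	for _, h := range hooks {
		if h == nil {
			continue
		}
		if err := h(frame); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	auth := func(frame string) error {
		if frame == "unauthenticated" {
			return errors.New("access denied")
		}
		return nil
	}
	request := func(frame string) error {
		fmt.Println("handled:", frame)
		return nil
	}
	fmt.Println(runHooks([]hook{auth, request}, "ping"))            // handled: ping, then <nil>
	fmt.Println(runHooks([]hook{auth, request}, "unauthenticated")) // access denied
}
```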
v.out.Log("Switch.onFrame: %s", libol.FunName(h)) + } + if h != nil { + if err := h(client, frame); err != nil { + return err + } + } + } + return nil +} + +func (v *Switch) OnClient(client libol.SocketClient) error { + client.SetStatus(libol.ClConnected) + v.out.Info("Switch.onClient: %s", client.String()) + return nil +} + +func (v *Switch) SignIn(client libol.SocketClient) error { + v.out.Cmd("Switch.SignIn %s", client.String()) + data := struct { + Address string `json:"address"` + Switch string `json:"switch"` + }{ + Address: client.String(), + Switch: client.LocalAddr(), + } + body, err := json.Marshal(data) + if err != nil { + v.out.Error("Switch.SignIn: %s", err) + return err + } + v.out.Cmd("Switch.SignIn: %s", body) + m := libol.NewControlFrame(libol.SignReq, body) + if err := client.WriteMsg(m); err != nil { + v.out.Error("Switch.SignIn: %s", err) + return err + } + return nil +} + +func (v *Switch) ReadClient(client libol.SocketClient, frame *libol.FrameMessage) error { + addr := client.RemoteAddr() + if v.out.Has(libol.LOG) { + v.out.Log("Switch.ReadClient: %s %x", addr, frame.Frame()) + } + frame.Decode() + if err := v.onFrame(client, frame); err != nil { + v.out.Debug("Switch.ReadClient: %s dropping by %s", addr, err) + if frame.Action() == libol.PingReq { + // send sign message to point require login. + _ = v.SignIn(client) + } + return nil + } + if frame.IsControl() { + return nil + } + // process ethernet frame message. + private := client.Private() + if private == nil { + return libol.NewErr("point %s notFound.", addr) + } + point, ok := private.(*models.Point) + if !ok { + return libol.NewErr("point %s notRight.", addr) + } + device := point.Device + if point == nil || device == nil { + return libol.NewErr("Tap devices is nil") + } + if _, err := device.Write(frame.Frame()); err != nil { + v.out.Error("Switch.ReadClient: %s", err) + return err + } + return nil +} + +func (v *Switch) OnClose(client libol.SocketClient) error { + addr := client.RemoteAddr() + v.out.Info("Switch.OnClose: %s", addr) + // already not need support free list for device. + uuid := cache.Point.GetUUID(addr) + if cache.Point.GetAddr(uuid) == addr { // not has newer + cache.Network.DelLease(uuid) + } + cache.Point.Del(addr) + return nil +} + +func (v *Switch) Start() { + v.lock.Lock() + defer v.lock.Unlock() + + OpenUDP() + // firstly, start network. + for _, w := range v.worker { + if w.Provider() == "vxlan" { + continue + } + w.Start(v) + } + // start server for accessing + libol.Go(v.server.Accept) + call := libol.ServerListener{ + OnClient: v.OnClient, + OnClose: v.OnClose, + ReadAt: v.ReadClient, + } + libol.Go(func() { v.server.Loop(call) }) + if v.http != nil { + libol.Go(v.http.Start) + } + libol.Go(v.firewall.Start) + libol.Go(v.confd.Start) +} + +func (v *Switch) Stop() { + v.lock.Lock() + defer v.lock.Unlock() + + v.out.Debug("Switch.Stop") + v.confd.Stop() + // firstly, notify leave to point. + for p := range cache.Point.List() { + if p == nil { + break + } + v.leftClient(p.Client) + } + v.firewall.Stop() + if v.http != nil { + v.http.Shutdown() + v.http = nil + } + v.server.Close() + // stop network. 
+ for _, w := range v.worker { + if w.Provider() == "vxlan" { + continue + } + w.Stop() + } +} + +func (v *Switch) Alias() string { + return v.cfg.Alias +} + +func (v *Switch) UpTime() int64 { + return time.Now().Unix() - v.newTime +} + +func (v *Switch) Server() libol.SocketServer { + return v.server +} + +func (v *Switch) GetBridge(tenant string) (network.Bridger, error) { + w, ok := v.worker[tenant] + if !ok { + return nil, libol.NewErr("bridge %s notFound", tenant) + } + return w.Bridge(), nil +} + +func (v *Switch) NewTap(tenant string) (network.Taper, error) { + v.lock.Lock() + defer v.lock.Unlock() + v.out.Debug("Switch.NewTap") + + // already not need support free list for device. + // dropped firstly packages during 15s because of forwarding delay. + br, err := v.GetBridge(tenant) + if err != nil { + v.out.Error("Switch.NewTap: %s", err) + return nil, err + } + dev, err := network.NewTaper(tenant, network.TapConfig{ + Provider: br.Type(), + Type: network.TAP, + VirBuf: v.cfg.Queue.VirWrt, + KernBuf: v.cfg.Queue.VirSnd, + Name: "auto", + }) + if err != nil { + v.out.Error("Switch.NewTap: %s", err) + return nil, err + } + dev.Up() + // add new tap to bridge. + _ = br.AddSlave(dev.Name()) + v.out.Info("Switch.NewTap: %s on %s", dev.Name(), tenant) + return dev, nil +} + +func (v *Switch) FreeTap(dev network.Taper) error { + v.lock.Lock() + defer v.lock.Unlock() + name := dev.Name() + tenant := dev.Tenant() + v.out.Debug("Switch.FreeTap %s", name) + w, ok := v.worker[tenant] + if !ok { + return libol.NewErr("bridge %s notFound", tenant) + } + br := w.Bridge() + _ = br.DelSlave(dev.Name()) + v.out.Info("Switch.FreeTap: %s", name) + return nil +} + +func (v *Switch) UUID() string { + if v.uuid == "" { + v.uuid = libol.GenRandom(13) + } + return v.uuid +} + +func (v *Switch) ReadTap(device network.Taper, readAt func(f *libol.FrameMessage) error) { + name := device.Name() + v.out.Info("Switch.ReadTap: %s", name) + done := make(chan bool, 2) + queue := make(chan *libol.FrameMessage, v.cfg.Queue.TapWr) + libol.Go(func() { + for { + frame := libol.NewFrameMessage(0) + n, err := device.Read(frame.Frame()) + if err != nil { + v.out.Error("Switch.ReadTap: %s", err) + done <- true + break + } + frame.SetSize(n) + if v.out.Has(libol.LOG) { + v.out.Log("Switch.ReadTap: %x\n", frame.Frame()[:n]) + } + queue <- frame + } + }) + defer device.Close() + for { + select { + case frame := <-queue: + if err := readAt(frame); err != nil { + v.out.Error("Switch.ReadTap: readAt %s %s", name, err) + return + } + case <-done: + return + } + } +} + +func (v *Switch) OffClient(client libol.SocketClient) { + v.out.Info("Switch.OffClient: %s", client) + if v.server != nil { + v.server.OffClient(client) + } +} + +func (v *Switch) Config() *co.Switch { + return co.Manager.Switch +} + +func (v *Switch) leftClient(client libol.SocketClient) { + if client == nil { + return + } + v.out.Info("Switch.leftClient: %s", client.String()) + data := struct { + DateTime int64 `json:"datetime"` + UUID string `json:"uuid"` + Alias string `json:"alias"` + Connection string `json:"connection"` + Address string `json:"address"` + }{ + DateTime: time.Now().Unix(), + UUID: v.UUID(), + Alias: v.Alias(), + Address: client.LocalAddr(), + Connection: client.RemoteAddr(), + } + body, err := json.Marshal(data) + if err != nil { + v.out.Error("Switch.leftClient: %s", err) + return + } + v.out.Cmd("Switch.leftClient: %s", body) + m := libol.NewControlFrame(libol.LeftReq, body) + if err := client.WriteMsg(m); err != nil { + 
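`ReadTap` above pairs a reader goroutine with a buffered frame queue and a done channel, so the consumer can fall behind briefly without stalling the tap read loop. A reduced sketch of that producer/consumer shape, with byte slices standing in for frame messages:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// pump reads fixed-size chunks from r into queue and signals done on EOF or
// error, the same shape as the reader goroutine inside Switch.ReadTap.
func pump(r io.Reader, queue chan<- []byte, done chan<- bool) {
	for {
		buf := make([]byte, 4)
		n, err := r.Read(buf)
		if n > 0 {
			queue <- buf[:n]
		}
		if err != nil {
			done <- true
			return
		}
	}
}

func main() {
	queue := make(chan []byte, 1024) // cfg.Queue.TapWr in the real code
	done := make(chan bool, 2)
	go pump(strings.NewReader("abcdefgh"), queue, done)
	for {
		select {
		case frame := <-queue:
			fmt.Printf("frame: %q\n", frame)
		case <-done:
			// Drain whatever the reader queued before it stopped, then exit.
			for {
				select {
				case frame := <-queue:
					fmt.Printf("frame: %q\n", frame)
				default:
					return
				}
			}
		}
	}
}
```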
v.out.Error("Switch.leftClient: %s", err) + return + } +} + +func (v *Switch) Firewall() *network.FireWall { + return v.firewall +} + +func (v *Switch) Reload() { + cache.Reload() + for _, w := range v.worker { + w.Reload(v) + } + libol.Go(v.firewall.Start) +} + +func (v *Switch) Save() { + v.cfg.Save() +} diff --git a/pkg/switch/switch_test.go b/pkg/switch/switch_test.go new file mode 100755 index 0000000..06e55d8 --- /dev/null +++ b/pkg/switch/switch_test.go @@ -0,0 +1,21 @@ +package _switch + +import ( + "fmt" + "github.com/luscis/openlan/pkg/cache" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSwitch_LoadPass(t *testing.T) { + sw := &Switch{} + sw.LoadPass("../../.password.no") + sw.LoadPass("../../packaging/resource/password.example") + for user := range cache.User.List() { + if user == nil { + break + } + fmt.Printf("%v\n", user) + } + assert.Equal(t, 2, cache.User.Users.Len(), "notEqual") +} diff --git a/pkg/switch/vxlan.go b/pkg/switch/vxlan.go new file mode 100755 index 0000000..35745f7 --- /dev/null +++ b/pkg/switch/vxlan.go @@ -0,0 +1,51 @@ +package _switch + +import ( + "github.com/luscis/openlan/pkg/api" + co "github.com/luscis/openlan/pkg/config" +) + +type VxLANWorker struct { + *WorkerImpl + spec *co.VxLANSpecifies +} + +func NewVxLANWorker(c *co.Network) *VxLANWorker { + w := &VxLANWorker{ + WorkerImpl: NewWorkerApi(c), + } + w.spec, _ = c.Specifies.(*co.VxLANSpecifies) + return w +} + +func (w *VxLANWorker) Initialize() { + w.WorkerImpl.Initialize() +} + +func (w *VxLANWorker) Start(v api.Switcher) { + w.uuid = v.UUID() + master := GetFabricer(w.spec.Fabric) + if master == nil { + w.out.Warn("VxLANWorker.Start %s not found", w.spec.Fabric) + return + } + w.cfg.Bridge.Mss = master.TcpMss() + master.AddNetwork(w.cfg) + w.WorkerImpl.Start(v) +} + +func (w *VxLANWorker) Stop() { + w.WorkerImpl.Stop() + master := GetFabricer(w.spec.Fabric) + if master == nil { + w.out.Warn("VxLANWorker.Stop %s not found", w.spec.Fabric) + return + } + master.DelNetwork(w.cfg.Bridge.Name, w.spec.Vni) +} + +func (w *VxLANWorker) Reload(v api.Switcher) { + w.Stop() + w.Initialize() + w.Start(v) +} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000..1fb13ab --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1,4 @@ +logrus +vendor + +.idea/ diff --git a/vendor/github.com/Sirupsen/logrus/.golangci.yml b/vendor/github.com/Sirupsen/logrus/.golangci.yml new file mode 100644 index 0000000..65dc285 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. 
+ check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000..c1dbd5a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,15 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: 1.15.x +os: linux +install: + - ./travis/install.sh +script: + - cd ci + - go run mage.go -v -w ../ crossBuild + - go run mage.go -v -w ../ lint + - go run mage.go -v -w ../ test diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000..7567f61 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,259 @@ +# 1.8.1 +Code quality: + * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer + * improve timestamp format documentation + +Fixes: + * fix race condition on logger hooks + + +# 1.8.0 + +Correct versioning number replacing v1.7.1. + +# 1.7.1 + +Beware this release has introduced a new public API and its semver is therefore incorrect. + +Code quality: + * use go 1.15 in travis + * use magefile as task runner + +Fixes: + * small fixes about new go 1.13 error formatting system + * Fix for long time race condiction with mutating data hooks + +Features: + * build support for zos + +# 1.7.0 +Fixes: + * the dependency toward a windows terminal library has been removed + +Features: + * a new buffer pool management API has been added + * a set of `Fn()` functions have been added + +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix whic leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add an hooks/writer sub-package whose goal is to split output on different stream depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). 
+ +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. + * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + +# 1.0.3 + +* Replace example files with testable examples + +# 1.0.2 + +* bug: quote non-string values in text formatter (#583) +* Make (*Logger) SetLevel a public method + +# 1.0.1 + +* bug: fix escaping in text formatter (#575) + +# 1.0.0 + +* Officially changed name to lower-case +* bug: colors on Windows 10 (#541) +* bug: fix race in accessing level (#512) + +# 0.11.5 + +* feature: add writer and writerlevel to entry (#372) + +# 0.11.4 + +* bug: fix undefined variable on solaris (#493) + +# 0.11.3 + +* formatter: configure quoting of empty values (#484) +* formatter: configure quoting character (default is `"`) (#484) +* bug: fix not importing io correctly in non-linux environments (#481) + +# 0.11.2 + +* bug: fix windows terminal detection (#476) + +# 0.11.1 + +* bug: fix tty detection with custom out (#471) + +# 0.11.0 + +* 
performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000..f090cb4 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000..5152b6a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). 
+ +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. 
You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. 
Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. 
For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). get fileName, log's line number and the latest function's name when print log; Sava log to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). logrus's message json formatter with human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. 
+ serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just adds the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t*testing.T){ + logger, hook := test.NewNullLogger() + logger.Error("Helloerror") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
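For instance, a minimal sketch of disabling the lock when writing to an `O_APPEND` file (the conditions that make this safe are listed just below):

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()
	// Appends to an O_APPEND file are effectively atomic for small records,
	// so the logger's internal mutex can be skipped here.
	file, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		logger.Fatal(err)
	}
	defer file.Close()
	logger.SetOutput(file)
	logger.SetNoLock()
	logger.Info("logging without the internal mutex")
}
```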
+ +Situation when locking is not needed includes: + +* You have no hooks registered, or hooks calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000..8fd189e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) 
+} diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000..df9d65c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/Sirupsen/logrus/buffer_pool.go b/vendor/github.com/Sirupsen/logrus/buffer_pool.go new file mode 100644 index 0000000..4545dec --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/buffer_pool.go @@ -0,0 +1,52 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get() +} + +func putBuffer(buf *bytes.Buffer) { + buf.Reset() + bufferPool.Put(buf) +} + +// SetBufferPool allows to replace the default logrus buffer pool +// to better meets the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 0000000..da67aba --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000..07a1e5f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,431 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. 
These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. 
+func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + + buffer = getBuffer() + defer func() { + newEntry.Buffer = nil + putBuffer(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) 
+} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000..017c30c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) 
+} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000..4088837 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. 
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod new file mode 100644 index 0000000..b3919d5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.mod @@ -0,0 +1,10 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 +) + +go 1.13 diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum new file mode 100644 index 0000000..694c18b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. 
Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..c96dc56 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000..3377044 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,404 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enables rather than +// generating the log message and then checking if the level is enabled +type LogFunction func() []interface{} + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. 
This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) 
+} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) + logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) 
+} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000..2f16224 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. 
+type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000..2403de9 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 0000000..4997899 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go new file mode 100644 index 0000000..ebdae3e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 0000000..97af92c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git 
a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000..3293fb3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 0000000..f6710b3 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 0000000..04748b8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 0000000..2879eb5 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..be2c6ef --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,339 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. 
useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000..72e8e3a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,70 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the callers responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + go entry.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/armon/go-socks5/.gitignore b/vendor/github.com/armon/go-socks5/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-socks5/.travis.yml b/vendor/github.com/armon/go-socks5/.travis.yml new file mode 100644 index 0000000..8d61700 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/.travis.yml @@ -0,0 +1,4 @@ +language: go +go: + - 1.1 + - tip diff --git a/vendor/github.com/armon/go-socks5/LICENSE b/vendor/github.com/armon/go-socks5/LICENSE new file mode 100644 index 0000000..a5df10e 
--- /dev/null +++ b/vendor/github.com/armon/go-socks5/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-socks5/README.md b/vendor/github.com/armon/go-socks5/README.md new file mode 100644 index 0000000..9cd1563 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/README.md @@ -0,0 +1,45 @@ +go-socks5 [![Build Status](https://travis-ci.org/armon/go-socks5.png)](https://travis-ci.org/armon/go-socks5) +========= + +Provides the `socks5` package that implements a [SOCKS5 server](http://en.wikipedia.org/wiki/SOCKS). +SOCKS (Secure Sockets) is used to route traffic between a client and server through +an intermediate proxy layer. This can be used to bypass firewalls or NATs. + +Feature +======= + +The package has the following features: +* "No Auth" mode +* User/Password authentication +* Support for the CONNECT command +* Rules to do granular filtering of commands +* Custom DNS resolution +* Unit tests + +TODO +==== + +The package still needs the following: +* Support for the BIND command +* Support for the ASSOCIATE command + + +Example +======= + +Below is a simple example of usage + +```go +// Create a SOCKS5 server +conf := &socks5.Config{} +server, err := socks5.New(conf) +if err != nil { + panic(err) +} + +// Create SOCKS5 proxy on localhost port 8000 +if err := server.ListenAndServe("tcp", "127.0.0.1:8000"); err != nil { + panic(err) +} +``` + diff --git a/vendor/github.com/armon/go-socks5/auth.go b/vendor/github.com/armon/go-socks5/auth.go new file mode 100644 index 0000000..7811e2a --- /dev/null +++ b/vendor/github.com/armon/go-socks5/auth.go @@ -0,0 +1,151 @@ +package socks5 + +import ( + "fmt" + "io" +) + +const ( + NoAuth = uint8(0) + noAcceptable = uint8(255) + UserPassAuth = uint8(2) + userAuthVersion = uint8(1) + authSuccess = uint8(0) + authFailure = uint8(1) +) + +var ( + UserAuthFailed = fmt.Errorf("User authentication failed") + NoSupportedAuth = fmt.Errorf("No supported authentication mechanism") +) + +// A Request encapsulates authentication state provided +// during negotiation +type AuthContext struct { + // Provided auth method + Method uint8 + // Payload provided during negotiation. + // Keys depend on the used auth method. 
+ // For UserPassauth contains Username + Payload map[string]string +} + +type Authenticator interface { + Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) + GetCode() uint8 +} + +// NoAuthAuthenticator is used to handle the "No Authentication" mode +type NoAuthAuthenticator struct{} + +func (a NoAuthAuthenticator) GetCode() uint8 { + return NoAuth +} + +func (a NoAuthAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) { + _, err := writer.Write([]byte{socks5Version, NoAuth}) + return &AuthContext{NoAuth, nil}, err +} + +// UserPassAuthenticator is used to handle username/password based +// authentication +type UserPassAuthenticator struct { + Credentials CredentialStore +} + +func (a UserPassAuthenticator) GetCode() uint8 { + return UserPassAuth +} + +func (a UserPassAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) { + // Tell the client to use user/pass auth + if _, err := writer.Write([]byte{socks5Version, UserPassAuth}); err != nil { + return nil, err + } + + // Get the version and username length + header := []byte{0, 0} + if _, err := io.ReadAtLeast(reader, header, 2); err != nil { + return nil, err + } + + // Ensure we are compatible + if header[0] != userAuthVersion { + return nil, fmt.Errorf("Unsupported auth version: %v", header[0]) + } + + // Get the user name + userLen := int(header[1]) + user := make([]byte, userLen) + if _, err := io.ReadAtLeast(reader, user, userLen); err != nil { + return nil, err + } + + // Get the password length + if _, err := reader.Read(header[:1]); err != nil { + return nil, err + } + + // Get the password + passLen := int(header[0]) + pass := make([]byte, passLen) + if _, err := io.ReadAtLeast(reader, pass, passLen); err != nil { + return nil, err + } + + // Verify the password + if a.Credentials.Valid(string(user), string(pass)) { + if _, err := writer.Write([]byte{userAuthVersion, authSuccess}); err != nil { + return nil, err + } + } else { + if _, err := writer.Write([]byte{userAuthVersion, authFailure}); err != nil { + return nil, err + } + return nil, UserAuthFailed + } + + // Done + return &AuthContext{UserPassAuth, map[string]string{"Username": string(user)}}, nil +} + +// authenticate is used to handle connection authentication +func (s *Server) authenticate(conn io.Writer, bufConn io.Reader) (*AuthContext, error) { + // Get the methods + methods, err := readMethods(bufConn) + if err != nil { + return nil, fmt.Errorf("Failed to get auth methods: %v", err) + } + + // Select a usable method + for _, method := range methods { + cator, found := s.authMethods[method] + if found { + return cator.Authenticate(bufConn, conn) + } + } + + // No usable method found + return nil, noAcceptableAuth(conn) +} + +// noAcceptableAuth is used to handle when we have no eligible +// authentication mechanism +func noAcceptableAuth(conn io.Writer) error { + conn.Write([]byte{socks5Version, noAcceptable}) + return NoSupportedAuth +} + +// readMethods is used to read the number of methods +// and proceeding auth methods +func readMethods(r io.Reader) ([]byte, error) { + header := []byte{0} + if _, err := r.Read(header); err != nil { + return nil, err + } + + numMethods := int(header[0]) + methods := make([]byte, numMethods) + _, err := io.ReadAtLeast(r, methods, numMethods) + return methods, err +} diff --git a/vendor/github.com/armon/go-socks5/credentials.go b/vendor/github.com/armon/go-socks5/credentials.go new file mode 100644 index 0000000..9666427 --- /dev/null +++ 
b/vendor/github.com/armon/go-socks5/credentials.go @@ -0,0 +1,17 @@ +package socks5 + +// CredentialStore is used to support user/pass authentication +type CredentialStore interface { + Valid(user, password string) bool +} + +// StaticCredentials enables using a map directly as a credential store +type StaticCredentials map[string]string + +func (s StaticCredentials) Valid(user, password string) bool { + pass, ok := s[user] + if !ok { + return false + } + return password == pass +} diff --git a/vendor/github.com/armon/go-socks5/request.go b/vendor/github.com/armon/go-socks5/request.go new file mode 100644 index 0000000..b615fcb --- /dev/null +++ b/vendor/github.com/armon/go-socks5/request.go @@ -0,0 +1,364 @@ +package socks5 + +import ( + "fmt" + "io" + "net" + "strconv" + "strings" + + "golang.org/x/net/context" +) + +const ( + ConnectCommand = uint8(1) + BindCommand = uint8(2) + AssociateCommand = uint8(3) + ipv4Address = uint8(1) + fqdnAddress = uint8(3) + ipv6Address = uint8(4) +) + +const ( + successReply uint8 = iota + serverFailure + ruleFailure + networkUnreachable + hostUnreachable + connectionRefused + ttlExpired + commandNotSupported + addrTypeNotSupported +) + +var ( + unrecognizedAddrType = fmt.Errorf("Unrecognized address type") +) + +// AddressRewriter is used to rewrite a destination transparently +type AddressRewriter interface { + Rewrite(ctx context.Context, request *Request) (context.Context, *AddrSpec) +} + +// AddrSpec is used to return the target AddrSpec +// which may be specified as IPv4, IPv6, or a FQDN +type AddrSpec struct { + FQDN string + IP net.IP + Port int +} + +func (a *AddrSpec) String() string { + if a.FQDN != "" { + return fmt.Sprintf("%s (%s):%d", a.FQDN, a.IP, a.Port) + } + return fmt.Sprintf("%s:%d", a.IP, a.Port) +} + +// Address returns a string suitable to dial; prefer returning IP-based +// address, fallback to FQDN +func (a AddrSpec) Address() string { + if 0 != len(a.IP) { + return net.JoinHostPort(a.IP.String(), strconv.Itoa(a.Port)) + } + return net.JoinHostPort(a.FQDN, strconv.Itoa(a.Port)) +} + +// A Request represents request received by a server +type Request struct { + // Protocol version + Version uint8 + // Requested command + Command uint8 + // AuthContext provided during negotiation + AuthContext *AuthContext + // AddrSpec of the the network that sent the request + RemoteAddr *AddrSpec + // AddrSpec of the desired destination + DestAddr *AddrSpec + // AddrSpec of the actual destination (might be affected by rewrite) + realDestAddr *AddrSpec + bufConn io.Reader +} + +type conn interface { + Write([]byte) (int, error) + RemoteAddr() net.Addr +} + +// NewRequest creates a new Request from the tcp connection +func NewRequest(bufConn io.Reader) (*Request, error) { + // Read the version byte + header := []byte{0, 0, 0} + if _, err := io.ReadAtLeast(bufConn, header, 3); err != nil { + return nil, fmt.Errorf("Failed to get command version: %v", err) + } + + // Ensure we are compatible + if header[0] != socks5Version { + return nil, fmt.Errorf("Unsupported command version: %v", header[0]) + } + + // Read in the destination address + dest, err := readAddrSpec(bufConn) + if err != nil { + return nil, err + } + + request := &Request{ + Version: socks5Version, + Command: header[1], + DestAddr: dest, + bufConn: bufConn, + } + + return request, nil +} + +// handleRequest is used for request processing after authentication +func (s *Server) handleRequest(req *Request, conn conn) error { + ctx := context.Background() + + // Resolve the address if we 
have a FQDN + dest := req.DestAddr + if dest.FQDN != "" { + ctx_, addr, err := s.config.Resolver.Resolve(ctx, dest.FQDN) + if err != nil { + if err := sendReply(conn, hostUnreachable, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Failed to resolve destination '%v': %v", dest.FQDN, err) + } + ctx = ctx_ + dest.IP = addr + } + + // Apply any address rewrites + req.realDestAddr = req.DestAddr + if s.config.Rewriter != nil { + ctx, req.realDestAddr = s.config.Rewriter.Rewrite(ctx, req) + } + + // Switch on the command + switch req.Command { + case ConnectCommand: + return s.handleConnect(ctx, conn, req) + case BindCommand: + return s.handleBind(ctx, conn, req) + case AssociateCommand: + return s.handleAssociate(ctx, conn, req) + default: + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Unsupported command: %v", req.Command) + } +} + +// handleConnect is used to handle a connect command +func (s *Server) handleConnect(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Connect to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // Attempt to connect + dial := s.config.Dial + if dial == nil { + dial = func(ctx context.Context, net_, addr string) (net.Conn, error) { + return net.Dial(net_, addr) + } + } + target, err := dial(ctx, "tcp", req.realDestAddr.Address()) + if err != nil { + msg := err.Error() + resp := hostUnreachable + if strings.Contains(msg, "refused") { + resp = connectionRefused + } else if strings.Contains(msg, "network is unreachable") { + resp = networkUnreachable + } + if err := sendReply(conn, resp, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Connect to %v failed: %v", req.DestAddr, err) + } + defer target.Close() + + // Send success + local := target.LocalAddr().(*net.TCPAddr) + bind := AddrSpec{IP: local.IP, Port: local.Port} + if err := sendReply(conn, successReply, &bind); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + + // Start proxying + errCh := make(chan error, 2) + go proxy(target, req.bufConn, errCh) + go proxy(conn, target, errCh) + + // Wait + for i := 0; i < 2; i++ { + e := <-errCh + if e != nil { + // return from this function closes target (and conn). 
+ return e + } + } + return nil +} + +// handleBind is used to handle a connect command +func (s *Server) handleBind(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Bind to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // TODO: Support bind + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return nil +} + +// handleAssociate is used to handle a connect command +func (s *Server) handleAssociate(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Associate to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // TODO: Support associate + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return nil +} + +// readAddrSpec is used to read AddrSpec. +// Expects an address type byte, follwed by the address and port +func readAddrSpec(r io.Reader) (*AddrSpec, error) { + d := &AddrSpec{} + + // Get the address type + addrType := []byte{0} + if _, err := r.Read(addrType); err != nil { + return nil, err + } + + // Handle on a per type basis + switch addrType[0] { + case ipv4Address: + addr := make([]byte, 4) + if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil { + return nil, err + } + d.IP = net.IP(addr) + + case ipv6Address: + addr := make([]byte, 16) + if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil { + return nil, err + } + d.IP = net.IP(addr) + + case fqdnAddress: + if _, err := r.Read(addrType); err != nil { + return nil, err + } + addrLen := int(addrType[0]) + fqdn := make([]byte, addrLen) + if _, err := io.ReadAtLeast(r, fqdn, addrLen); err != nil { + return nil, err + } + d.FQDN = string(fqdn) + + default: + return nil, unrecognizedAddrType + } + + // Read the port + port := []byte{0, 0} + if _, err := io.ReadAtLeast(r, port, 2); err != nil { + return nil, err + } + d.Port = (int(port[0]) << 8) | int(port[1]) + + return d, nil +} + +// sendReply is used to send a reply message +func sendReply(w io.Writer, resp uint8, addr *AddrSpec) error { + // Format the address + var addrType uint8 + var addrBody []byte + var addrPort uint16 + switch { + case addr == nil: + addrType = ipv4Address + addrBody = []byte{0, 0, 0, 0} + addrPort = 0 + + case addr.FQDN != "": + addrType = fqdnAddress + addrBody = append([]byte{byte(len(addr.FQDN))}, addr.FQDN...) 
+ addrPort = uint16(addr.Port) + + case addr.IP.To4() != nil: + addrType = ipv4Address + addrBody = []byte(addr.IP.To4()) + addrPort = uint16(addr.Port) + + case addr.IP.To16() != nil: + addrType = ipv6Address + addrBody = []byte(addr.IP.To16()) + addrPort = uint16(addr.Port) + + default: + return fmt.Errorf("Failed to format address: %v", addr) + } + + // Format the message + msg := make([]byte, 6+len(addrBody)) + msg[0] = socks5Version + msg[1] = resp + msg[2] = 0 // Reserved + msg[3] = addrType + copy(msg[4:], addrBody) + msg[4+len(addrBody)] = byte(addrPort >> 8) + msg[4+len(addrBody)+1] = byte(addrPort & 0xff) + + // Send the message + _, err := w.Write(msg) + return err +} + +type closeWriter interface { + CloseWrite() error +} + +// proxy is used to suffle data from src to destination, and sends errors +// down a dedicated channel +func proxy(dst io.Writer, src io.Reader, errCh chan error) { + _, err := io.Copy(dst, src) + if tcpConn, ok := dst.(closeWriter); ok { + tcpConn.CloseWrite() + } + errCh <- err +} diff --git a/vendor/github.com/armon/go-socks5/resolver.go b/vendor/github.com/armon/go-socks5/resolver.go new file mode 100644 index 0000000..b75a5c4 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/resolver.go @@ -0,0 +1,23 @@ +package socks5 + +import ( + "net" + + "golang.org/x/net/context" +) + +// NameResolver is used to implement custom name resolution +type NameResolver interface { + Resolve(ctx context.Context, name string) (context.Context, net.IP, error) +} + +// DNSResolver uses the system DNS to resolve host names +type DNSResolver struct{} + +func (d DNSResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { + addr, err := net.ResolveIPAddr("ip", name) + if err != nil { + return ctx, nil, err + } + return ctx, addr.IP, err +} diff --git a/vendor/github.com/armon/go-socks5/ruleset.go b/vendor/github.com/armon/go-socks5/ruleset.go new file mode 100644 index 0000000..ba0e353 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/ruleset.go @@ -0,0 +1,41 @@ +package socks5 + +import ( + "golang.org/x/net/context" +) + +// RuleSet is used to provide custom rules to allow or prohibit actions +type RuleSet interface { + Allow(ctx context.Context, req *Request) (context.Context, bool) +} + +// PermitAll returns a RuleSet which allows all types of connections +func PermitAll() RuleSet { + return &PermitCommand{true, true, true} +} + +// PermitNone returns a RuleSet which disallows all types of connections +func PermitNone() RuleSet { + return &PermitCommand{false, false, false} +} + +// PermitCommand is an implementation of the RuleSet which +// enables filtering supported commands +type PermitCommand struct { + EnableConnect bool + EnableBind bool + EnableAssociate bool +} + +func (p *PermitCommand) Allow(ctx context.Context, req *Request) (context.Context, bool) { + switch req.Command { + case ConnectCommand: + return ctx, p.EnableConnect + case BindCommand: + return ctx, p.EnableBind + case AssociateCommand: + return ctx, p.EnableAssociate + } + + return ctx, false +} diff --git a/vendor/github.com/armon/go-socks5/socks5.go b/vendor/github.com/armon/go-socks5/socks5.go new file mode 100644 index 0000000..a17be68 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/socks5.go @@ -0,0 +1,169 @@ +package socks5 + +import ( + "bufio" + "fmt" + "log" + "net" + "os" + + "golang.org/x/net/context" +) + +const ( + socks5Version = uint8(5) +) + +// Config is used to setup and configure a Server +type Config struct { + // AuthMethods can be 
provided to implement custom authentication + // By default, "auth-less" mode is enabled. + // For password-based auth use UserPassAuthenticator. + AuthMethods []Authenticator + + // If provided, username/password authentication is enabled, + // by appending a UserPassAuthenticator to AuthMethods. If not provided, + // and AUthMethods is nil, then "auth-less" mode is enabled. + Credentials CredentialStore + + // Resolver can be provided to do custom name resolution. + // Defaults to DNSResolver if not provided. + Resolver NameResolver + + // Rules is provided to enable custom logic around permitting + // various commands. If not provided, PermitAll is used. + Rules RuleSet + + // Rewriter can be used to transparently rewrite addresses. + // This is invoked before the RuleSet is invoked. + // Defaults to NoRewrite. + Rewriter AddressRewriter + + // BindIP is used for bind or udp associate + BindIP net.IP + + // Logger can be used to provide a custom log target. + // Defaults to stdout. + Logger *log.Logger + + // Optional function for dialing out + Dial func(ctx context.Context, network, addr string) (net.Conn, error) +} + +// Server is reponsible for accepting connections and handling +// the details of the SOCKS5 protocol +type Server struct { + config *Config + authMethods map[uint8]Authenticator +} + +// New creates a new Server and potentially returns an error +func New(conf *Config) (*Server, error) { + // Ensure we have at least one authentication method enabled + if len(conf.AuthMethods) == 0 { + if conf.Credentials != nil { + conf.AuthMethods = []Authenticator{&UserPassAuthenticator{conf.Credentials}} + } else { + conf.AuthMethods = []Authenticator{&NoAuthAuthenticator{}} + } + } + + // Ensure we have a DNS resolver + if conf.Resolver == nil { + conf.Resolver = DNSResolver{} + } + + // Ensure we have a rule set + if conf.Rules == nil { + conf.Rules = PermitAll() + } + + // Ensure we have a log target + if conf.Logger == nil { + conf.Logger = log.New(os.Stdout, "", log.LstdFlags) + } + + server := &Server{ + config: conf, + } + + server.authMethods = make(map[uint8]Authenticator) + + for _, a := range conf.AuthMethods { + server.authMethods[a.GetCode()] = a + } + + return server, nil +} + +// ListenAndServe is used to create a listener and serve on it +func (s *Server) ListenAndServe(network, addr string) error { + l, err := net.Listen(network, addr) + if err != nil { + return err + } + return s.Serve(l) +} + +// Serve is used to serve connections from a listener +func (s *Server) Serve(l net.Listener) error { + for { + conn, err := l.Accept() + if err != nil { + return err + } + go s.ServeConn(conn) + } + return nil +} + +// ServeConn is used to serve a single connection. 
+func (s *Server) ServeConn(conn net.Conn) error { + defer conn.Close() + bufConn := bufio.NewReader(conn) + + // Read the version byte + version := []byte{0} + if _, err := bufConn.Read(version); err != nil { + s.config.Logger.Printf("[ERR] socks: Failed to get version byte: %v", err) + return err + } + + // Ensure we are compatible + if version[0] != socks5Version { + err := fmt.Errorf("Unsupported SOCKS version: %v", version) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + // Authenticate the connection + authContext, err := s.authenticate(conn, bufConn) + if err != nil { + err = fmt.Errorf("Failed to authenticate: %v", err) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + request, err := NewRequest(bufConn) + if err != nil { + if err == unrecognizedAddrType { + if err := sendReply(conn, addrTypeNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + } + return fmt.Errorf("Failed to read destination address: %v", err) + } + request.AuthContext = authContext + if client, ok := conn.RemoteAddr().(*net.TCPAddr); ok { + request.RemoteAddr = &AddrSpec{IP: client.IP, Port: client.Port} + } + + // Process the client request + if err := s.handleRequest(request, conn); err != nil { + err = fmt.Errorf("Failed to handle request: %v", err) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + return nil +} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 0000000..339177b --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
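Aside (not part of the vendored diff): the go-socks5 files above wire authentication (CredentialStore, UserPassAuthenticator), filtering (RuleSet, PermitCommand) and dialing together through Config and New. A minimal sketch of how those pieces compose under stated assumptions — the username, password, and listen address below are placeholders, not values taken from the vendored code:

```go
package main

import (
	"log"

	socks5 "github.com/armon/go-socks5"
)

func main() {
	conf := &socks5.Config{
		// StaticCredentials implements CredentialStore; because AuthMethods is
		// left nil, New() wraps it in a UserPassAuthenticator (see socks5.go above).
		Credentials: socks5.StaticCredentials{"alice": "secret"}, // placeholder user/pass
		// Allow only CONNECT; BIND and ASSOCIATE are rejected by the rule set.
		Rules: &socks5.PermitCommand{EnableConnect: true},
	}

	server, err := socks5.New(conf)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder listen address.
	if err := server.ListenAndServe("tcp", "127.0.0.1:1080"); err != nil {
		log.Fatal(err)
	}
}
```

Because Resolver and Logger are also left unset, New() falls back to DNSResolver and a stdout logger, as shown in the vendored socks5.go.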
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 0000000..1602287 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 +6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 +12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 
+2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 +11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 +6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 
+4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 0000000..d7d14f8 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. 
+func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. 
+func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. 
+ copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 0000000..50d95c5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml new file mode 100644 index 0000000..c79105c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.13 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 0000000..89b8179 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 0000000..16abdfc --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,32 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. 
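Aside (not part of the vendored README): as a hedged sketch, the ExponentialBackOff type introduced later in this diff can drive a retry loop by hand; the failing operation and the 5-second cap below are placeholders:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	backoff "github.com/cenkalti/backoff/v4"
)

func main() {
	operation := func() error {
		// Placeholder: pretend the call always fails.
		return errors.New("temporary failure")
	}

	b := backoff.NewExponentialBackOff()
	b.MaxElapsedTime = 5 * time.Second // placeholder cap; 0 would retry forever

	for {
		if err := operation(); err == nil {
			return // success
		}
		next := b.NextBackOff()
		if next == backoff.Stop {
			fmt.Println("giving up: MaxElapsedTime exceeded")
			return
		}
		time.Sleep(next)
	}
}
```

NewExponentialBackOff already calls Reset, so NextBackOff can be used immediately; Stop (-1) signals that MaxElapsedTime has been exceeded, per the vendored backoff.go and exponential.go below.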
+ +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[travis]: https://travis-ci.org/cenkalti/backoff +[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 0000000..3676ee4 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 0000000..4848233 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 0000000..3d34532 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,158 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
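+
+A short construction sketch (the specific values are illustrative only):
+
+	b := backoff.NewExponentialBackOff()
+	b.InitialInterval = 100 * time.Millisecond
+	b.MaxElapsedTime = 2 * time.Minute
+	b.Reset() // pick up the new InitialInterval before first use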
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. 
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/go.mod b/vendor/github.com/cenkalti/backoff/v4/go.mod new file mode 100644 index 0000000..f811bea --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/go.mod @@ -0,0 +1,3 @@ +module github.com/cenkalti/backoff/v4 + +go 1.13 diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 0000000..1ce2507 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,112 @@ +package backoff + +import ( + "errors" + "time" +) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + var err error + var next time.Duration + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return cerr + } + + return err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. 
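+//
+// For example (a sketch; errNotFound stands in for any error that should
+// not be retried):
+//
+//	err := backoff.Retry(func() error {
+//		return backoff.Permanent(errNotFound)
+//	}, backoff.NewExponentialBackOff())
+//
+// Here Retry returns errNotFound immediately instead of scheduling
+// another attempt.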
+func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 0000000..df9d68b --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000..8120d02 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. 
+func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 0000000..28d58ca --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/cenkalti/hub/.gitignore b/vendor/github.com/cenkalti/hub/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/cenkalti/hub/.travis.yml b/vendor/github.com/cenkalti/hub/.travis.yml new file mode 100644 index 0000000..b05e4c5 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/.travis.yml @@ -0,0 +1,3 @@ +language: go +go: 1.2 + diff --git a/vendor/github.com/cenkalti/hub/LICENSE b/vendor/github.com/cenkalti/hub/LICENSE new file mode 100644 index 0000000..89b8179 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/cenkalti/hub/README.md b/vendor/github.com/cenkalti/hub/README.md new file mode 100644 index 0000000..d3f2118 --- /dev/null +++ b/vendor/github.com/cenkalti/hub/README.md @@ -0,0 +1,5 @@ +hub +=== + +[![GoDoc](https://godoc.org/github.com/cenkalti/hub?status.png)](https://godoc.org/github.com/cenkalti/hub) +[![Build Status](https://travis-ci.org/cenkalti/hub.png)](https://travis-ci.org/cenkalti/hub) diff --git a/vendor/github.com/cenkalti/hub/hub.go b/vendor/github.com/cenkalti/hub/hub.go new file mode 100644 index 0000000..24c5efa --- /dev/null +++ b/vendor/github.com/cenkalti/hub/hub.go @@ -0,0 +1,82 @@ +// Package hub provides a simple event dispatcher for publish/subscribe pattern. +package hub + +import "sync" + +type Kind int + +// Event is an interface for published events. +type Event interface { + Kind() Kind +} + +// Hub is an event dispatcher, publishes events to the subscribers +// which are subscribed for a specific event type. +// Optimized for publish calls. +// The handlers may be called in order different than they are registered. +type Hub struct { + subscribers map[Kind][]handler + m sync.RWMutex + seq uint64 +} + +type handler struct { + f func(Event) + id uint64 +} + +// Subscribe registers f for the event of a specific kind. +func (h *Hub) Subscribe(kind Kind, f func(Event)) (cancel func()) { + var cancelled bool + h.m.Lock() + h.seq++ + id := h.seq + if h.subscribers == nil { + h.subscribers = make(map[Kind][]handler) + } + h.subscribers[kind] = append(h.subscribers[kind], handler{id: id, f: f}) + h.m.Unlock() + return func() { + h.m.Lock() + if cancelled { + h.m.Unlock() + return + } + cancelled = true + a := h.subscribers[kind] + for i, f := range a { + if f.id == id { + a[i], h.subscribers[kind] = a[len(a)-1], a[:len(a)-1] + break + } + } + if len(a) == 0 { + delete(h.subscribers, kind) + } + h.m.Unlock() + } +} + +// Publish an event to the subscribers. +func (h *Hub) Publish(e Event) { + h.m.RLock() + if handlers, ok := h.subscribers[e.Kind()]; ok { + for _, h := range handlers { + h.f(e) + } + } + h.m.RUnlock() +} + +// DefaultHub is the default Hub used by Publish and Subscribe. +var DefaultHub Hub + +// Subscribe registers f for the event of a specific kind in the DefaultHub. +func Subscribe(kind Kind, f func(Event)) (cancel func()) { + return DefaultHub.Subscribe(kind, f) +} + +// Publish an event to the subscribers in DefaultHub. 
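+//
+// A minimal sketch of the pattern from a consumer's point of view
+// (loginEvent and kindLogin are illustrative names, not part of this
+// package):
+//
+//	const kindLogin hub.Kind = 1
+//
+//	type loginEvent struct{ User string }
+//
+//	func (loginEvent) Kind() hub.Kind { return kindLogin }
+//
+//	cancel := hub.Subscribe(kindLogin, func(e hub.Event) {
+//		fmt.Println("login:", e.(loginEvent).User)
+//	})
+//	hub.Publish(loginEvent{User: "alice"})
+//	cancel()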
+func Publish(e Event) { + DefaultHub.Publish(e) +} diff --git a/vendor/github.com/cenkalti/rpc2/.gitignore b/vendor/github.com/cenkalti/rpc2/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/cenkalti/rpc2/.travis.yml b/vendor/github.com/cenkalti/rpc2/.travis.yml new file mode 100644 index 0000000..ae8233c --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.15 + - tip + +arch: + - amd64 + - ppc64le diff --git a/vendor/github.com/cenkalti/rpc2/LICENSE b/vendor/github.com/cenkalti/rpc2/LICENSE new file mode 100644 index 0000000..d565b1b --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/cenkalti/rpc2/README.md b/vendor/github.com/cenkalti/rpc2/README.md new file mode 100644 index 0000000..3dffd26 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/README.md @@ -0,0 +1,82 @@ +rpc2 +==== + +[![GoDoc](https://godoc.org/github.com/cenkalti/rpc2?status.png)](https://godoc.org/github.com/cenkalti/rpc2) +[![Build Status](https://travis-ci.org/cenkalti/rpc2.png)](https://travis-ci.org/cenkalti/rpc2) + +rpc2 is a fork of net/rpc package in the standard library. +The main goal is to add bi-directional support to calls. +That means server can call the methods of client. +This is not possible with net/rpc package. +In order to do this it adds a `*Client` argument to method signatures. 
+ +Install +-------- + + go get github.com/cenkalti/rpc2 + +Example server +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + srv := rpc2.NewServer() + srv.Handle("add", func(client *rpc2.Client, args *Args, reply *Reply) error { + + // Reversed call (server to client) + var rep Reply + client.Call("mult", Args{2, 3}, &rep) + fmt.Println("mult result:", rep) + + *reply = Reply(args.A + args.B) + return nil + }) + + lis, _ := net.Listen("tcp", "127.0.0.1:5000") + srv.Accept(lis) +} +``` + +Example Client +--------------- + +```go +package main + +import ( + "fmt" + "net" + + "github.com/cenkalti/rpc2" +) + +type Args struct{ A, B int } +type Reply int + +func main() { + conn, _ := net.Dial("tcp", "127.0.0.1:5000") + + clt := rpc2.NewClient(conn) + clt.Handle("mult", func(client *rpc2.Client, args *Args, reply *Reply) error { + *reply = Reply(args.A * args.B) + return nil + }) + go clt.Run() + + var rep Reply + clt.Call("add", Args{1, 2}, &rep) + fmt.Println("add result:", rep) +} +``` diff --git a/vendor/github.com/cenkalti/rpc2/client.go b/vendor/github.com/cenkalti/rpc2/client.go new file mode 100644 index 0000000..cc99569 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/client.go @@ -0,0 +1,364 @@ +// Package rpc2 provides bi-directional RPC client and server similar to net/rpc. +package rpc2 + +import ( + "context" + "errors" + "io" + "log" + "reflect" + "sync" +) + +// Client represents an RPC Client. +// There may be multiple outstanding Calls associated +// with a single Client, and a Client may be used by +// multiple goroutines simultaneously. +type Client struct { + mutex sync.Mutex // protects pending, seq, request + sending sync.Mutex + request Request // temp area used in send() + seq uint64 + pending map[uint64]*Call + closing bool + shutdown bool + server bool + codec Codec + handlers map[string]*handler + disconnect chan struct{} + State *State // additional information to associate with client + blocking bool // whether to block request handling +} + +// NewClient returns a new Client to handle requests to the +// set of services at the other end of the connection. +// It adds a buffer to the write side of the connection so +// the header and payload are sent as a unit. +func NewClient(conn io.ReadWriteCloser) *Client { + return NewClientWithCodec(NewGobCodec(conn)) +} + +// NewClientWithCodec is like NewClient but uses the specified +// codec to encode requests and decode responses. +func NewClientWithCodec(codec Codec) *Client { + return &Client{ + codec: codec, + pending: make(map[uint64]*Call), + handlers: make(map[string]*handler), + disconnect: make(chan struct{}), + seq: 1, // 0 means notification. + } +} + +// SetBlocking puts the client in blocking mode. +// In blocking mode, received requests are processes synchronously. +// If you have methods that may take a long time, other subsequent requests may time out. +func (c *Client) SetBlocking(blocking bool) { + c.blocking = blocking +} + +// Run the client's read loop. +// You must run this method before calling any methods on the server. +func (c *Client) Run() { + c.readLoop() +} + +// DisconnectNotify returns a channel that is closed +// when the client connection has gone away. +func (c *Client) DisconnectNotify() chan struct{} { + return c.disconnect +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. 
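+//
+// As checked by addHandler, handlerFunc must look like
+//
+//	func(client *Client, args *Args, reply *Reply) error
+//
+// where Args and Reply are placeholder names: both types must be exported
+// or builtin, reply must be a pointer, and args may be a value or a pointer.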
+func (c *Client) Handle(method string, handlerFunc interface{}) { + addHandler(c.handlers, method, handlerFunc) +} + +// readLoop reads messages from codec. +// It reads a reqeust or a response to the previous request. +// If the message is request, calls the handler function. +// If the message is response, sends the reply to the associated call. +func (c *Client) readLoop() { + var err error + var req Request + var resp Response + for err == nil { + req = Request{} + resp = Response{} + if err = c.codec.ReadHeader(&req, &resp); err != nil { + break + } + + if req.Method != "" { + // request comes to server + if err = c.readRequest(&req); err != nil { + debugln("rpc2: error reading request:", err.Error()) + } + } else { + // response comes to client + if err = c.readResponse(&resp); err != nil { + debugln("rpc2: error reading response:", err.Error()) + } + } + } + // Terminate pending calls. + c.sending.Lock() + c.mutex.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrShutdown + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.Error = err + call.done() + } + c.mutex.Unlock() + c.sending.Unlock() + if err != io.EOF && !closing && !c.server { + debugln("rpc2: client protocol error:", err) + } + close(c.disconnect) + if !closing { + c.codec.Close() + } +} + +func (c *Client) handleRequest(req Request, method *handler, argv reflect.Value) { + // Invoke the method, providing a new value for the reply. + replyv := reflect.New(method.replyType.Elem()) + + returnValues := method.fn.Call([]reflect.Value{reflect.ValueOf(c), argv, replyv}) + + // Do not send response if request is a notification. + if req.Seq == 0 { + return + } + + // The return value for the method is an error. + errInter := returnValues[0].Interface() + errmsg := "" + if errInter != nil { + errmsg = errInter.(error).Error() + } + resp := &Response{ + Seq: req.Seq, + Error: errmsg, + } + if err := c.codec.WriteResponse(resp, replyv.Interface()); err != nil { + debugln("rpc2: error writing response:", err.Error()) + } +} + +func (c *Client) readRequest(req *Request) error { + method, ok := c.handlers[req.Method] + if !ok { + resp := &Response{ + Seq: req.Seq, + Error: "rpc2: can't find method " + req.Method, + } + return c.codec.WriteResponse(resp, resp) + } + + // Decode the argument value. + var argv reflect.Value + argIsValue := false // if true, need to indirect before calling. + if method.argType.Kind() == reflect.Ptr { + argv = reflect.New(method.argType.Elem()) + } else { + argv = reflect.New(method.argType) + argIsValue = true + } + // argv guaranteed to be a pointer now. + if err := c.codec.ReadRequestBody(argv.Interface()); err != nil { + return err + } + if argIsValue { + argv = argv.Elem() + } + + if c.blocking { + c.handleRequest(*req, method, argv) + } else { + go c.handleRequest(*req, method, argv) + } + + return nil +} + +func (c *Client) readResponse(resp *Response) error { + seq := resp.Seq + c.mutex.Lock() + call := c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + + var err error + switch { + case call == nil: + // We've got no pending call. That usually means that + // WriteRequest partially failed, and call was already + // removed; response is a server telling us about an + // error reading request body. We should still attempt + // to read error body, but there's no one to give it to. 
+ err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + case resp.Error != "": + // We've got an error response. Give this to the request; + // any subsequent requests will get the ReadResponseBody + // error if there is one. + call.Error = ServerError(resp.Error) + err = c.codec.ReadResponseBody(nil) + if err != nil { + err = errors.New("reading error body: " + err.Error()) + } + call.done() + default: + err = c.codec.ReadResponseBody(call.Reply) + if err != nil { + call.Error = errors.New("reading body " + err.Error()) + } + call.done() + } + + return err +} + +// Close waits for active calls to finish and closes the codec. +func (c *Client) Close() error { + c.mutex.Lock() + if c.shutdown || c.closing { + c.mutex.Unlock() + return ErrShutdown + } + c.closing = true + c.mutex.Unlock() + return c.codec.Close() +} + +// Go invokes the function asynchronously. It returns the Call structure representing +// the invocation. The done channel will signal when the call is complete by returning +// the same Call object. If done is nil, Go will allocate a new channel. +// If non-nil, done must be buffered or Go will deliberately crash. +func (c *Client) Go(method string, args interface{}, reply interface{}, done chan *Call) *Call { + call := new(Call) + call.Method = method + call.Args = args + call.Reply = reply + if done == nil { + done = make(chan *Call, 10) // buffered. + } else { + // If caller passes done != nil, it must arrange that + // done has enough buffer for the number of simultaneous + // RPCs that will be using that channel. If the channel + // is totally unbuffered, it's best not to run at all. + if cap(done) == 0 { + log.Panic("rpc2: done channel is unbuffered") + } + } + call.Done = done + c.send(call) + return call +} + +// CallWithContext invokes the named function, waits for it to complete, and +// returns its error status, or an error from Context timeout. +func (c *Client) CallWithContext(ctx context.Context, method string, args interface{}, reply interface{}) error { + call := c.Go(method, args, reply, make(chan *Call, 1)) + select { + case <-call.Done: + return call.Error + case <-ctx.Done(): + return ctx.Err() + } + return nil +} + +// Call invokes the named function, waits for it to complete, and returns its error status. +func (c *Client) Call(method string, args interface{}, reply interface{}) error { + return c.CallWithContext(context.Background(), method, args, reply) +} + +func (call *Call) done() { + select { + case call.Done <- call: + // ok + default: + // We don't want to block here. It is the caller's responsibility to make + // sure the channel has enough buffer space. See comment in Go(). + debugln("rpc2: discarding Call reply due to insufficient Done chan capacity") + } +} + +// ServerError represents an error that has been returned from +// the remote side of the RPC connection. +type ServerError string + +func (e ServerError) Error() string { + return string(e) +} + +// ErrShutdown is returned when the connection is closing or closed. +var ErrShutdown = errors.New("connection is shut down") + +// Call represents an active RPC. +type Call struct { + Method string // The name of the service and method to call. + Args interface{} // The argument to the function (*struct). + Reply interface{} // The reply from the function (*struct). + Error error // After completion, the error status. + Done chan *Call // Strobes when call is complete. 
+} + +func (c *Client) send(call *Call) { + c.sending.Lock() + defer c.sending.Unlock() + + // Register this call. + c.mutex.Lock() + if c.shutdown || c.closing { + call.Error = ErrShutdown + c.mutex.Unlock() + call.done() + return + } + seq := c.seq + c.seq++ + c.pending[seq] = call + c.mutex.Unlock() + + // Encode and send the request. + c.request.Seq = seq + c.request.Method = call.Method + err := c.codec.WriteRequest(&c.request, call.Args) + if err != nil { + c.mutex.Lock() + call = c.pending[seq] + delete(c.pending, seq) + c.mutex.Unlock() + if call != nil { + call.Error = err + call.done() + } + } +} + +// Notify sends a request to the receiver but does not wait for a return value. +func (c *Client) Notify(method string, args interface{}) error { + c.sending.Lock() + defer c.sending.Unlock() + + if c.shutdown || c.closing { + return ErrShutdown + } + + c.request.Seq = 0 + c.request.Method = method + return c.codec.WriteRequest(&c.request, args) +} diff --git a/vendor/github.com/cenkalti/rpc2/codec.go b/vendor/github.com/cenkalti/rpc2/codec.go new file mode 100644 index 0000000..b097d9a --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/codec.go @@ -0,0 +1,125 @@ +package rpc2 + +import ( + "bufio" + "encoding/gob" + "io" + "sync" +) + +// A Codec implements reading and writing of RPC requests and responses. +// The client calls ReadHeader to read a message header. +// The implementation must populate either Request or Response argument. +// Depending on which argument is populated, ReadRequestBody or +// ReadResponseBody is called right after ReadHeader. +// ReadRequestBody and ReadResponseBody may be called with a nil +// argument to force the body to be read and then discarded. +type Codec interface { + // ReadHeader must read a message and populate either the request + // or the response by inspecting the incoming message. + ReadHeader(*Request, *Response) error + + // ReadRequestBody into args argument of handler function. + ReadRequestBody(interface{}) error + + // ReadResponseBody into reply argument of handler function. + ReadResponseBody(interface{}) error + + // WriteRequest must be safe for concurrent use by multiple goroutines. + WriteRequest(*Request, interface{}) error + + // WriteResponse must be safe for concurrent use by multiple goroutines. + WriteResponse(*Response, interface{}) error + + // Close is called when client/server finished with the connection. + Close() error +} + +// Request is a header written before every RPC call. +type Request struct { + Seq uint64 // sequence number chosen by client + Method string +} + +// Response is a header written before every RPC return. +type Response struct { + Seq uint64 // echoes that of the request + Error string // error, if any. +} + +type gobCodec struct { + rwc io.ReadWriteCloser + dec *gob.Decoder + enc *gob.Encoder + encBuf *bufio.Writer + mutex sync.Mutex +} + +type message struct { + Seq uint64 + Method string + Error string +} + +// NewGobCodec returns a new rpc2.Codec using gob encoding/decoding on conn. 
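+//
+// NewClient already wraps its connection in this codec; you only need to
+// construct it yourself when supplying a codec explicitly, e.g. (a sketch,
+// with conn being any io.ReadWriteCloser):
+//
+//	clt := NewClientWithCodec(NewGobCodec(conn))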
+func NewGobCodec(conn io.ReadWriteCloser) Codec { + buf := bufio.NewWriter(conn) + return &gobCodec{ + rwc: conn, + dec: gob.NewDecoder(conn), + enc: gob.NewEncoder(buf), + encBuf: buf, + } +} + +func (c *gobCodec) ReadHeader(req *Request, resp *Response) error { + var msg message + if err := c.dec.Decode(&msg); err != nil { + return err + } + + if msg.Method != "" { + req.Seq = msg.Seq + req.Method = msg.Method + } else { + resp.Seq = msg.Seq + resp.Error = msg.Error + } + return nil +} + +func (c *gobCodec) ReadRequestBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) ReadResponseBody(body interface{}) error { + return c.dec.Decode(body) +} + +func (c *gobCodec) WriteRequest(r *Request, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) WriteResponse(r *Response, body interface{}) (err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if err = c.enc.Encode(r); err != nil { + return + } + if err = c.enc.Encode(body); err != nil { + return + } + return c.encBuf.Flush() +} + +func (c *gobCodec) Close() error { + return c.rwc.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/debug.go b/vendor/github.com/cenkalti/rpc2/debug.go new file mode 100644 index 0000000..ec1b625 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/debug.go @@ -0,0 +1,12 @@ +package rpc2 + +import "log" + +// DebugLog controls the printing of internal and I/O errors. +var DebugLog = false + +func debugln(v ...interface{}) { + if DebugLog { + log.Println(v...) + } +} diff --git a/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go new file mode 100644 index 0000000..87e1168 --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/jsonrpc/jsonrpc.go @@ -0,0 +1,226 @@ +// Package jsonrpc implements a JSON-RPC ClientCodec and ServerCodec for the rpc2 package. +// +// Beside struct types, JSONCodec allows using positional arguments. +// Use []interface{} as the type of argument when sending and receiving methods. +// +// Positional arguments example: +// server.Handle("add", func(client *rpc2.Client, args []interface{}, result *float64) error { +// *result = args[0].(float64) + args[1].(float64) +// return nil +// }) +// +// var result float64 +// client.Call("add", []interface{}{1, 2}, &result) +// +package jsonrpc + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "sync" + + "github.com/cenkalti/rpc2" +) + +type jsonCodec struct { + dec *json.Decoder // for reading JSON values + enc *json.Encoder // for writing JSON values + c io.Closer + + // temporary work space + msg message + serverRequest serverRequest + clientResponse clientResponse + + // JSON-RPC clients can use arbitrary json values as request IDs. + // Package rpc expects uint64 request IDs. + // We assign uint64 sequence numbers to incoming requests + // but save the original request ID in the pending map. + // When rpc responds, we use the sequence number in + // the response to find the original request ID. + mutex sync.Mutex // protects seq, pending + pending map[uint64]*json.RawMessage + seq uint64 +} + +// NewJSONCodec returns a new rpc2.Codec using JSON-RPC on conn. 
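+//
+// A typical pairing with rpc2 (a sketch; conn is any io.ReadWriteCloser,
+// such as a net.Conn, and srv is an *rpc2.Server):
+//
+//	// client side
+//	clt := rpc2.NewClientWithCodec(NewJSONCodec(conn))
+//	go clt.Run()
+//
+//	// server side, for each accepted connection
+//	go srv.ServeCodec(NewJSONCodec(conn))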
+func NewJSONCodec(conn io.ReadWriteCloser) rpc2.Codec { + return &jsonCodec{ + dec: json.NewDecoder(conn), + enc: json.NewEncoder(conn), + c: conn, + pending: make(map[uint64]*json.RawMessage), + } +} + +// serverRequest and clientResponse combined +type message struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// Unmarshal to +type serverRequest struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params"` + Id *json.RawMessage `json:"id"` +} +type clientResponse struct { + Id uint64 `json:"id"` + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` +} + +// to Marshal +type serverResponse struct { + Id *json.RawMessage `json:"id"` + Result interface{} `json:"result"` + Error interface{} `json:"error"` +} +type clientRequest struct { + Method string `json:"method"` + Params interface{} `json:"params"` + Id *uint64 `json:"id"` +} + +func (c *jsonCodec) ReadHeader(req *rpc2.Request, resp *rpc2.Response) error { + c.msg = message{} + if err := c.dec.Decode(&c.msg); err != nil { + return err + } + + if c.msg.Method != "" { + // request comes to server + c.serverRequest.Id = c.msg.Id + c.serverRequest.Method = c.msg.Method + c.serverRequest.Params = c.msg.Params + + req.Method = c.serverRequest.Method + + // JSON request id can be any JSON value; + // RPC package expects uint64. Translate to + // internal uint64 and save JSON on the side. + if c.serverRequest.Id == nil { + // Notification + } else { + c.mutex.Lock() + c.seq++ + c.pending[c.seq] = c.serverRequest.Id + c.serverRequest.Id = nil + req.Seq = c.seq + c.mutex.Unlock() + } + } else { + // response comes to client + err := json.Unmarshal(*c.msg.Id, &c.clientResponse.Id) + if err != nil { + return err + } + c.clientResponse.Result = c.msg.Result + c.clientResponse.Error = c.msg.Error + + resp.Error = "" + resp.Seq = c.clientResponse.Id + if c.clientResponse.Error != nil || c.clientResponse.Result == nil { + x, ok := c.clientResponse.Error.(string) + if !ok { + return fmt.Errorf("invalid error %v", c.clientResponse.Error) + } + if x == "" { + x = "unspecified error" + } + resp.Error = x + } + } + return nil +} + +var errMissingParams = errors.New("jsonrpc: request body missing params") + +func (c *jsonCodec) ReadRequestBody(x interface{}) error { + if x == nil { + return nil + } + if c.serverRequest.Params == nil { + return errMissingParams + } + + var err error + + // Check if x points to a slice of any kind + rt := reflect.TypeOf(x) + if rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Slice { + // If it's a slice, unmarshal as is + err = json.Unmarshal(*c.serverRequest.Params, x) + } else { + // Anything else unmarshal into a slice containing x + params := &[]interface{}{x} + err = json.Unmarshal(*c.serverRequest.Params, params) + } + + return err +} + +func (c *jsonCodec) ReadResponseBody(x interface{}) error { + if x == nil { + return nil + } + return json.Unmarshal(*c.clientResponse.Result, x) +} + +func (c *jsonCodec) WriteRequest(r *rpc2.Request, param interface{}) error { + req := &clientRequest{Method: r.Method} + + // Check if param is a slice of any kind + if param != nil && reflect.TypeOf(param).Kind() == reflect.Slice { + // If it's a slice, leave as is + req.Params = param + } else { + // Put anything else into a slice + req.Params = []interface{}{param} + } + + if r.Seq == 0 { + // Notification + req.Id = nil + } else { + 
seq := r.Seq + req.Id = &seq + } + return c.enc.Encode(req) +} + +var null = json.RawMessage([]byte("null")) + +func (c *jsonCodec) WriteResponse(r *rpc2.Response, x interface{}) error { + c.mutex.Lock() + b, ok := c.pending[r.Seq] + if !ok { + c.mutex.Unlock() + return errors.New("invalid sequence number in response") + } + delete(c.pending, r.Seq) + c.mutex.Unlock() + + if b == nil { + // Invalid request so no id. Use JSON null. + b = &null + } + resp := serverResponse{Id: b} + if r.Error == "" { + resp.Result = x + } else { + resp.Error = r.Error + } + return c.enc.Encode(resp) +} + +func (c *jsonCodec) Close() error { + return c.c.Close() +} diff --git a/vendor/github.com/cenkalti/rpc2/server.go b/vendor/github.com/cenkalti/rpc2/server.go new file mode 100644 index 0000000..2a5be7e --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/server.go @@ -0,0 +1,181 @@ +package rpc2 + +import ( + "io" + "log" + "net" + "reflect" + "unicode" + "unicode/utf8" + + "github.com/cenkalti/hub" +) + +// Precompute the reflect type for error. Can't use error directly +// because Typeof takes an empty interface value. This is annoying. +var typeOfError = reflect.TypeOf((*error)(nil)).Elem() +var typeOfClient = reflect.TypeOf((*Client)(nil)) + +const ( + clientConnected hub.Kind = iota + clientDisconnected +) + +// Server responds to RPC requests made by Client. +type Server struct { + handlers map[string]*handler + eventHub *hub.Hub +} + +type handler struct { + fn reflect.Value + argType reflect.Type + replyType reflect.Type +} + +type connectionEvent struct { + Client *Client +} + +type disconnectionEvent struct { + Client *Client +} + +func (connectionEvent) Kind() hub.Kind { return clientConnected } +func (disconnectionEvent) Kind() hub.Kind { return clientDisconnected } + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + handlers: make(map[string]*handler), + eventHub: &hub.Hub{}, + } +} + +// Handle registers the handler function for the given method. If a handler already exists for method, Handle panics. +func (s *Server) Handle(method string, handlerFunc interface{}) { + addHandler(s.handlers, method, handlerFunc) +} + +func addHandler(handlers map[string]*handler, mname string, handlerFunc interface{}) { + if _, ok := handlers[mname]; ok { + panic("rpc2: multiple registrations for " + mname) + } + + method := reflect.ValueOf(handlerFunc) + mtype := method.Type() + // Method needs three ins: *client, *args, *reply. + if mtype.NumIn() != 3 { + log.Panicln("method", mname, "has wrong number of ins:", mtype.NumIn()) + } + // First arg must be a pointer to rpc2.Client. + clientType := mtype.In(0) + if clientType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "client type not a pointer:", clientType) + } + if clientType != typeOfClient { + log.Panicln("method", mname, "first argument", clientType.String(), "not *rpc2.Client") + } + // Second arg need not be a pointer. + argType := mtype.In(1) + if !isExportedOrBuiltinType(argType) { + log.Panicln(mname, "argument type not exported:", argType) + } + // Third arg must be a pointer. + replyType := mtype.In(2) + if replyType.Kind() != reflect.Ptr { + log.Panicln("method", mname, "reply type not a pointer:", replyType) + } + // Reply type must be exported. + if !isExportedOrBuiltinType(replyType) { + log.Panicln("method", mname, "reply type not exported:", replyType) + } + // Method needs one out. 
+ if mtype.NumOut() != 1 { + log.Panicln("method", mname, "has wrong number of outs:", mtype.NumOut()) + } + // The return type of the method must be error. + if returnType := mtype.Out(0); returnType != typeOfError { + log.Panicln("method", mname, "returns", returnType.String(), "not error") + } + handlers[mname] = &handler{ + fn: method, + argType: argType, + replyType: replyType, + } +} + +// Is this type exported or a builtin? +func isExportedOrBuiltinType(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} + +// Is this an exported - upper case - name? +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// OnConnect registers a function to run when a client connects. +func (s *Server) OnConnect(f func(*Client)) { + s.eventHub.Subscribe(clientConnected, func(e hub.Event) { + go f(e.(connectionEvent).Client) + }) +} + +// OnDisconnect registers a function to run when a client disconnects. +func (s *Server) OnDisconnect(f func(*Client)) { + s.eventHub.Subscribe(clientDisconnected, func(e hub.Event) { + go f(e.(disconnectionEvent).Client) + }) +} + +// Accept accepts connections on the listener and serves requests +// for each incoming connection. Accept blocks; the caller typically +// invokes it in a go statement. +func (s *Server) Accept(lis net.Listener) { + for { + conn, err := lis.Accept() + if err != nil { + log.Print("rpc.Serve: accept:", err.Error()) + return + } + go s.ServeConn(conn) + } +} + +// ServeConn runs the server on a single connection. +// ServeConn blocks, serving the connection until the client hangs up. +// The caller typically invokes ServeConn in a go statement. +// ServeConn uses the gob wire format (see package gob) on the +// connection. To use an alternate codec, use ServeCodec. +func (s *Server) ServeConn(conn io.ReadWriteCloser) { + s.ServeCodec(NewGobCodec(conn)) +} + +// ServeCodec is like ServeConn but uses the specified codec to +// decode requests and encode responses. +func (s *Server) ServeCodec(codec Codec) { + s.ServeCodecWithState(codec, NewState()) +} + +// ServeCodecWithState is like ServeCodec but also gives the ability to +// associate a state variable with the client that persists across RPC calls. +func (s *Server) ServeCodecWithState(codec Codec, state *State) { + defer codec.Close() + + // Client also handles the incoming connections. 
+ c := NewClientWithCodec(codec) + c.server = true + c.handlers = s.handlers + c.State = state + + s.eventHub.Publish(connectionEvent{c}) + c.Run() + s.eventHub.Publish(disconnectionEvent{c}) +} diff --git a/vendor/github.com/cenkalti/rpc2/state.go b/vendor/github.com/cenkalti/rpc2/state.go new file mode 100644 index 0000000..7a4f23e --- /dev/null +++ b/vendor/github.com/cenkalti/rpc2/state.go @@ -0,0 +1,25 @@ +package rpc2 + +import "sync" + +type State struct { + store map[string]interface{} + m sync.RWMutex +} + +func NewState() *State { + return &State{store: make(map[string]interface{})} +} + +func (s *State) Get(key string) (value interface{}, ok bool) { + s.m.RLock() + value, ok = s.store[key] + s.m.RUnlock() + return +} + +func (s *State) Set(key string, value interface{}) { + s.m.Lock() + s.store[key] = value + s.m.Unlock() +} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml new file mode 100644 index 0000000..c516ea8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - "1.x" + - master +env: + - TAGS="" + - TAGS="-tags purego" +script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt new file mode 100644 index 0000000..24b5306 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md new file mode 100644 index 0000000..2fd8693 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -0,0 +1,67 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. 
Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod new file mode 100644 index 0000000..49f6760 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/cespare/xxhash/v2 + +go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go new file mode 100644 index 0000000..db0b35f --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -0,0 +1,236 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. 
+func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. + copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + b = b[len(d.mem):] + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go new file mode 100644 index 0000000..ad14b80 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go @@ -0,0 +1,13 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s new file mode 100644 index 0000000..d580e32 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -0,0 +1,215 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. 
+blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is CX minus the old base pointer. + SUBQ b_base+8(FP), CX + MOVQ CX, ret+32(FP) + + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go new file mode 100644 index 0000000..4a5a821 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -0,0 +1,76 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. 
+ + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go new file mode 100644 index 0000000..fc9bea7 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -0,0 +1,15 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go new file mode 100644 index 0000000..53bf76e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -0,0 +1,46 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Notes: +// +// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ +// for some discussion about these unsafe conversions. +// +// In the future it's possible that compiler optimizations will make these +// unsafe operations unnecessary: https://golang.org/issue/2205. +// +// Both of these wrapper functions still incur function call overhead since they +// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write +// for strings to squeeze out a bit more speed. Mid-stack inlining should +// eventually fix this. + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +func Sum64String(s string) uint64 { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} + +// WriteString adds more data to d. It always returns len(s), nil. +// It may be faster than Write([]byte(s)) by avoiding a copy. 
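As a quick sanity check of the string helpers declared here and in xxhash_safe.go, the following sketch (illustrative input, not from the source) shows that the zero-copy string path and the byte-slice path agree:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "hello, xxhash"

	// Sum64String avoids copying s to a []byte on the unsafe build,
	// but the resulting hash is identical either way.
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true

	// Digest.WriteString likewise matches Digest.Write of the converted bytes.
	d := xxhash.New()
	d.WriteString(s)
	fmt.Println(d.Sum64() == xxhash.Sum64String(s)) // true
}
```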
+func (d *Digest) WriteString(s string) (n int, err error) { + var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return d.Write(b) +} diff --git a/vendor/github.com/chzyer/readline/.gitignore b/vendor/github.com/chzyer/readline/.gitignore new file mode 100644 index 0000000..a3062be --- /dev/null +++ b/vendor/github.com/chzyer/readline/.gitignore @@ -0,0 +1 @@ +.vscode/* diff --git a/vendor/github.com/chzyer/readline/.travis.yml b/vendor/github.com/chzyer/readline/.travis.yml new file mode 100644 index 0000000..9c35955 --- /dev/null +++ b/vendor/github.com/chzyer/readline/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.x +script: + - GOOS=windows go install github.com/chzyer/readline/example/... + - GOOS=linux go install github.com/chzyer/readline/example/... + - GOOS=darwin go install github.com/chzyer/readline/example/... + - go test -race -v diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md new file mode 100644 index 0000000..14ff5be --- /dev/null +++ b/vendor/github.com/chzyer/readline/CHANGELOG.md @@ -0,0 +1,58 @@ +# ChangeLog + +### 1.4 - 2016-07-25 + +* [#60][60] Support dynamic autocompletion +* Fix ANSI parser on Windows +* Fix wrong column width in complete mode on Windows +* Remove dependent package "golang.org/x/crypto/ssh/terminal" + +### 1.3 - 2016-05-09 + +* [#38][38] add SetChildren for prefix completer interface +* [#42][42] improve multiple lines compatibility +* [#43][43] remove sub-package(runes) for gopkg compatibility +* [#46][46] Auto complete with space prefixed line +* [#48][48] support suspend process (ctrl+Z) +* [#49][49] fix bug that check equals with previous command +* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty + +### 1.2 - 2016-03-05 + +* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) +* [#23][23], support stdin remapping +* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. +* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. +* Supports performs even stdin/stdout is not a tty. +* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. +* [#28][28], fixes the history is not working as expected. +* [#33][33], vim mode now support `c`, `d`, `x (delete character)`, `r (replace character)` + +### 1.1 - 2015-11-20 + +* [#12][12] Add support for key ``/``/`` +* Only enter raw mode as needed (calling `Readline()`), program will receive signal(e.g. Ctrl+C) if not interact with `readline`. +* Bugs fixed for `PrefixCompleter` +* Press `Ctrl+D` in empty line will cause `io.EOF` in error, Press `Ctrl+C` in anytime will cause `ErrInterrupt` instead of `io.EOF`, this will privodes a shell-like user experience. +* Customable Interrupt/EOF prompt in `Config` +* [#17][17] Change atomic package to use 32bit function to let it runnable on arm 32bit devices +* Provides a new password user experience(`readline.ReadPasswordEx()`). 
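Several entries above ([#38], [#60]) concern the prefix completer and dynamic completion. The sketch below shows one way to wire a `PrefixCompleter` into `Config.AutoComplete`, using only the vendored API that appears later in this diff; the command names and dynamic candidates are made up for illustration.

```
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	// Static items via PcItem; dynamic candidates via PcItemDynamic ([#60]).
	completer := readline.NewPrefixCompleter(
		readline.PcItem("help"),
		readline.PcItem("show",
			readline.PcItemDynamic(func(line string) []string {
				// A real program would derive these from its current state.
				return []string{"status", "version"}
			}),
		),
	)

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		line, err := rl.Readline() // Tab cycles through matching candidates
		if err != nil {            // io.EOF or readline.ErrInterrupt
			break
		}
		fmt.Println("got:", line)
	}
}
```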
+ +### 1.0 - 2015-10-14 + +* Initial public release. + +[12]: https://github.com/chzyer/readline/pull/12 +[17]: https://github.com/chzyer/readline/pull/17 +[23]: https://github.com/chzyer/readline/pull/23 +[27]: https://github.com/chzyer/readline/pull/27 +[28]: https://github.com/chzyer/readline/pull/28 +[33]: https://github.com/chzyer/readline/pull/33 +[38]: https://github.com/chzyer/readline/pull/38 +[42]: https://github.com/chzyer/readline/pull/42 +[43]: https://github.com/chzyer/readline/pull/43 +[46]: https://github.com/chzyer/readline/pull/46 +[48]: https://github.com/chzyer/readline/pull/48 +[49]: https://github.com/chzyer/readline/pull/49 +[53]: https://github.com/chzyer/readline/pull/53 +[60]: https://github.com/chzyer/readline/pull/60 diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE new file mode 100644 index 0000000..c9afab3 --- /dev/null +++ b/vendor/github.com/chzyer/readline/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Chzyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md new file mode 100644 index 0000000..fab974b --- /dev/null +++ b/vendor/github.com/chzyer/readline/README.md @@ -0,0 +1,114 @@ +[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) +[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) +[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) +[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) +[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) + +


+ +A powerful readline library in `Linux` `macOS` `Windows` `Solaris` + +## Guide + +* [Demo](example/readline-demo/readline-demo.go) +* [Shortcut](doc/shortcut.md) + +## Repos using readline + +[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach) +[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto) +[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire) +[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg) +[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql) +[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman) +[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp) +[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell) +[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001) +[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p) + + +## Feedback + +If you have any questions, please submit a github issue and any pull requests is welcomed :) + +* [https://twitter.com/chzyer](https://twitter.com/chzyer) +* [http://weibo.com/2145262190](http://weibo.com/2145262190) + + +## Backers + +Love Readline? Help me keep it alive by donating funds to cover project expenses!
+[[Become a backer](https://opencollective.com/readline#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go new file mode 100644 index 0000000..63b908c --- /dev/null +++ b/vendor/github.com/chzyer/readline/ansi_windows.go @@ -0,0 +1,249 @@ +// +build windows + +package readline + +import ( + "bufio" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + "unsafe" +) + +const ( + _ = uint16(0) + COLOR_FBLUE = 0x0001 + COLOR_FGREEN = 0x0002 + COLOR_FRED = 0x0004 + COLOR_FINTENSITY = 0x0008 + + COLOR_BBLUE = 0x0010 + COLOR_BGREEN = 0x0020 + COLOR_BRED = 0x0040 + COLOR_BINTENSITY = 0x0080 + + COMMON_LVB_UNDERSCORE = 0x8000 + COMMON_LVB_BOLD = 0x0007 +) + +var ColorTableFg = []word{ + 0, // 30: Black + COLOR_FRED, // 31: Red + COLOR_FGREEN, // 32: Green + COLOR_FRED | COLOR_FGREEN, // 33: Yellow + COLOR_FBLUE, // 34: Blue + COLOR_FRED | COLOR_FBLUE, // 35: Magenta + COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan + COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White +} + +var ColorTableBg = []word{ + 0, // 40: Black + COLOR_BRED, // 41: Red + COLOR_BGREEN, // 42: Green + COLOR_BRED | COLOR_BGREEN, // 43: Yellow + COLOR_BBLUE, // 44: Blue + COLOR_BRED | COLOR_BBLUE, // 45: Magenta + COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan + COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White +} + +type ANSIWriter struct { + target io.Writer + wg sync.WaitGroup + ctx *ANSIWriterCtx + sync.Mutex +} + +func NewANSIWriter(w io.Writer) *ANSIWriter { + a := &ANSIWriter{ + target: w, + ctx: NewANSIWriterCtx(w), + } + return a +} + +func (a *ANSIWriter) Close() error { + a.wg.Wait() + return nil +} + +type ANSIWriterCtx struct { + isEsc bool + isEscSeq bool + arg []string + target *bufio.Writer + wantFlush bool +} + +func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { + return &ANSIWriterCtx{ + target: bufio.NewWriter(target), + } +} + +func (a *ANSIWriterCtx) Flush() { + a.target.Flush() +} + +func (a *ANSIWriterCtx) process(r rune) bool { + if a.wantFlush { + if r == 0 || r == CharEsc { + a.wantFlush = false + a.target.Flush() + } + } + if a.isEscSeq { + a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) + return true + } + + switch r { + case CharEsc: + a.isEsc = true + case '[': + if a.isEsc { + a.arg = nil + a.isEscSeq = true + a.isEsc = false + break + } + fallthrough + default: + a.target.WriteRune(r) + a.wantFlush = true + } + return true +} + +func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { + arg := *argptr + var err error + + if r >= 'A' && r <= 'D' { + count := short(GetInt(arg, 1)) + info, err := GetConsoleScreenBufferInfo() + if err != nil { + return false + } + switch r { + case 'A': // up + info.dwCursorPosition.y -= count + case 'B': // down + info.dwCursorPosition.y += count + case 'C': // right + info.dwCursorPosition.x += count + case 'D': // left + info.dwCursorPosition.x -= count + } + SetConsoleCursorPosition(&info.dwCursorPosition) + return false + } + + switch r { + case 'J': + killLines() + case 'K': + eraseLine() + case 'm': + color := word(0) + for _, item := range arg { + var c int + c, err = strconv.Atoi(item) + if err != nil { + w.WriteString("[" + strings.Join(arg, ";") + "m") + break + } + if c >= 30 && c < 40 
{ + color ^= COLOR_FINTENSITY + color |= ColorTableFg[c-30] + } else if c >= 40 && c < 50 { + color ^= COLOR_BINTENSITY + color |= ColorTableBg[c-40] + } else if c == 4 { + color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] + } else if c == 1 { + color |= COMMON_LVB_BOLD | COLOR_FINTENSITY + } else { // unknown code treat as reset + color = ColorTableFg[7] + } + } + if err != nil { + break + } + kernel.SetConsoleTextAttribute(stdout, uintptr(color)) + case '\007': // set title + case ';': + if len(arg) == 0 || arg[len(arg)-1] != "" { + arg = append(arg, "") + *argptr = arg + } + return true + default: + if len(arg) == 0 { + arg = append(arg, "") + } + arg[len(arg)-1] += string(r) + *argptr = arg + return true + } + *argptr = nil + return false +} + +func (a *ANSIWriter) Write(b []byte) (int, error) { + a.Lock() + defer a.Unlock() + + off := 0 + for len(b) > off { + r, size := utf8.DecodeRune(b[off:]) + if size == 0 { + return off, io.ErrShortWrite + } + off += size + a.ctx.process(r) + } + a.ctx.Flush() + return off, nil +} + +func killLines() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x + size += sbi.dwCursorPosition.x + + var written int + kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} + +func eraseLine() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := sbi.dwSize.x + sbi.dwCursorPosition.x = 0 + var written int + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go new file mode 100644 index 0000000..c08c994 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete.go @@ -0,0 +1,285 @@ +package readline + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +type AutoCompleter interface { + // Readline will pass the whole line and current offset to it + // Completer need to pass all the candidates, and how long they shared the same characters in line + // Example: + // [go, git, git-shell, grep] + // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 + // Do("gi", 2) => ["t", "t-shell"], 2 + // Do("git", 3) => ["", "-shell"], 3 + Do(line []rune, pos int) (newLine [][]rune, length int) +} + +type TabCompleter struct{} + +func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { + return [][]rune{[]rune("\t")}, 0 +} + +type opCompleter struct { + w io.Writer + op *Operation + width int + + inCompleteMode bool + inSelectMode bool + candidate [][]rune + candidateSource []rune + candidateOff int + candidateChoise int + candidateColNum int +} + +func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { + return &opCompleter{ + w: w, + op: op, + width: width, + } +} + +func (o *opCompleter) doSelect() { + if len(o.candidate) == 1 { + o.op.buf.WriteRunes(o.candidate[0]) + o.ExitCompleteMode(false) + return + } + o.nextCandidate(1) + o.CompleteRefresh() +} + +func (o *opCompleter) nextCandidate(i int) { + o.candidateChoise += i + o.candidateChoise = o.candidateChoise % len(o.candidate) + if o.candidateChoise < 0 { + o.candidateChoise = len(o.candidate) + 
o.candidateChoise + } +} + +func (o *opCompleter) OnComplete() bool { + if o.width == 0 { + return false + } + if o.IsInCompleteSelectMode() { + o.doSelect() + return true + } + + buf := o.op.buf + rs := buf.Runes() + + if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { + o.EnterCompleteSelectMode() + o.doSelect() + return true + } + + o.ExitCompleteSelectMode() + o.candidateSource = rs + newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) + if len(newLines) == 0 { + o.ExitCompleteMode(false) + return true + } + + // only Aggregate candidates in non-complete mode + if !o.IsInCompleteMode() { + if len(newLines) == 1 { + buf.WriteRunes(newLines[0]) + o.ExitCompleteMode(false) + return true + } + + same, size := runes.Aggregate(newLines) + if size > 0 { + buf.WriteRunes(same) + o.ExitCompleteMode(false) + return true + } + } + + o.EnterCompleteMode(offset, newLines) + return true +} + +func (o *opCompleter) IsInCompleteSelectMode() bool { + return o.inSelectMode +} + +func (o *opCompleter) IsInCompleteMode() bool { + return o.inCompleteMode +} + +func (o *opCompleter) HandleCompleteSelect(r rune) bool { + next := true + switch r { + case CharEnter, CharCtrlJ: + next = false + o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) + o.ExitCompleteMode(false) + case CharLineStart: + num := o.candidateChoise % o.candidateColNum + o.nextCandidate(-num) + case CharLineEnd: + num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 + o.candidateChoise += num + if o.candidateChoise >= len(o.candidate) { + o.candidateChoise = len(o.candidate) - 1 + } + case CharBackspace: + o.ExitCompleteSelectMode() + next = false + case CharTab, CharForward: + o.doSelect() + case CharBell, CharInterrupt: + o.ExitCompleteMode(true) + next = false + case CharNext: + tmpChoise := o.candidateChoise + o.candidateColNum + if tmpChoise >= o.getMatrixSize() { + tmpChoise -= o.getMatrixSize() + } else if tmpChoise >= len(o.candidate) { + tmpChoise += o.candidateColNum + tmpChoise -= o.getMatrixSize() + } + o.candidateChoise = tmpChoise + case CharBackward: + o.nextCandidate(-1) + case CharPrev: + tmpChoise := o.candidateChoise - o.candidateColNum + if tmpChoise < 0 { + tmpChoise += o.getMatrixSize() + if tmpChoise >= len(o.candidate) { + tmpChoise -= o.candidateColNum + } + } + o.candidateChoise = tmpChoise + default: + next = false + o.ExitCompleteSelectMode() + } + if next { + o.CompleteRefresh() + return true + } + return false +} + +func (o *opCompleter) getMatrixSize() int { + line := len(o.candidate) / o.candidateColNum + if len(o.candidate)%o.candidateColNum != 0 { + line++ + } + return line * o.candidateColNum +} + +func (o *opCompleter) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opCompleter) CompleteRefresh() { + if !o.inCompleteMode { + return + } + lineCnt := o.op.buf.CursorLineCount() + colWidth := 0 + for _, c := range o.candidate { + w := runes.WidthAll(c) + if w > colWidth { + colWidth = w + } + } + colWidth += o.candidateOff + 1 + same := o.op.buf.RuneSlice(-o.candidateOff) + + // -1 to avoid reach the end of line + width := o.width - 1 + colNum := width / colWidth + if colNum != 0 { + colWidth += (width - (colWidth * colNum)) / colNum + } + + o.candidateColNum = colNum + buf := bufio.NewWriter(o.w) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + + colIdx := 0 + lines := 1 + buf.WriteString("\033[J") + for idx, c := range o.candidate { + inSelect := idx == o.candidateChoise && o.IsInCompleteSelectMode() + if inSelect { + 
buf.WriteString("\033[30;47m") + } + buf.WriteString(string(same)) + buf.WriteString(string(c)) + buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same))) + + if inSelect { + buf.WriteString("\033[0m") + } + + colIdx++ + if colIdx == colNum { + buf.WriteString("\n") + lines++ + colIdx = 0 + } + } + + // move back + fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) + fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) + buf.Flush() +} + +func (o *opCompleter) aggCandidate(candidate [][]rune) int { + offset := 0 + for i := 0; i < len(candidate[0]); i++ { + for j := 0; j < len(candidate)-1; j++ { + if i > len(candidate[j]) { + goto aggregate + } + if candidate[j][i] != candidate[j+1][i] { + goto aggregate + } + } + offset = i + } +aggregate: + return offset +} + +func (o *opCompleter) EnterCompleteSelectMode() { + o.inSelectMode = true + o.candidateChoise = -1 + o.CompleteRefresh() +} + +func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { + o.inCompleteMode = true + o.candidate = candidate + o.candidateOff = offset + o.CompleteRefresh() +} + +func (o *opCompleter) ExitCompleteSelectMode() { + o.inSelectMode = false + o.candidate = nil + o.candidateChoise = -1 + o.candidateOff = -1 + o.candidateSource = nil +} + +func (o *opCompleter) ExitCompleteMode(revent bool) { + o.inCompleteMode = false + o.ExitCompleteSelectMode() +} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go new file mode 100644 index 0000000..58d7248 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_helper.go @@ -0,0 +1,165 @@ +package readline + +import ( + "bytes" + "strings" +) + +// Caller type for dynamic completion +type DynamicCompleteFunc func(string) []string + +type PrefixCompleterInterface interface { + Print(prefix string, level int, buf *bytes.Buffer) + Do(line []rune, pos int) (newLine [][]rune, length int) + GetName() []rune + GetChildren() []PrefixCompleterInterface + SetChildren(children []PrefixCompleterInterface) +} + +type DynamicPrefixCompleterInterface interface { + PrefixCompleterInterface + IsDynamic() bool + GetDynamicNames(line []rune) [][]rune +} + +type PrefixCompleter struct { + Name []rune + Dynamic bool + Callback DynamicCompleteFunc + Children []PrefixCompleterInterface +} + +func (p *PrefixCompleter) Tree(prefix string) string { + buf := bytes.NewBuffer(nil) + p.Print(prefix, 0, buf) + return buf.String() +} + +func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { + if strings.TrimSpace(string(p.GetName())) != "" { + buf.WriteString(prefix) + if level > 0 { + buf.WriteString("├") + buf.WriteString(strings.Repeat("─", (level*4)-2)) + buf.WriteString(" ") + } + buf.WriteString(string(p.GetName()) + "\n") + level++ + } + for _, ch := range p.GetChildren() { + ch.Print(prefix, level, buf) + } +} + +func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { + Print(p, prefix, level, buf) +} + +func (p *PrefixCompleter) IsDynamic() bool { + return p.Dynamic +} + +func (p *PrefixCompleter) GetName() []rune { + return p.Name +} + +func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { + var names = [][]rune{} + for _, name := range p.Callback(string(line)) { + names = append(names, []rune(name+" ")) + } + return names +} + +func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { + return p.Children +} + +func (p *PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { + 
p.Children = children +} + +func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { + return PcItem("", pc...) +} + +func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { + name += " " + return &PrefixCompleter{ + Name: []rune(name), + Dynamic: false, + Children: pc, + } +} + +func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { + return &PrefixCompleter{ + Callback: callback, + Dynamic: true, + Children: pc, + } +} + +func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { + line = runes.TrimSpaceLeft(line[:pos]) + goNext := false + var lineCompleter PrefixCompleterInterface + for _, child := range p.GetChildren() { + childNames := make([][]rune, 1) + + childDynamic, ok := child.(DynamicPrefixCompleterInterface) + if ok && childDynamic.IsDynamic() { + childNames = childDynamic.GetDynamicNames(origLine) + } else { + childNames[0] = child.GetName() + } + + for _, childName := range childNames { + if len(line) >= len(childName) { + if runes.HasPrefix(line, childName) { + if len(line) == len(childName) { + newLine = append(newLine, []rune{' '}) + } else { + newLine = append(newLine, childName) + } + offset = len(childName) + lineCompleter = child + goNext = true + } + } else { + if runes.HasPrefix(childName, line) { + newLine = append(newLine, childName[len(line):]) + offset = len(line) + lineCompleter = child + } + } + } + } + + if len(newLine) != 1 { + return + } + + tmpLine := make([]rune, 0, len(line)) + for i := offset; i < len(line); i++ { + if line[i] == ' ' { + continue + } + + tmpLine = append(tmpLine, line[i:]...) 
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) + } + + if goNext { + return doInternal(lineCompleter, nil, 0, origLine) + } + return +} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go new file mode 100644 index 0000000..5ceadd8 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_segment.go @@ -0,0 +1,82 @@ +package readline + +type SegmentCompleter interface { + // a + // |- a1 + // |--- a11 + // |- a2 + // b + // input: + // DoTree([], 0) [a, b] + // DoTree([a], 1) [a] + // DoTree([a, ], 0) [a1, a2] + // DoTree([a, a], 1) [a1, a2] + // DoTree([a, a1], 2) [a1] + // DoTree([a, a1, ], 0) [a11] + // DoTree([a, a1, a], 1) [a11] + DoSegment([][]rune, int) [][]rune +} + +type dumpSegmentCompleter struct { + f func([][]rune, int) [][]rune +} + +func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { + return d.f(segment, n) +} + +func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { + return &SegmentComplete{&dumpSegmentCompleter{f}} +} + +func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { + return &SegmentComplete{ + SegmentCompleter: completer, + } +} + +type SegmentComplete struct { + SegmentCompleter +} + +func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { + ret := make([][]rune, 0, len(cands)) + lastSegment := segments[len(segments)-1] + for _, cand := range cands { + if !runes.HasPrefix(cand, lastSegment) { + continue + } + ret = append(ret, cand[len(lastSegment):]) + } + return ret, idx +} + +func SplitSegment(line []rune, pos int) ([][]rune, int) { + segs := [][]rune{} + lastIdx := -1 + line = line[:pos] + pos = 0 + for idx, l := range line { + if l == ' ' { + pos = 0 + segs = append(segs, line[lastIdx+1:idx]) + lastIdx = idx + } else { + pos++ + } + } + segs = append(segs, line[lastIdx+1:]) + return segs, pos +} + +func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { + + segment, idx := SplitSegment(line, pos) + + cands := c.DoSegment(segment, idx) + newLine, offset = RetSegment(segment, cands, idx) + for idx := range newLine { + newLine[idx] = append(newLine[idx], ' ') + } + return newLine, offset +} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go new file mode 100644 index 0000000..6b17c46 --- /dev/null +++ b/vendor/github.com/chzyer/readline/history.go @@ -0,0 +1,330 @@ +package readline + +import ( + "bufio" + "container/list" + "fmt" + "os" + "strings" + "sync" +) + +type hisItem struct { + Source []rune + Version int64 + Tmp []rune +} + +func (h *hisItem) Clean() { + h.Source = nil + h.Tmp = nil +} + +type opHistory struct { + cfg *Config + history *list.List + historyVer int64 + current *list.Element + fd *os.File + fdLock sync.Mutex + enable bool +} + +func newOpHistory(cfg *Config) (o *opHistory) { + o = &opHistory{ + cfg: cfg, + history: list.New(), + enable: true, + } + return o +} + +func (o *opHistory) Reset() { + o.history = list.New() + o.current = nil +} + +func (o *opHistory) IsHistoryClosed() bool { + o.fdLock.Lock() + defer o.fdLock.Unlock() + return o.fd.Fd() == ^(uintptr(0)) +} + +func (o *opHistory) Init() { + if o.IsHistoryClosed() { + o.initHistory() + } +} + +func (o *opHistory) initHistory() { + if o.cfg.HistoryFile != "" { + o.historyUpdatePath(o.cfg.HistoryFile) + } +} + +// only called by newOpHistory +func (o *opHistory) historyUpdatePath(path string) { + o.fdLock.Lock() + defer 
o.fdLock.Unlock() + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return + } + o.fd = f + r := bufio.NewReader(o.fd) + total := 0 + for ; ; total++ { + line, err := r.ReadString('\n') + if err != nil { + break + } + // ignore the empty line + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + o.Push([]rune(line)) + o.Compact() + } + if total > o.cfg.HistoryLimit { + o.rewriteLocked() + } + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Compact() { + for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { + o.history.Remove(o.history.Front()) + } +} + +func (o *opHistory) Rewrite() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + o.rewriteLocked() +} + +func (o *opHistory) rewriteLocked() { + if o.cfg.HistoryFile == "" { + return + } + + tmpFile := o.cfg.HistoryFile + ".tmp" + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) + if err != nil { + return + } + + buf := bufio.NewWriter(fd) + for elem := o.history.Front(); elem != nil; elem = elem.Next() { + buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n") + } + buf.Flush() + + // replace history file + if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { + fd.Close() + return + } + + if o.fd != nil { + o.fd.Close() + } + // fd is write only, just satisfy what we need. + o.fd = fd +} + +func (o *opHistory) Close() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + if o.fd != nil { + o.fd.Close() + } +} + +func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Prev() { + item := o.showItem(elem.Value) + if isNewSearch { + start += len(rs) + } + if elem == o.current { + if len(item) >= start { + item = item[:start] + } + } + idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Next() { + item := o.showItem(elem.Value) + if isNewSearch { + start -= len(rs) + if start < 0 { + start = 0 + } + } + if elem == o.current { + if len(item)-1 >= start { + item = item[start:] + } else { + continue + } + } + idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + if elem == o.current { + idx += start + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) showItem(obj interface{}) []rune { + item := obj.(*hisItem) + if item.Version == o.historyVer { + return item.Tmp + } + return item.Source +} + +func (o *opHistory) Prev() []rune { + if o.current == nil { + return nil + } + current := o.current.Prev() + if current == nil { + return nil + } + o.current = current + return runes.Copy(o.showItem(current.Value)) +} + +func (o *opHistory) Next() ([]rune, bool) { + if o.current == nil { + return nil, false + } + current := o.current.Next() + if current == nil { + return nil, false + } + + o.current = current + return runes.Copy(o.showItem(current.Value)), true +} + +// Disable the current history +func (o *opHistory) Disable() { + o.enable = false +} + +// Enable the current history +func (o *opHistory) Enable() { + o.enable = true +} + +func (o *opHistory) debug() { + Debug("-------") + for item := o.history.Front(); item != nil; item = item.Next() { + Debug(fmt.Sprintf("%+v", item.Value)) + } +} + +// save history +func (o *opHistory) New(current []rune) (err error) { + 
+ // history deactivated + if !o.enable { + return nil + } + + current = runes.Copy(current) + + // if just use last command without modify + // just clean lastest history + if back := o.history.Back(); back != nil { + prev := back.Prev() + if prev != nil { + if runes.Equal(current, prev.Value.(*hisItem).Source) { + o.current = o.history.Back() + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + } + + if len(current) == 0 { + o.current = o.history.Back() + if o.current != nil { + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + + if o.current != o.history.Back() { + // move history item to current command + currentItem := o.current.Value.(*hisItem) + // set current to last item + o.current = o.history.Back() + + current = runes.Copy(currentItem.Tmp) + } + + // err only can be a IO error, just report + err = o.Update(current, true) + + // push a new one to commit current command + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Revert() { + o.historyVer++ + o.current = o.history.Back() +} + +func (o *opHistory) Update(s []rune, commit bool) (err error) { + o.fdLock.Lock() + defer o.fdLock.Unlock() + s = runes.Copy(s) + if o.current == nil { + o.Push(s) + o.Compact() + return + } + r := o.current.Value.(*hisItem) + r.Version = o.historyVer + if commit { + r.Source = s + if o.fd != nil { + // just report the error + _, err = o.fd.Write([]byte(string(r.Source) + "\n")) + } + } else { + r.Tmp = append(r.Tmp[:0], s...) + } + o.current.Value = r + o.Compact() + return +} + +func (o *opHistory) Push(s []rune) { + s = runes.Copy(s) + elem := o.history.PushBack(&hisItem{Source: s}) + o.current = elem +} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go new file mode 100644 index 0000000..4c31624 --- /dev/null +++ b/vendor/github.com/chzyer/readline/operation.go @@ -0,0 +1,531 @@ +package readline + +import ( + "errors" + "io" + "sync" +) + +var ( + ErrInterrupt = errors.New("Interrupt") +) + +type InterruptError struct { + Line []rune +} + +func (*InterruptError) Error() string { + return "Interrupted" +} + +type Operation struct { + m sync.Mutex + cfg *Config + t *Terminal + buf *RuneBuffer + outchan chan []rune + errchan chan error + w io.Writer + + history *opHistory + *opSearch + *opCompleter + *opPassword + *opVim +} + +func (o *Operation) SetBuffer(what string) { + o.buf.Set([]rune(what)) +} + +type wrapWriter struct { + r *Operation + t *Terminal + target io.Writer +} + +func (w *wrapWriter) Write(b []byte) (int, error) { + if !w.t.IsReading() { + return w.target.Write(b) + } + + var ( + n int + err error + ) + w.r.buf.Refresh(func() { + n, err = w.target.Write(b) + }) + + if w.r.IsSearchMode() { + w.r.SearchRefresh(-1) + } + if w.r.IsInCompleteMode() { + w.r.CompleteRefresh() + } + return n, err +} + +func NewOperation(t *Terminal, cfg *Config) *Operation { + width := cfg.FuncGetWidth() + op := &Operation{ + t: t, + buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), + outchan: make(chan []rune), + errchan: make(chan error, 1), + } + op.w = op.buf.w + op.SetConfig(cfg) + op.opVim = newVimMode(op) + op.opCompleter = newOpCompleter(op.buf.w, op, width) + op.opPassword = newOpPassword(op) + op.cfg.FuncOnWidthChanged(func() { + newWidth := cfg.FuncGetWidth() + op.opCompleter.OnWidthChange(newWidth) + op.opSearch.OnWidthChange(newWidth) + op.buf.OnWidthChange(newWidth) + }) + go op.ioloop() + return op +} + +func (o *Operation) SetPrompt(s string) { + o.buf.SetPrompt(s) +} + 
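Tying the history machinery above to the public configuration: a hedged sketch using the history-related `Config` fields declared later in readline.go. The file path and limit are made-up values; the behavior notes summarize the vendored code.

```
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rl, err := readline.NewEx(&readline.Config{
		Prompt:            "> ",
		HistoryFile:       "/tmp/readline-demo.history", // hypothetical path
		HistoryLimit:      200,                          // default 500; -1 disables history
		HistorySearchFold: true,                         // case-insensitive Ctrl+R / Ctrl+S search
		// DisableAutoSaveHistory stays false, so each submitted line is
		// appended to HistoryFile automatically.
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	line, err := rl.Readline()
	if err != nil { // io.EOF on Ctrl+D, readline.ErrInterrupt on Ctrl+C
		return
	}
	fmt.Println(line)
}
```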
+func (o *Operation) SetMaskRune(r rune) { + o.buf.SetMask(r) +} + +func (o *Operation) GetConfig() *Config { + o.m.Lock() + cfg := *o.cfg + o.m.Unlock() + return &cfg +} + +func (o *Operation) ioloop() { + for { + keepInSearchMode := false + keepInCompleteMode := false + r := o.t.ReadRune() + if o.GetConfig().FuncFilterInputRune != nil { + var process bool + r, process = o.GetConfig().FuncFilterInputRune(r) + if !process { + o.buf.Refresh(nil) // to refresh the line + continue // ignore this rune + } + } + + if r == 0 { // io.EOF + if o.buf.Len() == 0 { + o.buf.Clean() + select { + case o.errchan <- io.EOF: + } + break + } else { + // if stdin got io.EOF and there is something left in buffer, + // let's flush them by sending CharEnter. + // And we will got io.EOF int next loop. + r = CharEnter + } + } + isUpdateHistory := true + + if o.IsInCompleteSelectMode() { + keepInCompleteMode = o.HandleCompleteSelect(r) + if keepInCompleteMode { + continue + } + + o.buf.Refresh(nil) + switch r { + case CharEnter, CharCtrlJ: + o.history.Update(o.buf.Runes(), false) + fallthrough + case CharInterrupt: + o.t.KickRead() + fallthrough + case CharBell: + continue + } + } + + if o.IsEnableVimMode() { + r = o.HandleVim(r, o.t.ReadRune) + if r == 0 { + continue + } + } + + switch r { + case CharBell: + if o.IsSearchMode() { + o.ExitSearchMode(true) + o.buf.Refresh(nil) + } + if o.IsInCompleteMode() { + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + } + case CharTab: + if o.GetConfig().AutoComplete == nil { + o.t.Bell() + break + } + if o.OnComplete() { + keepInCompleteMode = true + } else { + o.t.Bell() + break + } + + case CharBckSearch: + if !o.SearchMode(S_DIR_BCK) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharCtrlU: + o.buf.KillFront() + case CharFwdSearch: + if !o.SearchMode(S_DIR_FWD) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharKill: + o.buf.Kill() + keepInCompleteMode = true + case MetaForward: + o.buf.MoveToNextWord() + case CharTranspose: + o.buf.Transpose() + case MetaBackward: + o.buf.MoveToPrevWord() + case MetaDelete: + o.buf.DeleteWord() + case CharLineStart: + o.buf.MoveToLineStart() + case CharLineEnd: + o.buf.MoveToLineEnd() + case CharBackspace, CharCtrlH: + if o.IsSearchMode() { + o.SearchBackspace() + keepInSearchMode = true + break + } + + if o.buf.Len() == 0 { + o.t.Bell() + break + } + o.buf.Backspace() + if o.IsInCompleteMode() { + o.OnComplete() + } + case CharCtrlZ: + o.buf.Clean() + o.t.SleepToResume() + o.Refresh() + case CharCtrlL: + ClearScreen(o.w) + o.Refresh() + case MetaBackspace, CharCtrlW: + o.buf.BackEscapeWord() + case CharCtrlY: + o.buf.Yank() + case CharEnter, CharCtrlJ: + if o.IsSearchMode() { + o.ExitSearchMode(false) + } + o.buf.MoveToLineEnd() + var data []rune + if !o.GetConfig().UniqueEditLine { + o.buf.WriteRune('\n') + data = o.buf.Reset() + data = data[:len(data)-1] // trim \n + } else { + o.buf.Clean() + data = o.buf.Reset() + } + o.outchan <- data + if !o.GetConfig().DisableAutoSaveHistory { + // ignore IO error + _ = o.history.New(data) + } else { + isUpdateHistory = false + } + case CharBackward: + o.buf.MoveBackward() + case CharForward: + o.buf.MoveForward() + case CharPrev: + buf := o.history.Prev() + if buf != nil { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharNext: + buf, ok := o.history.Next() + if ok { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharDelete: + if o.buf.Len() > 0 || !o.IsNormalMode() { + o.t.KickRead() + if !o.buf.Delete() { + o.t.Bell() + } + break + } + + // treat as EOF + 
if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(o.GetConfig().EOFPrompt + "\n") + } + o.buf.Reset() + isUpdateHistory = false + o.history.Revert() + o.errchan <- io.EOF + if o.GetConfig().UniqueEditLine { + o.buf.Clean() + } + case CharInterrupt: + if o.IsSearchMode() { + o.t.KickRead() + o.ExitSearchMode(true) + break + } + if o.IsInCompleteMode() { + o.t.KickRead() + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + break + } + o.buf.MoveToLineEnd() + o.buf.Refresh(nil) + hint := o.GetConfig().InterruptPrompt + "\n" + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(hint) + } + remain := o.buf.Reset() + if !o.GetConfig().UniqueEditLine { + remain = remain[:len(remain)-len([]rune(hint))] + } + isUpdateHistory = false + o.history.Revert() + o.errchan <- &InterruptError{remain} + default: + if o.IsSearchMode() { + o.SearchChar(r) + keepInSearchMode = true + break + } + o.buf.WriteRune(r) + if o.IsInCompleteMode() { + o.OnComplete() + keepInCompleteMode = true + } + } + + listener := o.GetConfig().Listener + if listener != nil { + newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) + if ok { + o.buf.SetWithIdx(newPos, newLine) + } + } + + o.m.Lock() + if !keepInSearchMode && o.IsSearchMode() { + o.ExitSearchMode(false) + o.buf.Refresh(nil) + } else if o.IsInCompleteMode() { + if !keepInCompleteMode { + o.ExitCompleteMode(false) + o.Refresh() + } else { + o.buf.Refresh(nil) + o.CompleteRefresh() + } + } + if isUpdateHistory && !o.IsSearchMode() { + // it will cause null history + o.history.Update(o.buf.Runes(), false) + } + o.m.Unlock() + } +} + +func (o *Operation) Stderr() io.Writer { + return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t} +} + +func (o *Operation) Stdout() io.Writer { + return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t} +} + +func (o *Operation) String() (string, error) { + r, err := o.Runes() + return string(r), err +} + +func (o *Operation) Runes() ([]rune, error) { + o.t.EnterRawMode() + defer o.t.ExitRawMode() + + listener := o.GetConfig().Listener + if listener != nil { + listener.OnChange(nil, 0, 0) + } + + o.buf.Refresh(nil) // print prompt + o.t.KickRead() + select { + case r := <-o.outchan: + return r, nil + case err := <-o.errchan: + if e, ok := err.(*InterruptError); ok { + return e.Line, ErrInterrupt + } + return nil, err + } +} + +func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { + cfg := o.GenPasswordConfig() + cfg.Prompt = prompt + cfg.Listener = l + return o.PasswordWithConfig(cfg) +} + +func (o *Operation) GenPasswordConfig() *Config { + return o.opPassword.PasswordConfig() +} + +func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { + if err := o.opPassword.EnterPasswordMode(cfg); err != nil { + return nil, err + } + defer o.opPassword.ExitPasswordMode() + return o.Slice() +} + +func (o *Operation) Password(prompt string) ([]byte, error) { + return o.PasswordEx(prompt, nil) +} + +func (o *Operation) SetTitle(t string) { + o.w.Write([]byte("\033[2;" + t + "\007")) +} + +func (o *Operation) Slice() ([]byte, error) { + r, err := o.Runes() + if err != nil { + return nil, err + } + return []byte(string(r)), nil +} + +func (o *Operation) Close() { + o.history.Close() +} + +func (o *Operation) SetHistoryPath(path string) { + if o.history != nil { + o.history.Close() + } + o.cfg.HistoryFile = path + o.history = newOpHistory(o.cfg) +} + +func (o *Operation) IsNormalMode() bool { + return !o.IsInCompleteMode() && !o.IsSearchMode() +} + +func (op *Operation) SetConfig(cfg 
*Config) (*Config, error) { + op.m.Lock() + defer op.m.Unlock() + if op.cfg == cfg { + return op.cfg, nil + } + if err := cfg.Init(); err != nil { + return op.cfg, err + } + old := op.cfg + op.cfg = cfg + op.SetPrompt(cfg.Prompt) + op.SetMaskRune(cfg.MaskRune) + op.buf.SetConfig(cfg) + width := op.cfg.FuncGetWidth() + + if cfg.opHistory == nil { + op.SetHistoryPath(cfg.HistoryFile) + cfg.opHistory = op.history + cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) + } + op.history = cfg.opHistory + + // SetHistoryPath will close opHistory which already exists + // so if we use it next time, we need to reopen it by `InitHistory()` + op.history.Init() + + if op.cfg.AutoComplete != nil { + op.opCompleter = newOpCompleter(op.buf.w, op, width) + } + + op.opSearch = cfg.opSearch + return old, nil +} + +func (o *Operation) ResetHistory() { + o.history.Reset() +} + +// if err is not nil, it just mean it fail to write to file +// other things goes fine. +func (o *Operation) SaveHistory(content string) error { + return o.history.New([]rune(content)) +} + +func (o *Operation) Refresh() { + if o.t.IsReading() { + o.buf.Refresh(nil) + } +} + +func (o *Operation) Clean() { + o.buf.Clean() +} + +func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { + return &DumpListener{f: f} +} + +type DumpListener struct { + f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + return d.f(line, pos, key) +} + +type Listener interface { + OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +type Painter interface { + Paint(line []rune, pos int) []rune +} + +type defaultPainter struct{} + +func (p *defaultPainter) Paint(line []rune, _ int) []rune { + return line +} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go new file mode 100644 index 0000000..414288c --- /dev/null +++ b/vendor/github.com/chzyer/readline/password.go @@ -0,0 +1,33 @@ +package readline + +type opPassword struct { + o *Operation + backupCfg *Config +} + +func newOpPassword(o *Operation) *opPassword { + return &opPassword{o: o} +} + +func (o *opPassword) ExitPasswordMode() { + o.o.SetConfig(o.backupCfg) + o.backupCfg = nil +} + +func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { + o.backupCfg, err = o.o.SetConfig(cfg) + return +} + +func (o *opPassword) PasswordConfig() *Config { + return &Config{ + EnableMask: true, + InterruptPrompt: "\n", + EOFPrompt: "\n", + HistoryLimit: -1, + Painter: &defaultPainter{}, + + Stdout: o.o.cfg.Stdout, + Stderr: o.o.cfg.Stderr, + } +} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go new file mode 100644 index 0000000..073ef15 --- /dev/null +++ b/vendor/github.com/chzyer/readline/rawreader_windows.go @@ -0,0 +1,125 @@ +// +build windows + +package readline + +import "unsafe" + +const ( + VK_CANCEL = 0x03 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_ESCAPE = 0x1B + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_DELETE = 0x2E + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 +) + +// RawReader translate input record to ANSI escape sequence. +// To provides same behavior as unix terminal. 
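To round out the password mode implemented in password.go above, here is a hedged sketch of reading a masked secret through the Instance-level helpers declared later in readline.go; the prompt and mask rune are arbitrary choices.

```
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	rl, err := readline.New("> ")
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	// GenPasswordConfig returns a Config with EnableMask already set;
	// the prompt and mask rune are chosen here before reading.
	cfg := rl.GenPasswordConfig()
	cfg.Prompt = "password: "
	cfg.MaskRune = '*'

	pswd, err := rl.ReadPasswordWithConfig(cfg)
	if err != nil { // io.EOF or readline.ErrInterrupt
		return
	}
	fmt.Println("read", len(pswd), "bytes")
}
```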
+type RawReader struct { + ctrlKey bool + altKey bool +} + +func NewRawReader() *RawReader { + r := new(RawReader) + return r +} + +// only process one action in one read +func (r *RawReader) Read(buf []byte) (int, error) { + ir := new(_INPUT_RECORD) + var read int + var err error +next: + err = kernel.ReadConsoleInputW(stdin, + uintptr(unsafe.Pointer(ir)), + 1, + uintptr(unsafe.Pointer(&read)), + ) + if err != nil { + return 0, err + } + if ir.EventType != EVENT_KEY { + goto next + } + ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) + if ker.bKeyDown == 0 { // keyup + if r.ctrlKey || r.altKey { + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = false + case VK_MENU: //alt + r.altKey = false + } + } + goto next + } + + if ker.unicodeChar == 0 { + var target rune + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = true + case VK_MENU: //alt + r.altKey = true + case VK_LEFT: + target = CharBackward + case VK_RIGHT: + target = CharForward + case VK_UP: + target = CharPrev + case VK_DOWN: + target = CharNext + } + if target != 0 { + return r.write(buf, target) + } + goto next + } + char := rune(ker.unicodeChar) + if r.ctrlKey { + switch char { + case 'A': + char = CharLineStart + case 'E': + char = CharLineEnd + case 'R': + char = CharBckSearch + case 'S': + char = CharFwdSearch + } + } else if r.altKey { + switch char { + case VK_BACK: + char = CharBackspace + } + return r.writeEsc(buf, char) + } + return r.write(buf, char) +} + +func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { + b[0] = '\033' + n := copy(b[1:], []byte(string(char))) + return n + 1, nil +} + +func (r *RawReader) write(b []byte, char rune) (int, error) { + n := copy(b, []byte(string(char))) + return n, nil +} + +func (r *RawReader) Close() error { + return nil +} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go new file mode 100644 index 0000000..0e7aca0 --- /dev/null +++ b/vendor/github.com/chzyer/readline/readline.go @@ -0,0 +1,326 @@ +// Readline is a pure go implementation for GNU-Readline kind library. +// +// example: +// rl, err := readline.New("> ") +// if err != nil { +// panic(err) +// } +// defer rl.Close() +// +// for { +// line, err := rl.Readline() +// if err != nil { // io.EOF +// break +// } +// println(line) +// } +// +package readline + +import "io" + +type Instance struct { + Config *Config + Terminal *Terminal + Operation *Operation +} + +type Config struct { + // prompt supports ANSI escape sequence, so we can color some characters even in windows + Prompt string + + // readline will persist historys to file where HistoryFile specified + HistoryFile string + // specify the max length of historys, it's 500 by default, set it to -1 to disable history + HistoryLimit int + DisableAutoSaveHistory bool + // enable case-insensitive history searching + HistorySearchFold bool + + // AutoCompleter will called once user press TAB + AutoComplete AutoCompleter + + // Any key press will pass to Listener + // NOTE: Listener will be triggered by (nil, 0, 0) immediately + Listener Listener + + Painter Painter + + // If VimMode is true, readline will in vim.insert mode by default + VimMode bool + + InterruptPrompt string + EOFPrompt string + + FuncGetWidth func() int + + Stdin io.ReadCloser + StdinWriter io.Writer + Stdout io.Writer + Stderr io.Writer + + EnableMask bool + MaskRune rune + + // erase the editing line after user submited it + // it use in IM usually. 
+ UniqueEditLine bool + + // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions) + // -> output = new (translated) rune and true/false if continue with processing this one + FuncFilterInputRune func(rune) (rune, bool) + + // force use interactive even stdout is not a tty + FuncIsTerminal func() bool + FuncMakeRaw func() error + FuncExitRaw func() error + FuncOnWidthChanged func(func()) + ForceUseInteractive bool + + // private fields + inited bool + opHistory *opHistory + opSearch *opSearch +} + +func (c *Config) useInteractive() bool { + if c.ForceUseInteractive { + return true + } + return c.FuncIsTerminal() +} + +func (c *Config) Init() error { + if c.inited { + return nil + } + c.inited = true + if c.Stdin == nil { + c.Stdin = NewCancelableStdin(Stdin) + } + + c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin) + + if c.Stdout == nil { + c.Stdout = Stdout + } + if c.Stderr == nil { + c.Stderr = Stderr + } + if c.HistoryLimit == 0 { + c.HistoryLimit = 500 + } + + if c.InterruptPrompt == "" { + c.InterruptPrompt = "^C" + } else if c.InterruptPrompt == "\n" { + c.InterruptPrompt = "" + } + if c.EOFPrompt == "" { + c.EOFPrompt = "^D" + } else if c.EOFPrompt == "\n" { + c.EOFPrompt = "" + } + + if c.AutoComplete == nil { + c.AutoComplete = &TabCompleter{} + } + if c.FuncGetWidth == nil { + c.FuncGetWidth = GetScreenWidth + } + if c.FuncIsTerminal == nil { + c.FuncIsTerminal = DefaultIsTerminal + } + rm := new(RawMode) + if c.FuncMakeRaw == nil { + c.FuncMakeRaw = rm.Enter + } + if c.FuncExitRaw == nil { + c.FuncExitRaw = rm.Exit + } + if c.FuncOnWidthChanged == nil { + c.FuncOnWidthChanged = DefaultOnWidthChanged + } + + return nil +} + +func (c Config) Clone() *Config { + c.opHistory = nil + c.opSearch = nil + return &c +} + +func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) { + c.Listener = FuncListener(f) +} + +func (c *Config) SetPainter(p Painter) { + c.Painter = p +} + +func NewEx(cfg *Config) (*Instance, error) { + t, err := NewTerminal(cfg) + if err != nil { + return nil, err + } + rl := t.Readline() + if cfg.Painter == nil { + cfg.Painter = &defaultPainter{} + } + return &Instance{ + Config: cfg, + Terminal: t, + Operation: rl, + }, nil +} + +func New(prompt string) (*Instance, error) { + return NewEx(&Config{Prompt: prompt}) +} + +func (i *Instance) ResetHistory() { + i.Operation.ResetHistory() +} + +func (i *Instance) SetPrompt(s string) { + i.Operation.SetPrompt(s) +} + +func (i *Instance) SetMaskRune(r rune) { + i.Operation.SetMaskRune(r) +} + +// change history persistence in runtime +func (i *Instance) SetHistoryPath(p string) { + i.Operation.SetHistoryPath(p) +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stdout() io.Writer { + return i.Operation.Stdout() +} + +// readline will refresh automatic when write through Stdout() +func (i *Instance) Stderr() io.Writer { + return i.Operation.Stderr() +} + +// switch VimMode in runtime +func (i *Instance) SetVimMode(on bool) { + i.Operation.SetVimMode(on) +} + +func (i *Instance) IsVimMode() bool { + return i.Operation.IsEnableVimMode() +} + +func (i *Instance) GenPasswordConfig() *Config { + return i.Operation.GenPasswordConfig() +} + +// we can generate a config by `i.GenPasswordConfig()` +func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) { + return i.Operation.PasswordWithConfig(cfg) +} + +func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) { 
+ return i.Operation.PasswordEx(prompt, l) +} + +func (i *Instance) ReadPassword(prompt string) ([]byte, error) { + return i.Operation.Password(prompt) +} + +type Result struct { + Line string + Error error +} + +func (l *Result) CanContinue() bool { + return len(l.Line) != 0 && l.Error == ErrInterrupt +} + +func (l *Result) CanBreak() bool { + return !l.CanContinue() && l.Error != nil +} + +func (i *Instance) Line() *Result { + ret, err := i.Readline() + return &Result{ret, err} +} + +// err is one of (nil, io.EOF, readline.ErrInterrupt) +func (i *Instance) Readline() (string, error) { + return i.Operation.String() +} + +func (i *Instance) ReadlineWithDefault(what string) (string, error) { + i.Operation.SetBuffer(what) + return i.Operation.String() +} + +func (i *Instance) SaveHistory(content string) error { + return i.Operation.SaveHistory(content) +} + +// same as readline +func (i *Instance) ReadSlice() ([]byte, error) { + return i.Operation.Slice() +} + +// we must make sure that call Close() before process exit. +func (i *Instance) Close() error { + if err := i.Terminal.Close(); err != nil { + return err + } + i.Config.Stdin.Close() + i.Operation.Close() + return nil +} +func (i *Instance) Clean() { + i.Operation.Clean() +} + +func (i *Instance) Write(b []byte) (int, error) { + return i.Stdout().Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +// ie : +// i := readline.New() +// i.WriteStdin([]byte("test")) +// _, _= i.Readline() +// +// gives +// +// > test[cursor] +func (i *Instance) WriteStdin(val []byte) (int, error) { + return i.Terminal.WriteStdin(val) +} + +func (i *Instance) SetConfig(cfg *Config) *Config { + if i.Config == cfg { + return cfg + } + old := i.Config + i.Config = cfg + i.Operation.SetConfig(cfg) + i.Terminal.SetConfig(cfg) + return old +} + +func (i *Instance) Refresh() { + i.Operation.Refresh() +} + +// HistoryDisable the save of the commands into the history +func (i *Instance) HistoryDisable() { + i.Operation.history.Disable() +} + +// HistoryEnable the save of the commands into the history (default on) +func (i *Instance) HistoryEnable() { + i.Operation.history.Enable() +} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go new file mode 100644 index 0000000..74dbf56 --- /dev/null +++ b/vendor/github.com/chzyer/readline/remote.go @@ -0,0 +1,475 @@ +package readline + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "os" + "sync" + "sync/atomic" +) + +type MsgType int16 + +const ( + T_DATA = MsgType(iota) + T_WIDTH + T_WIDTH_REPORT + T_ISTTY_REPORT + T_RAW + T_ERAW // exit raw + T_EOF +) + +type RemoteSvr struct { + eof int32 + closed int32 + width int32 + reciveChan chan struct{} + writeChan chan *writeCtx + conn net.Conn + isTerminal bool + funcWidthChan func() + stopChan chan struct{} + + dataBufM sync.Mutex + dataBuf bytes.Buffer +} + +type writeReply struct { + n int + err error +} + +type writeCtx struct { + msg *Message + reply chan *writeReply +} + +func newWriteCtx(msg *Message) *writeCtx { + return &writeCtx{ + msg: msg, + reply: make(chan *writeReply), + } +} + +func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) { + rs := &RemoteSvr{ + width: -1, + conn: conn, + writeChan: make(chan *writeCtx), + reciveChan: make(chan struct{}), + stopChan: make(chan struct{}), + } + buf := bufio.NewReader(rs.conn) + + if err := rs.init(buf); err != nil { + return nil, err + } + + go 
rs.readLoop(buf) + go rs.writeLoop() + return rs, nil +} + +func (r *RemoteSvr) init(buf *bufio.Reader) error { + m, err := ReadMessage(buf) + if err != nil { + return err + } + // receive isTerminal + if m.Type != T_ISTTY_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotIsTerminal(m.Data) + + // receive width + m, err = ReadMessage(buf) + if err != nil { + return err + } + if m.Type != T_WIDTH_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotReportWidth(m.Data) + + return nil +} + +func (r *RemoteSvr) HandleConfig(cfg *Config) { + cfg.Stderr = r + cfg.Stdout = r + cfg.Stdin = r + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncIsTerminal = r.IsTerminal + cfg.FuncMakeRaw = r.EnterRawMode + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncGetWidth = r.GetWidth + cfg.FuncOnWidthChanged = func(f func()) { + r.funcWidthChan = f + } +} + +func (r *RemoteSvr) IsTerminal() bool { + return r.isTerminal +} + +func (r *RemoteSvr) checkEOF() error { + if atomic.LoadInt32(&r.eof) == 1 { + return io.EOF + } + return nil +} + +func (r *RemoteSvr) Read(b []byte) (int, error) { + r.dataBufM.Lock() + n, err := r.dataBuf.Read(b) + r.dataBufM.Unlock() + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + if n == 0 && err == io.EOF { + <-r.reciveChan + r.dataBufM.Lock() + n, err = r.dataBuf.Read(b) + r.dataBufM.Unlock() + } + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + return n, err +} + +func (r *RemoteSvr) writeMsg(m *Message) error { + ctx := newWriteCtx(m) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.err +} + +func (r *RemoteSvr) Write(b []byte) (int, error) { + ctx := newWriteCtx(NewMessage(T_DATA, b)) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.n, reply.err +} + +func (r *RemoteSvr) EnterRawMode() error { + return r.writeMsg(NewMessage(T_RAW, nil)) +} + +func (r *RemoteSvr) ExitRawMode() error { + return r.writeMsg(NewMessage(T_ERAW, nil)) +} + +func (r *RemoteSvr) writeLoop() { + defer r.Close() + +loop: + for { + select { + case ctx, ok := <-r.writeChan: + if !ok { + break + } + n, err := ctx.msg.WriteTo(r.conn) + ctx.reply <- &writeReply{n, err} + case <-r.stopChan: + break loop + } + } +} + +func (r *RemoteSvr) Close() error { + if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { + close(r.stopChan) + r.conn.Close() + } + return nil +} + +func (r *RemoteSvr) readLoop(buf *bufio.Reader) { + defer r.Close() + for { + m, err := ReadMessage(buf) + if err != nil { + break + } + switch m.Type { + case T_EOF: + atomic.StoreInt32(&r.eof, 1) + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_DATA: + r.dataBufM.Lock() + r.dataBuf.Write(m.Data) + r.dataBufM.Unlock() + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_WIDTH_REPORT: + r.GotReportWidth(m.Data) + case T_ISTTY_REPORT: + r.GotIsTerminal(m.Data) + } + } +} + +func (r *RemoteSvr) GotIsTerminal(data []byte) { + if binary.BigEndian.Uint16(data) == 0 { + r.isTerminal = false + } else { + r.isTerminal = true + } +} + +func (r *RemoteSvr) GotReportWidth(data []byte) { + atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) + if r.funcWidthChan != nil { + r.funcWidthChan() + } +} + +func (r *RemoteSvr) GetWidth() int { + return int(atomic.LoadInt32(&r.width)) +} + +// ----------------------------------------------------------------------------- + +type Message struct { + Type MsgType + Data []byte +} + +func ReadMessage(r io.Reader) (*Message, error) { + m := new(Message) + var length int32 + if err 
:= binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { + return nil, err + } + m.Data = make([]byte, int(length)-2) + if _, err := io.ReadFull(r, m.Data); err != nil { + return nil, err + } + return m, nil +} + +func NewMessage(t MsgType, data []byte) *Message { + return &Message{t, data} +} + +func (m *Message) WriteTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) + binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) + binary.Write(buf, binary.BigEndian, m.Type) + buf.Write(m.Data) + n, err := buf.WriteTo(w) + return int(n), err +} + +// ----------------------------------------------------------------------------- + +type RemoteCli struct { + conn net.Conn + raw RawMode + receiveChan chan struct{} + inited int32 + isTerminal *bool + + data bytes.Buffer + dataM sync.Mutex +} + +func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { + r := &RemoteCli{ + conn: conn, + receiveChan: make(chan struct{}), + } + return r, nil +} + +func (r *RemoteCli) MarkIsTerminal(is bool) { + r.isTerminal = &is +} + +func (r *RemoteCli) init() error { + if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { + return nil + } + + if err := r.reportIsTerminal(); err != nil { + return err + } + + if err := r.reportWidth(); err != nil { + return err + } + + // register sig for width changed + DefaultOnWidthChanged(func() { + r.reportWidth() + }) + return nil +} + +func (r *RemoteCli) writeMsg(m *Message) error { + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return err +} + +func (r *RemoteCli) Write(b []byte) (int, error) { + m := NewMessage(T_DATA, b) + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return len(b), err +} + +func (r *RemoteCli) reportWidth() error { + screenWidth := GetScreenWidth() + data := make([]byte, 2) + binary.BigEndian.PutUint16(data, uint16(screenWidth)) + msg := NewMessage(T_WIDTH_REPORT, data) + + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) reportIsTerminal() error { + var isTerminal bool + if r.isTerminal != nil { + isTerminal = *r.isTerminal + } else { + isTerminal = DefaultIsTerminal() + } + data := make([]byte, 2) + if isTerminal { + binary.BigEndian.PutUint16(data, 1) + } else { + binary.BigEndian.PutUint16(data, 0) + } + msg := NewMessage(T_ISTTY_REPORT, data) + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) readLoop() { + buf := bufio.NewReader(r.conn) + for { + msg, err := ReadMessage(buf) + if err != nil { + break + } + switch msg.Type { + case T_ERAW: + r.raw.Exit() + case T_RAW: + r.raw.Enter() + case T_DATA: + os.Stdout.Write(msg.Data) + } + } +} + +func (r *RemoteCli) ServeBy(source io.Reader) error { + if err := r.init(); err != nil { + return err + } + + go func() { + defer r.Close() + for { + n, _ := io.Copy(r, source) + if n == 0 { + break + } + } + }() + defer r.raw.Exit() + r.readLoop() + return nil +} + +func (r *RemoteCli) Close() { + r.writeMsg(NewMessage(T_EOF, nil)) +} + +func (r *RemoteCli) Serve() error { + return r.ServeBy(os.Stdin) +} + +func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { + ln, err := net.Listen(n, addr) + if err != nil { + return err + } + if len(onListen) > 0 { + if err := onListen[0](ln); err != nil { + return err + } + } + for { + conn, err := ln.Accept() + if err != nil { + break + } + go func() { + defer 
conn.Close() + rl, err := HandleConn(*cfg, conn) + if err != nil { + return + } + h(rl) + }() + } + return nil +} + +func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { + r, err := NewRemoteSvr(conn) + if err != nil { + return nil, err + } + r.HandleConfig(&cfg) + + rl, err := NewEx(&cfg) + if err != nil { + return nil, err + } + return rl, nil +} + +func DialRemote(n, addr string) error { + conn, err := net.Dial(n, addr) + if err != nil { + return err + } + defer conn.Close() + + cli, err := NewRemoteCli(conn) + if err != nil { + return err + } + return cli.Serve() +} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go new file mode 100644 index 0000000..81d2da5 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runebuf.go @@ -0,0 +1,629 @@ +package readline + +import ( + "bufio" + "bytes" + "io" + "strconv" + "strings" + "sync" +) + +type runeBufferBck struct { + buf []rune + idx int +} + +type RuneBuffer struct { + buf []rune + idx int + prompt []rune + w io.Writer + + hadClean bool + interactive bool + cfg *Config + + width int + + bck *runeBufferBck + + offset string + + lastKill []rune + + sync.Mutex +} + +func (r* RuneBuffer) pushKill(text []rune) { + r.lastKill = append([]rune{}, text...) +} + +func (r *RuneBuffer) OnWidthChange(newWidth int) { + r.Lock() + r.width = newWidth + r.Unlock() +} + +func (r *RuneBuffer) Backup() { + r.Lock() + r.bck = &runeBufferBck{r.buf, r.idx} + r.Unlock() +} + +func (r *RuneBuffer) Restore() { + r.Refresh(func() { + if r.bck == nil { + return + } + r.buf = r.bck.buf + r.idx = r.bck.idx + }) +} + +func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { + rb := &RuneBuffer{ + w: w, + interactive: cfg.useInteractive(), + cfg: cfg, + width: width, + } + rb.SetPrompt(prompt) + return rb +} + +func (r *RuneBuffer) SetConfig(cfg *Config) { + r.Lock() + r.cfg = cfg + r.interactive = cfg.useInteractive() + r.Unlock() +} + +func (r *RuneBuffer) SetMask(m rune) { + r.Lock() + r.cfg.MaskRune = m + r.Unlock() +} + +func (r *RuneBuffer) CurrentWidth(x int) int { + r.Lock() + defer r.Unlock() + return runes.WidthAll(r.buf[:x]) +} + +func (r *RuneBuffer) PromptLen() int { + r.Lock() + width := r.promptLen() + r.Unlock() + return width +} + +func (r *RuneBuffer) promptLen() int { + return runes.WidthAll(runes.ColorFilter(r.prompt)) +} + +func (r *RuneBuffer) RuneSlice(i int) []rune { + r.Lock() + defer r.Unlock() + + if i > 0 { + rs := make([]rune, i) + copy(rs, r.buf[r.idx:r.idx+i]) + return rs + } + rs := make([]rune, -i) + copy(rs, r.buf[r.idx+i:r.idx]) + return rs +} + +func (r *RuneBuffer) Runes() []rune { + r.Lock() + newr := make([]rune, len(r.buf)) + copy(newr, r.buf) + r.Unlock() + return newr +} + +func (r *RuneBuffer) Pos() int { + r.Lock() + defer r.Unlock() + return r.idx +} + +func (r *RuneBuffer) Len() int { + r.Lock() + defer r.Unlock() + return len(r.buf) +} + +func (r *RuneBuffer) MoveToLineStart() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx = 0 + }) +} + +func (r *RuneBuffer) MoveBackward() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx-- + }) +} + +func (r *RuneBuffer) WriteString(s string) { + r.WriteRunes([]rune(s)) +} + +func (r *RuneBuffer) WriteRune(s rune) { + r.WriteRunes([]rune{s}) +} + +func (r *RuneBuffer) WriteRunes(s []rune) { + r.Refresh(func() { + tail := append(s, r.buf[r.idx:]...) + r.buf = append(r.buf[:r.idx], tail...) 
+ r.idx += len(s) + }) +} + +func (r *RuneBuffer) MoveForward() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.idx++ + }) +} + +func (r *RuneBuffer) IsCursorInEnd() bool { + r.Lock() + defer r.Unlock() + return r.idx == len(r.buf) +} + +func (r *RuneBuffer) Replace(ch rune) { + r.Refresh(func() { + r.buf[r.idx] = ch + }) +} + +func (r *RuneBuffer) Erase() { + r.Refresh(func() { + r.idx = 0 + r.pushKill(r.buf[:]) + r.buf = r.buf[:0] + }) +} + +func (r *RuneBuffer) Delete() (success bool) { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.pushKill(r.buf[r.idx : r.idx+1]) + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) + success = true + }) + return +} + +func (r *RuneBuffer) DeleteWord() { + if r.idx == len(r.buf) { + return + } + init := r.idx + for init < len(r.buf) && IsWordBreak(r.buf[init]) { + init++ + } + for i := init + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[r.idx:i-1]) + r.Refresh(func() { + r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) + }) + return + } + } + r.Kill() +} + +func (r *RuneBuffer) MoveToPrevWord() (success bool) { + r.Refresh(func() { + if r.idx == 0 { + return + } + + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + success = true + return + } + } + r.idx = 0 + success = true + }) + return +} + +func (r *RuneBuffer) KillFront() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + length := len(r.buf) - r.idx + r.pushKill(r.buf[:r.idx]) + copy(r.buf[:length], r.buf[r.idx:]) + r.idx = 0 + r.buf = r.buf[:length] + }) +} + +func (r *RuneBuffer) Kill() { + r.Refresh(func() { + r.pushKill(r.buf[r.idx:]) + r.buf = r.buf[:r.idx] + }) +} + +func (r *RuneBuffer) Transpose() { + r.Refresh(func() { + if len(r.buf) == 1 { + r.idx++ + } + + if len(r.buf) < 2 { + return + } + + if r.idx == 0 { + r.idx = 1 + } else if r.idx >= len(r.buf) { + r.idx = len(r.buf) - 1 + } + r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] + r.idx++ + }) +} + +func (r *RuneBuffer) MoveToNextWord() { + r.Refresh(func() { + for i := r.idx + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + return + } + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) MoveToEndWord() { + r.Refresh(func() { + // already at the end, so do nothing + if r.idx == len(r.buf) { + return + } + // if we are at the end of a word already, go to next + if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { + r.idx++ + } + + // keep going until at the end of a word + for i := r.idx + 1; i < len(r.buf); i++ { + if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { + r.idx = i - 1 + return + } + } + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) BackEscapeWord() { + r.Refresh(func() { + if r.idx == 0 { + return + } + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[i:r.idx]) + r.buf = append(r.buf[:i], r.buf[r.idx:]...) + r.idx = i + return + } + } + + r.buf = r.buf[:0] + r.idx = 0 + }) +} + +func (r *RuneBuffer) Yank() { + if len(r.lastKill) == 0 { + return + } + r.Refresh(func() { + buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) + buf = append(buf, r.buf[:r.idx]...) + buf = append(buf, r.lastKill...) + buf = append(buf, r.buf[r.idx:]...) + r.buf = buf + r.idx += len(r.lastKill) + }) +} + +func (r *RuneBuffer) Backspace() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + r.idx-- + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
+ }) +} + +func (r *RuneBuffer) MoveToLineEnd() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) LineCount(width int) int { + if width == -1 { + width = r.width + } + return LineCount(width, + runes.WidthAll(r.buf)+r.PromptLen()) +} + +func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { + r.Refresh(func() { + if reverse { + for i := r.idx - 1; i >= 0; i-- { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx++ + } + success = true + return + } + } + return + } + for i := r.idx + 1; i < len(r.buf); i++ { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx-- + } + success = true + return + } + } + }) + return +} + +func (r *RuneBuffer) isInLineEdge() bool { + if isWindows { + return false + } + sp := r.getSplitByLine(r.buf) + return len(sp[len(sp)-1]) == 0 +} + +func (r *RuneBuffer) getSplitByLine(rs []rune) []string { + return SplitByLine(r.promptLen(), r.width, rs) +} + +func (r *RuneBuffer) IdxLine(width int) int { + r.Lock() + defer r.Unlock() + return r.idxLine(width) +} + +func (r *RuneBuffer) idxLine(width int) int { + if width == 0 { + return 0 + } + sp := r.getSplitByLine(r.buf[:r.idx]) + return len(sp) - 1 +} + +func (r *RuneBuffer) CursorLineCount() int { + return r.LineCount(r.width) - r.IdxLine(r.width) +} + +func (r *RuneBuffer) Refresh(f func()) { + r.Lock() + defer r.Unlock() + + if !r.interactive { + if f != nil { + f() + } + return + } + + r.clean() + if f != nil { + f() + } + r.print() +} + +func (r *RuneBuffer) SetOffset(offset string) { + r.Lock() + r.offset = offset + r.Unlock() +} + +func (r *RuneBuffer) print() { + r.w.Write(r.output()) + r.hadClean = false +} + +func (r *RuneBuffer) output() []byte { + buf := bytes.NewBuffer(nil) + buf.WriteString(string(r.prompt)) + if r.cfg.EnableMask && len(r.buf) > 0 { + buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) + if r.buf[len(r.buf)-1] == '\n' { + buf.Write([]byte{'\n'}) + } else { + buf.Write([]byte(string(r.cfg.MaskRune))) + } + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + + } else { + for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) { + if e == '\t' { + buf.WriteString(strings.Repeat(" ", TabWidth)) + } else { + buf.WriteRune(e) + } + } + if r.isInLineEdge() { + buf.Write([]byte(" \b")) + } + } + // cursor position + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + return buf.Bytes() +} + +func (r *RuneBuffer) getBackspaceSequence() []byte { + var sep = map[int]bool{} + + var i int + for { + if i >= runes.WidthAll(r.buf) { + break + } + + if i == 0 { + i -= r.promptLen() + } + i += r.width + + sep[i] = true + } + var buf []byte + for i := len(r.buf); i > r.idx; i-- { + // move input to the left of one + buf = append(buf, '\b') + if sep[i] { + // up one line, go to the start of the line and move cursor right to the end (r.width) + buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...) 
+ } + } + + return buf + +} + +func (r *RuneBuffer) Reset() []rune { + ret := runes.Copy(r.buf) + r.buf = r.buf[:0] + r.idx = 0 + return ret +} + +func (r *RuneBuffer) calWidth(m int) int { + if m > 0 { + return runes.WidthAll(r.buf[r.idx : r.idx+m]) + } + return runes.WidthAll(r.buf[r.idx+m : r.idx]) +} + +func (r *RuneBuffer) SetStyle(start, end int, style string) { + if end < start { + panic("end < start") + } + + // goto start + move := start - r.idx + if move > 0 { + r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) + } else { + r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) + } + r.w.Write([]byte("\033[" + style + "m")) + r.w.Write([]byte(string(r.buf[start:end]))) + r.w.Write([]byte("\033[0m")) + // TODO: move back +} + +func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { + r.Refresh(func() { + r.buf = buf + r.idx = idx + }) +} + +func (r *RuneBuffer) Set(buf []rune) { + r.SetWithIdx(len(buf), buf) +} + +func (r *RuneBuffer) SetPrompt(prompt string) { + r.Lock() + r.prompt = []rune(prompt) + r.Unlock() +} + +func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { + buf := bufio.NewWriter(w) + + if r.width == 0 { + buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) + buf.Write([]byte("\033[J")) + } else { + buf.Write([]byte("\033[J")) // just like ^k :) + if idxLine == 0 { + buf.WriteString("\033[2K") + buf.WriteString("\r") + } else { + for i := 0; i < idxLine; i++ { + io.WriteString(buf, "\033[2K\r\033[A") + } + io.WriteString(buf, "\033[2K\r") + } + } + buf.Flush() + return +} + +func (r *RuneBuffer) Clean() { + r.Lock() + r.clean() + r.Unlock() +} + +func (r *RuneBuffer) clean() { + r.cleanWithIdxLine(r.idxLine(r.width)) +} + +func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { + if r.hadClean || !r.interactive { + return + } + r.hadClean = true + r.cleanOutput(r.w, idxLine) +} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go new file mode 100644 index 0000000..a669bc4 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runes.go @@ -0,0 +1,223 @@ +package readline + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +var runes = Runes{} +var TabWidth = 4 + +type Runes struct{} + +func (Runes) EqualRune(a, b rune, fold bool) bool { + if a == b { + return true + } + if !fold { + return false + } + if a > b { + a, b = b, a + } + if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { + if b == a+'a'-'A' { + return true + } + } + return false +} + +func (r Runes) EqualRuneFold(a, b rune) bool { + return r.EqualRune(a, b, true) +} + +func (r Runes) EqualFold(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if r.EqualRuneFold(a[i], b[i]) { + continue + } + return false + } + + return true +} + +func (Runes) Equal(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { + for i := len(r) - len(sub); i >= 0; i-- { + found := true + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +// Search in runes from end to front +func (rs Runes) IndexAllBck(r, sub []rune) int { + return rs.IndexAllBckEx(r, sub, false) +} + +// Search in runes from front to end +func (rs Runes) IndexAll(r, sub []rune) int { + return rs.IndexAllEx(r, sub, false) +} + +func (rs Runes) IndexAllEx(r, sub []rune, fold bool) int 
{ + for i := 0; i < len(r); i++ { + found := true + if len(r[i:]) < len(sub) { + return -1 + } + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +func (Runes) Index(r rune, rs []rune) int { + for i := 0; i < len(rs); i++ { + if rs[i] == r { + return i + } + } + return -1 +} + +func (Runes) ColorFilter(r []rune) []rune { + newr := make([]rune, 0, len(r)) + for pos := 0; pos < len(r); pos++ { + if r[pos] == '\033' && r[pos+1] == '[' { + idx := runes.Index('m', r[pos+2:]) + if idx == -1 { + continue + } + pos += idx + 2 + continue + } + newr = append(newr, r[pos]) + } + return newr +} + +var zeroWidth = []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cc, + unicode.Cf, +} + +var doubleWidth = []*unicode.RangeTable{ + unicode.Han, + unicode.Hangul, + unicode.Hiragana, + unicode.Katakana, +} + +func (Runes) Width(r rune) int { + if r == '\t' { + return TabWidth + } + if unicode.IsOneOf(zeroWidth, r) { + return 0 + } + if unicode.IsOneOf(doubleWidth, r) { + return 2 + } + return 1 +} + +func (Runes) WidthAll(r []rune) (length int) { + for i := 0; i < len(r); i++ { + length += runes.Width(r[i]) + } + return +} + +func (Runes) Backspace(r []rune) []byte { + return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) +} + +func (Runes) Copy(r []rune) []rune { + n := make([]rune, len(r)) + copy(n, r) + return n +} + +func (Runes) HasPrefixFold(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.EqualFold(r[:len(prefix)], prefix) +} + +func (Runes) HasPrefix(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.Equal(r[:len(prefix)], prefix) +} + +func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { + for i := 0; i < len(candicate[0]); i++ { + for j := 0; j < len(candicate)-1; j++ { + if i >= len(candicate[j]) || i >= len(candicate[j+1]) { + goto aggregate + } + if candicate[j][i] != candicate[j+1][i] { + goto aggregate + } + } + size = i + 1 + } +aggregate: + if size > 0 { + same = runes.Copy(candicate[0][:size]) + for i := 0; i < len(candicate); i++ { + n := runes.Copy(candicate[i]) + copy(n, n[size:]) + candicate[i] = n[:len(n)-size] + } + } + return +} + +func (Runes) TrimSpaceLeft(in []rune) []rune { + firstIndex := len(in) + for i, r := range in { + if unicode.IsSpace(r) == false { + firstIndex = i + break + } + } + return in[firstIndex:] +} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go new file mode 100644 index 0000000..52e8ff0 --- /dev/null +++ b/vendor/github.com/chzyer/readline/search.go @@ -0,0 +1,164 @@ +package readline + +import ( + "bytes" + "container/list" + "fmt" + "io" +) + +const ( + S_STATE_FOUND = iota + S_STATE_FAILING +) + +const ( + S_DIR_BCK = iota + S_DIR_FWD +) + +type opSearch struct { + inMode bool + state int + dir int + source *list.Element + w io.Writer + buf *RuneBuffer + data []rune + history *opHistory + cfg *Config + markStart int + markEnd int + width int +} + +func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { + return &opSearch{ + w: w, + buf: buf, + cfg: cfg, + history: history, + width: width, + } +} + +func (o *opSearch) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opSearch) IsSearchMode() bool { + return o.inMode +} + +func (o *opSearch) SearchBackspace() { + if len(o.data) > 0 { + o.data = o.data[:len(o.data)-1] + o.search(true) + } 
+} + +func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { + if o.dir == S_DIR_BCK { + return o.history.FindBck(isNewSearch, o.data, o.buf.idx) + } + return o.history.FindFwd(isNewSearch, o.data, o.buf.idx) +} + +func (o *opSearch) search(isChange bool) bool { + if len(o.data) == 0 { + o.state = S_STATE_FOUND + o.SearchRefresh(-1) + return true + } + idx, elem := o.findHistoryBy(isChange) + if elem == nil { + o.SearchRefresh(-2) + return false + } + o.history.current = elem + + item := o.history.showItem(o.history.current.Value) + start, end := 0, 0 + if o.dir == S_DIR_BCK { + start, end = idx, idx+len(o.data) + } else { + start, end = idx, idx+len(o.data) + idx += len(o.data) + } + o.buf.SetWithIdx(idx, item) + o.markStart, o.markEnd = start, end + o.SearchRefresh(idx) + return true +} + +func (o *opSearch) SearchChar(r rune) { + o.data = append(o.data, r) + o.search(true) +} + +func (o *opSearch) SearchMode(dir int) bool { + if o.width == 0 { + return false + } + alreadyInMode := o.inMode + o.inMode = true + o.dir = dir + o.source = o.history.current + if alreadyInMode { + o.search(false) + } else { + o.SearchRefresh(-1) + } + return true +} + +func (o *opSearch) ExitSearchMode(revert bool) { + if revert { + o.history.current = o.source + o.buf.Set(o.history.showItem(o.history.current.Value)) + } + o.markStart, o.markEnd = 0, 0 + o.state = S_STATE_FOUND + o.inMode = false + o.source = nil + o.data = nil +} + +func (o *opSearch) SearchRefresh(x int) { + if x == -2 { + o.state = S_STATE_FAILING + } else if x >= 0 { + o.state = S_STATE_FOUND + } + if x < 0 { + x = o.buf.idx + } + x = o.buf.CurrentWidth(x) + x += o.buf.PromptLen() + x = x % o.width + + if o.markStart > 0 { + o.buf.SetStyle(o.markStart, o.markEnd, "4") + } + + lineCnt := o.buf.CursorLineCount() + buf := bytes.NewBuffer(nil) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + buf.WriteString("\033[J") + if o.state == S_STATE_FAILING { + buf.WriteString("failing ") + } + if o.dir == S_DIR_BCK { + buf.WriteString("bck") + } else if o.dir == S_DIR_FWD { + buf.WriteString("fwd") + } + buf.WriteString("-i-search: ") + buf.WriteString(string(o.data)) // keyword + buf.WriteString("\033[4m \033[0m") // _ + fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev + if x > 0 { + fmt.Fprintf(buf, "\033[%dC", x) // move forward + } + o.w.Write(buf.Bytes()) +} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go new file mode 100644 index 0000000..61d44b7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/std.go @@ -0,0 +1,197 @@ +package readline + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +var ( + Stdin io.ReadCloser = os.Stdin + Stdout io.WriteCloser = os.Stdout + Stderr io.WriteCloser = os.Stderr +) + +var ( + std *Instance + stdOnce sync.Once +) + +// global instance will not submit history automatic +func getInstance() *Instance { + stdOnce.Do(func() { + std, _ = NewEx(&Config{ + DisableAutoSaveHistory: true, + }) + }) + return std +} + +// let readline load history from filepath +// and try to persist history into disk +// set fp to "" to prevent readline persisting history to disk +// so the `AddHistory` will return nil error forever. 
+func SetHistoryPath(fp string) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.HistoryFile = fp + ins.SetConfig(cfg) +} + +// set auto completer to global instance +func SetAutoComplete(completer AutoCompleter) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.AutoComplete = completer + ins.SetConfig(cfg) +} + +// add history to global instance manually +// raise error only if `SetHistoryPath` is set with a non-empty path +func AddHistory(content string) error { + ins := getInstance() + return ins.SaveHistory(content) +} + +func Password(prompt string) ([]byte, error) { + ins := getInstance() + return ins.ReadPassword(prompt) +} + +// readline with global configs +func Line(prompt string) (string, error) { + ins := getInstance() + ins.SetPrompt(prompt) + return ins.Readline() +} + +type CancelableStdin struct { + r io.Reader + mutex sync.Mutex + stop chan struct{} + closed int32 + notify chan struct{} + data []byte + read int + err error +} + +func NewCancelableStdin(r io.Reader) *CancelableStdin { + c := &CancelableStdin{ + r: r, + notify: make(chan struct{}), + stop: make(chan struct{}), + } + go c.ioloop() + return c +} + +func (c *CancelableStdin) ioloop() { +loop: + for { + select { + case <-c.notify: + c.read, c.err = c.r.Read(c.data) + select { + case c.notify <- struct{}{}: + case <-c.stop: + break loop + } + case <-c.stop: + break loop + } + } +} + +func (c *CancelableStdin) Read(b []byte) (n int, err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if atomic.LoadInt32(&c.closed) == 1 { + return 0, io.EOF + } + + c.data = b + select { + case c.notify <- struct{}{}: + case <-c.stop: + return 0, io.EOF + } + select { + case <-c.notify: + return c.read, c.err + case <-c.stop: + return 0, io.EOF + } +} + +func (c *CancelableStdin) Close() error { + if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { + close(c.stop) + } + return nil +} + +// FillableStdin is a stdin reader which can prepend some data before +// reading into the real stdin +type FillableStdin struct { + sync.Mutex + stdin io.Reader + stdinBuffer io.ReadCloser + buf []byte + bufErr error +} + +// NewFillableStdin gives you FillableStdin +func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) { + r, w := io.Pipe() + s := &FillableStdin{ + stdinBuffer: r, + stdin: stdin, + } + s.ioloop() + return s, w +} + +func (s *FillableStdin) ioloop() { + go func() { + for { + bufR := make([]byte, 100) + var n int + n, s.bufErr = s.stdinBuffer.Read(bufR) + if s.bufErr != nil { + if s.bufErr == io.ErrClosedPipe { + break + } + } + s.Lock() + s.buf = append(s.buf, bufR[:n]...) 
+ s.Unlock() + } + }() +} + +// Read will read from the local buffer and if no data, read from stdin +func (s *FillableStdin) Read(p []byte) (n int, err error) { + s.Lock() + i := len(s.buf) + if len(p) < i { + i = len(p) + } + if i > 0 { + n := copy(p, s.buf) + s.buf = s.buf[:0] + cerr := s.bufErr + s.bufErr = nil + s.Unlock() + return n, cerr + } + s.Unlock() + n, err = s.stdin.Read(p) + return n, err +} + +func (s *FillableStdin) Close() error { + s.stdinBuffer.Close() + return nil +} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go new file mode 100644 index 0000000..b10f91b --- /dev/null +++ b/vendor/github.com/chzyer/readline/std_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package readline + +func init() { + Stdin = NewRawReader() + Stdout = NewANSIWriter(Stdout) + Stderr = NewANSIWriter(Stderr) +} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go new file mode 100644 index 0000000..133993c --- /dev/null +++ b/vendor/github.com/chzyer/readline/term.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := getTermios(fd) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + + if termios, err := getTermios(fd); err != nil { + return nil, err + } else { + oldState.termios = *termios + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + // newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + return &oldState, setTermios(fd, &newState) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := getTermios(fd) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + return setTermios(fd, &state.termios) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. 
The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + oldState, err := getTermios(fd) + if err != nil { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if err := setTermios(fd, newState); err != nil { + return nil, err + } + + defer func() { + setTermios(fd, oldState) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go new file mode 100644 index 0000000..68b56ea --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go new file mode 100644 index 0000000..e3392b4 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_linux.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package readline + +import ( + "syscall" + "unsafe" +) + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go new file mode 100644 index 0000000..4c27273 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_solaris.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package readline + +import "golang.org/x/sys/unix" + +// GetSize returns the dimensions of the given terminal. 
+func GetSize(fd int) (int, int, error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} + +type Termios unix.Termios + +func getTermios(fd int) (*Termios, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + return (*Termios)(termios), nil +} + +func setTermios(fd int, termios *Termios) error { + return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) +} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go new file mode 100644 index 0000000..d3ea242 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_unix.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +type Termios syscall.Termios + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + var dimensions [4]uint16 + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) + if err != 0 { + return 0, 0, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go new file mode 100644 index 0000000..1290e00 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_windows.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
+func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go new file mode 100644 index 0000000..1078631 --- /dev/null +++ b/vendor/github.com/chzyer/readline/terminal.go @@ -0,0 +1,238 @@ +package readline + +import ( + "bufio" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +type Terminal struct { + m sync.Mutex + cfg *Config + outchan chan rune + closed int32 + stopChan chan struct{} + kickChan chan struct{} + wg sync.WaitGroup + isReading int32 + sleeping int32 + + sizeChan chan string +} + +func NewTerminal(cfg *Config) (*Terminal, error) { + if err := cfg.Init(); err != nil { + return nil, err + } + t := &Terminal{ + cfg: cfg, + kickChan: make(chan struct{}, 1), + outchan: make(chan rune), + stopChan: make(chan struct{}, 1), + sizeChan: make(chan string, 1), + } + + go t.ioloop() + return t, nil +} + +// SleepToResume will sleep myself, and return only if I'm resumed. 
+func (t *Terminal) SleepToResume() { + if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { + return + } + defer atomic.StoreInt32(&t.sleeping, 0) + + t.ExitRawMode() + ch := WaitForResume() + SuspendMe() + <-ch + t.EnterRawMode() +} + +func (t *Terminal) EnterRawMode() (err error) { + return t.cfg.FuncMakeRaw() +} + +func (t *Terminal) ExitRawMode() (err error) { + return t.cfg.FuncExitRaw() +} + +func (t *Terminal) Write(b []byte) (int, error) { + return t.cfg.Stdout.Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +func (t *Terminal) WriteStdin(b []byte) (int, error) { + return t.cfg.StdinWriter.Write(b) +} + +type termSize struct { + left int + top int +} + +func (t *Terminal) GetOffset(f func(offset string)) { + go func() { + f(<-t.sizeChan) + }() + t.Write([]byte("\033[6n")) +} + +func (t *Terminal) Print(s string) { + fmt.Fprintf(t.cfg.Stdout, "%s", s) +} + +func (t *Terminal) PrintRune(r rune) { + fmt.Fprintf(t.cfg.Stdout, "%c", r) +} + +func (t *Terminal) Readline() *Operation { + return NewOperation(t, t.cfg) +} + +// return rune(0) if meet EOF +func (t *Terminal) ReadRune() rune { + ch, ok := <-t.outchan + if !ok { + return rune(0) + } + return ch +} + +func (t *Terminal) IsReading() bool { + return atomic.LoadInt32(&t.isReading) == 1 +} + +func (t *Terminal) KickRead() { + select { + case t.kickChan <- struct{}{}: + default: + } +} + +func (t *Terminal) ioloop() { + t.wg.Add(1) + defer func() { + t.wg.Done() + close(t.outchan) + }() + + var ( + isEscape bool + isEscapeEx bool + expectNextChar bool + ) + + buf := bufio.NewReader(t.getStdin()) + for { + if !expectNextChar { + atomic.StoreInt32(&t.isReading, 0) + select { + case <-t.kickChan: + atomic.StoreInt32(&t.isReading, 1) + case <-t.stopChan: + return + } + } + expectNextChar = false + r, _, err := buf.ReadRune() + if err != nil { + if strings.Contains(err.Error(), "interrupted system call") { + expectNextChar = true + continue + } + break + } + + if isEscape { + isEscape = false + if r == CharEscapeEx { + expectNextChar = true + isEscapeEx = true + continue + } + r = escapeKey(r, buf) + } else if isEscapeEx { + isEscapeEx = false + if key := readEscKey(r, buf); key != nil { + r = escapeExKey(key) + // offset + if key.typ == 'R' { + if _, _, ok := key.Get2(); ok { + select { + case t.sizeChan <- key.attr: + default: + } + } + expectNextChar = true + continue + } + } + if r == 0 { + expectNextChar = true + continue + } + } + + expectNextChar = true + switch r { + case CharEsc: + if t.cfg.VimMode { + t.outchan <- r + break + } + isEscape = true + case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: + expectNextChar = false + fallthrough + default: + t.outchan <- r + } + } + +} + +func (t *Terminal) Bell() { + fmt.Fprintf(t, "%c", CharBell) +} + +func (t *Terminal) Close() error { + if atomic.SwapInt32(&t.closed, 1) != 0 { + return nil + } + if closer, ok := t.cfg.Stdin.(io.Closer); ok { + closer.Close() + } + close(t.stopChan) + t.wg.Wait() + return t.ExitRawMode() +} + +func (t *Terminal) GetConfig() *Config { + t.m.Lock() + cfg := *t.cfg + t.m.Unlock() + return &cfg +} + +func (t *Terminal) getStdin() io.Reader { + t.m.Lock() + r := t.cfg.Stdin + t.m.Unlock() + return r +} + +func (t *Terminal) SetConfig(c *Config) error { + if err := c.Init(); err != nil { + return err + } + t.m.Lock() + t.cfg = c + t.m.Unlock() + return nil +} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go new 
file mode 100644 index 0000000..af4e005 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils.go @@ -0,0 +1,277 @@ +package readline + +import ( + "bufio" + "bytes" + "container/list" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + "unicode" +) + +var ( + isWindows = false +) + +const ( + CharLineStart = 1 + CharBackward = 2 + CharInterrupt = 3 + CharDelete = 4 + CharLineEnd = 5 + CharForward = 6 + CharBell = 7 + CharCtrlH = 8 + CharTab = 9 + CharCtrlJ = 10 + CharKill = 11 + CharCtrlL = 12 + CharEnter = 13 + CharNext = 14 + CharPrev = 16 + CharBckSearch = 18 + CharFwdSearch = 19 + CharTranspose = 20 + CharCtrlU = 21 + CharCtrlW = 23 + CharCtrlY = 25 + CharCtrlZ = 26 + CharEsc = 27 + CharEscapeEx = 91 + CharBackspace = 127 +) + +const ( + MetaBackward rune = -iota - 1 + MetaForward + MetaDelete + MetaBackspace + MetaTranspose +) + +// WaitForResume need to call before current process got suspend. +// It will run a ticker until a long duration is occurs, +// which means this process is resumed. +func WaitForResume() chan struct{} { + ch := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + t := time.Now() + wg.Done() + for { + now := <-ticker.C + if now.Sub(t) > 100*time.Millisecond { + break + } + t = now + } + ticker.Stop() + ch <- struct{}{} + }() + wg.Wait() + return ch +} + +func Restore(fd int, state *State) error { + err := restoreTerm(fd, state) + if err != nil { + // errno 0 means everything is ok :) + if err.Error() == "errno 0" { + return nil + } else { + return err + } + } + return nil +} + +func IsPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// translate Esc[X +func escapeExKey(key *escapeKeyPair) rune { + var r rune + switch key.typ { + case 'D': + r = CharBackward + case 'C': + r = CharForward + case 'A': + r = CharPrev + case 'B': + r = CharNext + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + case '~': + if key.attr == "3" { + r = CharDelete + } + default: + } + return r +} + +type escapeKeyPair struct { + attr string + typ rune +} + +func (e *escapeKeyPair) Get2() (int, int, bool) { + sp := strings.Split(e.attr, ";") + if len(sp) < 2 { + return -1, -1, false + } + s1, err := strconv.Atoi(sp[0]) + if err != nil { + return -1, -1, false + } + s2, err := strconv.Atoi(sp[1]) + if err != nil { + return -1, -1, false + } + return s1, s2, true +} + +func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { + p := escapeKeyPair{} + buf := bytes.NewBuffer(nil) + for { + if r == ';' { + } else if unicode.IsNumber(r) { + } else { + p.typ = r + break + } + buf.WriteRune(r) + r, _, _ = reader.ReadRune() + } + p.attr = buf.String() + return &p +} + +// translate EscX to Meta+X +func escapeKey(r rune, reader *bufio.Reader) rune { + switch r { + case 'b': + r = MetaBackward + case 'f': + r = MetaForward + case 'd': + r = MetaDelete + case CharTranspose: + r = MetaTranspose + case CharBackspace: + r = MetaBackspace + case 'O': + d, _, _ := reader.ReadRune() + switch d { + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + default: + reader.UnreadRune() + } + case CharEsc: + + } + return r +} + +func SplitByLine(start, screenWidth int, rs []rune) []string { + var ret []string + buf := bytes.NewBuffer(nil) + currentWidth := start + for _, r := range rs { + w := runes.Width(r) + currentWidth += w + buf.WriteRune(r) + if currentWidth >= screenWidth { + ret = append(ret, buf.String()) + 
buf.Reset() + currentWidth = 0 + } + } + ret = append(ret, buf.String()) + return ret +} + +// calculate how many lines for N character +func LineCount(screenWidth, w int) int { + r := w / screenWidth + if w%screenWidth != 0 { + r++ + } + return r +} + +func IsWordBreak(i rune) bool { + switch { + case i >= 'a' && i <= 'z': + case i >= 'A' && i <= 'Z': + case i >= '0' && i <= '9': + default: + return true + } + return false +} + +func GetInt(s []string, def int) int { + if len(s) == 0 { + return def + } + c, err := strconv.Atoi(s[0]) + if err != nil { + return def + } + return c +} + +type RawMode struct { + state *State +} + +func (r *RawMode) Enter() (err error) { + r.state, err = MakeRaw(GetStdin()) + return err +} + +func (r *RawMode) Exit() error { + if r.state == nil { + return nil + } + return Restore(GetStdin(), r.state) +} + +// ----------------------------------------------------------------------------- + +func sleep(n int) { + Debug(n) + time.Sleep(2000 * time.Millisecond) +} + +// print a linked list to Debug() +func debugList(l *list.List) { + idx := 0 + for e := l.Front(); e != nil; e = e.Next() { + Debug(idx, fmt.Sprintf("%+v", e.Value)) + idx++ + } +} + +// append log info to another file +func Debug(o ...interface{}) { + f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + fmt.Fprintln(f, o...) + f.Close() +} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go new file mode 100644 index 0000000..f88dac9 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_unix.go @@ -0,0 +1,83 @@ +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +package readline + +import ( + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +// SuspendMe use to send suspend signal to myself, when we in the raw mode. 
+// For OSX it need to send to parent's pid +// For Linux it need to send to myself +func SuspendMe() { + p, _ := os.FindProcess(os.Getppid()) + p.Signal(syscall.SIGTSTP) + p, _ = os.FindProcess(os.Getpid()) + p.Signal(syscall.SIGTSTP) +} + +// get width of the terminal +func getWidth(stdoutFd int) int { + cols, _, err := GetSize(stdoutFd) + if err != nil { + return -1 + } + return cols +} + +func GetScreenWidth() int { + w := getWidth(syscall.Stdout) + if w < 0 { + w = getWidth(syscall.Stderr) + } + return w +} + +// ClearScreen clears the console screen +func ClearScreen(w io.Writer) (int, error) { + return w.Write([]byte("\033[H")) +} + +func DefaultIsTerminal() bool { + return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) +} + +func GetStdin() int { + return syscall.Stdin +} + +// ----------------------------------------------------------------------------- + +var ( + widthChange sync.Once + widthChangeCallback func() +) + +func DefaultOnWidthChanged(f func()) { + widthChangeCallback = f + widthChange.Do(func() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGWINCH) + + go func() { + for { + _, ok := <-ch + if !ok { + break + } + widthChangeCallback() + } + }() + }) +} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go new file mode 100644 index 0000000..5bfa55d --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package readline + +import ( + "io" + "syscall" +) + +func SuspendMe() { +} + +func GetStdin() int { + return int(syscall.Stdin) +} + +func init() { + isWindows = true +} + +// get width of the terminal +func GetScreenWidth() int { + info, _ := GetConsoleScreenBufferInfo() + if info == nil { + return -1 + } + return int(info.dwSize.x) +} + +// ClearScreen clears the console screen +func ClearScreen(_ io.Writer) error { + return SetConsoleCursorPosition(&_COORD{0, 0}) +} + +func DefaultIsTerminal() bool { + return true +} + +func DefaultOnWidthChanged(func()) { + +} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go new file mode 100644 index 0000000..bedf2c1 --- /dev/null +++ b/vendor/github.com/chzyer/readline/vim.go @@ -0,0 +1,176 @@ +package readline + +const ( + VIM_NORMAL = iota + VIM_INSERT + VIM_VISUAL +) + +type opVim struct { + cfg *Config + op *Operation + vimMode int +} + +func newVimMode(op *Operation) *opVim { + ov := &opVim{ + cfg: op.cfg, + op: op, + } + ov.SetVimMode(ov.cfg.VimMode) + return ov +} + +func (o *opVim) SetVimMode(on bool) { + if o.cfg.VimMode && !on { // turn off + o.ExitVimMode() + } + o.cfg.VimMode = on + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) IsEnableVimMode() bool { + return o.cfg.VimMode +} + +func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'h': + t = CharBackward + case 'j': + t = CharNext + case 'k': + t = CharPrev + case 'l': + t = CharForward + case '0', '^': + rb.MoveToLineStart() + case '$': + rb.MoveToLineEnd() + case 'x': + rb.Delete() + if rb.IsCursorInEnd() { + rb.MoveBackward() + } + case 'r': + rb.Replace(readNext()) + case 'd': + next := readNext() + switch next { + case 'd': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + case 'p': + rb.Yank() + case 'b', 'B': + rb.MoveToPrevWord() + 
case 'w', 'W': + rb.MoveToNextWord() + case 'e', 'E': + rb.MoveToEndWord() + case 'f', 'F', 't', 'T': + next := readNext() + prevChar := r == 't' || r == 'T' + reverse := r == 'F' || r == 'T' + switch next { + case CharEsc: + default: + rb.MoveTo(next, prevChar, reverse) + } + default: + return r, false + } + return t, true +} + +func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'i': + case 'I': + rb.MoveToLineStart() + case 'a': + rb.MoveForward() + case 'A': + rb.MoveToLineEnd() + case 's': + rb.Delete() + case 'S': + rb.Erase() + case 'c': + next := readNext() + switch next { + case 'c': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + default: + return r, false + } + + o.EnterVimInsertMode() + return +} + +func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { + switch r { + case CharEnter, CharInterrupt: + o.ExitVimMode() + return r + } + + if r, handled := o.handleVimNormalMovement(r, readNext); handled { + return r + } + + if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { + return r + } + + // invalid operation + o.op.t.Bell() + return 0 +} + +func (o *opVim) EnterVimInsertMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimInsertMode() { + o.vimMode = VIM_NORMAL +} + +func (o *opVim) HandleVim(r rune, readNext func() rune) rune { + if o.vimMode == VIM_NORMAL { + return o.HandleVimNormal(r, readNext) + } + if r == CharEsc { + o.ExitVimInsertMode() + return 0 + } + + switch o.vimMode { + case VIM_INSERT: + return r + case VIM_VISUAL: + } + return r +} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go new file mode 100644 index 0000000..63f4f7b --- /dev/null +++ b/vendor/github.com/chzyer/readline/windows_api.go @@ -0,0 +1,152 @@ +// +build windows + +package readline + +import ( + "reflect" + "syscall" + "unsafe" +) + +var ( + kernel = NewKernel() + stdout = uintptr(syscall.Stdout) + stdin = uintptr(syscall.Stdin) +) + +type Kernel struct { + SetConsoleCursorPosition, + SetConsoleTextAttribute, + FillConsoleOutputCharacterW, + FillConsoleOutputAttribute, + ReadConsoleInputW, + GetConsoleScreenBufferInfo, + GetConsoleCursorInfo, + GetStdHandle CallFunc +} + +type short int16 +type word uint16 +type dword uint32 +type wchar uint16 + +type _COORD struct { + x short + y short +} + +func (c *_COORD) ptr() uintptr { + return uintptr(*(*int32)(unsafe.Pointer(c))) +} + +const ( + EVENT_KEY = 0x0001 + EVENT_MOUSE = 0x0002 + EVENT_WINDOW_BUFFER_SIZE = 0x0004 + EVENT_MENU = 0x0008 + EVENT_FOCUS = 0x0010 +) + +type _KEY_EVENT_RECORD struct { + bKeyDown int32 + wRepeatCount word + wVirtualKeyCode word + wVirtualScanCode word + unicodeChar wchar + dwControlKeyState dword +} + +// KEY_EVENT_RECORD KeyEvent; +// MOUSE_EVENT_RECORD MouseEvent; +// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; +// MENU_EVENT_RECORD MenuEvent; +// FOCUS_EVENT_RECORD FocusEvent; +type _INPUT_RECORD struct { + EventType word + Padding uint16 + Event [16]byte +} + +type _CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize _COORD + dwCursorPosition _COORD + wAttributes word + srWindow _SMALL_RECT + dwMaximumWindowSize _COORD +} + +type _SMALL_RECT struct { + left short + top short + right short + bottom short +} + +type _CONSOLE_CURSOR_INFO struct { + dwSize dword + bVisible bool +} + +type CallFunc func(u ...uintptr) error + +func NewKernel() *Kernel { + k := 
&Kernel{} + kernel32 := syscall.NewLazyDLL("kernel32.dll") + v := reflect.ValueOf(k).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + f := kernel32.NewProc(name) + v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) + } + return k +} + +func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { + return func(args ...uintptr) error { + var r0 uintptr + var e1 syscall.Errno + size := uintptr(len(args)) + if len(args) <= 3 { + buf := make([]uintptr, 3) + copy(buf, args) + r0, _, e1 = syscall.Syscall(p.Addr(), size, + buf[0], buf[1], buf[2]) + } else { + buf := make([]uintptr, 6) + copy(buf, args) + r0, _, e1 = syscall.Syscall6(p.Addr(), size, + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], + ) + } + + if int(r0) == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil + } + +} + +func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { + t := new(_CONSOLE_SCREEN_BUFFER_INFO) + err := kernel.GetConsoleScreenBufferInfo( + stdout, + uintptr(unsafe.Pointer(t)), + ) + return t, err +} + +func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { + t := new(_CONSOLE_CURSOR_INFO) + err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) + return t, err +} + +func SetConsoleCursorPosition(c *_COORD) error { + return kernel.SetConsoleCursorPosition(stdout, c.ptr()) +} diff --git a/vendor/github.com/coreos/go-systemd/v22/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/v22/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE new file mode 100644 index 0000000..23a0ada --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go new file mode 100644 index 0000000..ba4ae31 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/daemon/sdnotify.go @@ -0,0 +1,84 @@ +// Copyright 2014 Docker, Inc. +// Copyright 2015-2018 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package daemon provides a Go implementation of the sd_notify protocol. +// It can be used to inform systemd of service start-up completion, watchdog +// events, and other status changes. +// +// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description +package daemon + +import ( + "net" + "os" +) + +const ( + // SdNotifyReady tells the service manager that service startup is finished + // or the service finished loading its configuration. + SdNotifyReady = "READY=1" + + // SdNotifyStopping tells the service manager that the service is beginning + // its shutdown. + SdNotifyStopping = "STOPPING=1" + + // SdNotifyReloading tells the service manager that this service is + // reloading its configuration. Note that you must call SdNotifyReady when + // it completed reloading. + SdNotifyReloading = "RELOADING=1" + + // SdNotifyWatchdog tells the service manager to update the watchdog + // timestamp for the service. + SdNotifyWatchdog = "WATCHDOG=1" +) + +// SdNotify sends a message to the init daemon. It is common to ignore the error. +// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET` +// will be unconditionally unset. +// +// It returns one of the following: +// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset) +// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data) +// (true, nil) - notification supported, data has been sent +func SdNotify(unsetEnvironment bool, state string) (bool, error) { + socketAddr := &net.UnixAddr{ + Name: os.Getenv("NOTIFY_SOCKET"), + Net: "unixgram", + } + + // NOTIFY_SOCKET not set + if socketAddr.Name == "" { + return false, nil + } + + if unsetEnvironment { + if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil { + return false, err + } + } + + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + // Error connecting to NOTIFY_SOCKET + if err != nil { + return false, err + } + defer conn.Close() + + if _, err = conn.Write([]byte(state)); err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go new file mode 100644 index 0000000..7a0e0d3 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/v22/daemon/watchdog.go @@ -0,0 +1,73 @@ +// Copyright 2016 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package daemon + +import ( + "fmt" + "os" + "strconv" + "time" +) + +// SdWatchdogEnabled returns watchdog information for a service. 
+// Processes should call daemon.SdNotify(false, daemon.SdNotifyWatchdog) every +// time / 2. +// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC` and +// `WATCHDOG_PID` will be unconditionally unset. +// +// It returns one of the following: +// (0, nil) - watchdog isn't enabled or we aren't the watched PID. +// (0, err) - an error happened (e.g. error converting time). +// (time, nil) - watchdog is enabled and we can send ping. +// time is delay before inactive service will be killed. +func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) { + wusec := os.Getenv("WATCHDOG_USEC") + wpid := os.Getenv("WATCHDOG_PID") + if unsetEnvironment { + wusecErr := os.Unsetenv("WATCHDOG_USEC") + wpidErr := os.Unsetenv("WATCHDOG_PID") + if wusecErr != nil { + return 0, wusecErr + } + if wpidErr != nil { + return 0, wpidErr + } + } + + if wusec == "" { + return 0, nil + } + s, err := strconv.Atoi(wusec) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err) + } + if s <= 0 { + return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number") + } + interval := time.Duration(s) * time.Microsecond + + if wpid == "" { + return interval, nil + } + p, err := strconv.Atoi(wpid) + if err != nil { + return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err) + } + if os.Getpid() != p { + return 0, nil + } + + return interval, nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md new file mode 100644 index 0000000..1cade6c --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Brian Goff + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 0000000..b480056 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,14 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions())}...) 
+} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 0000000..0668a66 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,345 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + defineTerm bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB\\fC" + codespanCloseTag = "\\fR" + codeTag = "\n.PP\n.RS\n\n.nf\n" + codeCloseTag = "\n.fi\n.RE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + arglistTag = "\n.TP\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + +// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + + var walkAction = blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + r.handleText(w, node, entering) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + if !entering { + out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + } + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if r.listDepth 
> 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + default: + fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) + } + return walkAction +} + +func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + // handle special roff table cell text encapsulation + if node.Parent.Type == blackfriday.TableCell { + if len(node.Literal) > 30 { + start = tableCellStart + end = tableCellEnd + } else { + // end rows that aren't terminated by "tableCellEnd" with a cr if end of row + if node.Parent.Next == nil && !node.Parent.IsHeader { + end = crTag + } + } + } + out(w, start) + escapeSpecialChars(w, node.Literal) + out(w, end) +} + +func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + switch node.Level { + case 1: + if !r.firstHeader { + out(w, titleHeader) + r.firstHeader = true + break + } + out(w, topLevelHeader) + case 2: + out(w, secondLevelHdr) + default: + out(w, otherHeader) + } + } +} + +func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { + openTag := listTag + closeTag := listCloseTag + if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // tags for definition lists handled within Item node + openTag = "" + closeTag = "" + } + if entering { + r.listDepth++ + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = append(r.listCounters, 1) + } + out(w, openTag) + } else { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = r.listCounters[:len(r.listCounters)-1] + } + out(w, closeTag) + r.listDepth-- + } +} + +func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) + r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // state machine for handling terms and following definitions + // since blackfriday does not distinguish them properly, nor + // does it seperate them into separate lists as it should + if !r.defineTerm { + out(w, arglistTag) + r.defineTerm = true + } else { + r.defineTerm = false + } + } else { + out(w, ".IP \\(bu 2\n") + } + } else { + out(w, "\n") + } +} + +func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + out(w, tableStart) + //call walker to count cells (and rows?) 
so format section can be produced + columns := countColumns(node) + out(w, strings.Repeat("l ", columns)+"\n") + out(w, strings.Repeat("l ", columns)+".\n") + } else { + out(w, tableEnd) + } +} + +func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + if node.IsHeader { + start = codespanTag + end = codespanCloseTag + } + if entering { + if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { + out(w, "\t"+start) + } else { + out(w, start) + } + } else { + // need to carriage return if we are at the end of the header row + if node.IsHeader && node.Next == nil { + end = end + crTag + } + out(w, end) + } +} + +// because roff format requires knowing the column count before outputting any table +// data we need to walk a table tree and count the columns +func countColumns(node *blackfriday.Node) int { + var columns int + + node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.TableRow: + if !entering { + return blackfriday.Terminate + } + case blackfriday.TableCell: + if entering { + columns++ + } + default: + } + return blackfriday.GoToNext + }) + return columns +} + +func out(w io.Writer, output string) { + io.WriteString(w, output) // nolint: errcheck +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(w io.Writer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out(w, "\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + w.Write(text[org:i]) // nolint: errcheck + } + + // escape a character + if i >= len(text) { + break + } + + w.Write([]byte{'\\', text[i]}) // nolint: errcheck + } +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/AUTHORS b/vendor/github.com/danieldin95/go-openvswitch/AUTHORS new file mode 100644 index 0000000..efb22b9 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/AUTHORS @@ -0,0 +1,16 @@ +Maintainer +---------- +DigitalOcean, Inc + +Original Authors +---------------- +Matt Layher + +Contributors +------------ +Michael Ben-Ami +Tejas Kokje +Kei Nohguchi +Neal Shrader +Sangeetha Srikanth +Franck Rupin diff --git a/vendor/github.com/danieldin95/go-openvswitch/LICENSE.md b/vendor/github.com/danieldin95/go-openvswitch/LICENSE.md new file mode 100644 index 0000000..84a5a44 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/LICENSE.md @@ -0,0 +1,195 @@ +Apache License +============== + +_Version 2.0, January 2004_ +_<>_ + +### Terms and Conditions for use, reproduction, and distribution + +#### 1. Definitions + +“License” shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +“Licensor” shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +“Legal Entity” shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, “control” means **(i)** the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the +outstanding shares, or **(iii)** beneficial ownership of such entity. + +“You” (or “Your”) shall mean an individual or Legal Entity exercising +permissions granted by this License. + +“Source” form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +“Object” form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +“Work” shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +“Derivative Works” shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +“Contribution” shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +“submitted” means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as “Not a Contribution.” + +“Contributor” shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +#### 2. Grant of Copyright License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +#### 3. 
Grant of Patent License + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +#### 4. Redistribution + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +* **(a)** You must give any other recipients of the Work or Derivative Works a copy of +this License; and +* **(b)** You must cause any modified files to carry prominent notices stating that You +changed the files; and +* **(c)** You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. + +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +#### 5. Submission of Contributions + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +#### 6. 
Trademarks + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +#### 7. Disclaimer of Warranty + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +#### 8. Limitation of Liability + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +#### 9. Accepting Warranty or Additional Liability + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +_END OF TERMS AND CONDITIONS_ + +### APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets `[]` replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same “printed page” as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/README.md b/vendor/github.com/danieldin95/go-openvswitch/ovs/README.md new file mode 100644 index 0000000..bc8bfc6 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/README.md @@ -0,0 +1,31 @@ +ovs +=== + +Package `ovs` is a client library for Open vSwitch which enables programmatic +control of the virtual switch. + +Package `ovs` is a wrapper around the `ovs-vsctl` and `ovs-ofctl` utilities, but +in the future, it may speak OVSDB and OpenFlow directly with the same interface. + +```go +// Create a *ovs.Client. Specify ovs.OptionFuncs to customize it. +c := ovs.New( + // Prepend "sudo" to all commands. + ovs.Sudo(), +) + +// $ sudo ovs-vsctl --may-exist add-br ovsbr0 +if err := c.VSwitch.AddBridge("ovsbr0"); err != nil { + log.Fatalf("failed to add bridge: %v", err) +} + +// $ sudo ovs-ofctl add-flow ovsbr0 priority=100,ip,actions=drop +err := c.OpenFlow.AddFlow("ovsbr0", &ovs.Flow{ + Priority: 100, + Protocol: ovs.ProtocolIPv4, + Actions: []ovs.Action{ovs.Drop()}, +}) +if err != nil { + log.Fatalf("failed to add flow: %v", err) +} +``` diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/action.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/action.go new file mode 100644 index 0000000..6bf17b7 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/action.go @@ -0,0 +1,703 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "encoding" + "errors" + "fmt" + "net" + "strconv" +) + +var ( + // errCTNoArguments is returned when no arguments are passed to ActionCT. + errCTNoArguments = errors.New("no arguments for connection tracking") + + // errInvalidIPv6Label is returned when an input IPv6 label is out of + // range. It should only use the first 20 bits of the 32 bit field. + errInvalidIPv6Label = errors.New("IPv6 label must only use 20 bits") + + // errInvalidARPOP is returned when an input ARP OP is out of + // range. It should be in the range 1-4. + errInvalidARPOP = errors.New("ARP OP must in the range 1-4") + + // errInvalidVLANVID is returned when an input VLAN VID is out of range + // for a valid VLAN VID. + errInvalidVLANVID = errors.New("VLAN VID must be between 0 and 4095") + + // errInvalidVLANVIDPCP is returned when an input VLAN PCP is out of range + // for a valid VLAN PCP. + errInvalidVLANPCP = errors.New("VLAN PCP must be between 0 and 7") + + // errOutputNegativePort is returned when Output is called with a + // negative integer port. + errOutputNegativePort = errors.New("output port number must not be negative") + + // errResubmitPortTableZero is returned when Resubmit is called with + // both port and table value set to zero. + errResubmitPortTableZero = errors.New("both port and table are zero for action resubmit") + + // errLoadSetFieldZero is returned when Load or SetField is called with value and/or + // field set to empty strings. 
+ errLoadSetFieldZero = errors.New("value and/or field for action load or set_field are empty") + + // errResubmitPortInvalid is returned when ResubmitPort is given a port number that is + // invalid per the openflow spec. + errResubmitPortInvalid = errors.New("resubmit port must be between 0 and 65279 inclusive") + + // errTooManyDimensions is returned when the specified dimension exceeds the total dimension + // in a conjunction action. + errDimensionTooLarge = errors.New("dimension number exceeds total number of dimensions") + + // errMoveEmpty is returned when Move is called with src and/or dst set to the empty string. + errMoveEmpty = errors.New("src and/or dst field for action move are empty") + + // errOutputFieldEmpty is returned when OutputField is called with field set to the empty string. + errOutputFieldEmpty = errors.New("field for action output (output:field syntax) is empty") + + // errLearnedNil is returned when Learn is called with a nil *LearnedFlow. + errLearnedNil = errors.New("learned flow for action learn is nil") +) + +// Action strings in lower case, as those are compared to the lower case letters +// in parseAction(). +const ( + actionAll = "all" + actionDrop = "drop" + actionFlood = "flood" + actionInPort = "in_port" + actionLocal = "local" + actionNormal = "normal" + actionStripVLAN = "strip_vlan" +) + +// An Action is a type which can be marshaled into an OpenFlow action. Actions can be +// used with Flows to perform operations when the Flow matches an input packet. +// +// Actions must also implement fmt.GoStringer for code generation purposes. +type Action interface { + encoding.TextMarshaler + fmt.GoStringer +} + +// A textAction is an Action which is referred to by a name only, with no arguments. +type textAction struct { + action string +} + +// MarshalText implements Action. +func (a *textAction) MarshalText() ([]byte, error) { + return []byte(a.action), nil +} + +// GoString implements Action. +func (a *textAction) GoString() string { + switch a.action { + case actionAll: + return "ovs.All()" + case actionDrop: + return "ovs.Drop()" + case actionFlood: + return "ovs.Flood()" + case actionInPort: + return "ovs.InPort()" + case actionLocal: + return "ovs.Local()" + case actionNormal: + return "ovs.Normal()" + case actionStripVLAN: + return "ovs.StripVLAN()" + default: + return fmt.Sprintf("// BUG(mdlayher): unimplemented OVS text action: %q", a.action) + } +} + +// All outputs the packet on all switch ports except +// the port on which it was received. +func All() Action { + return &textAction{ + action: actionAll, + } +} + +// Drop immediately discards the packet. It must be the only Action +// specified when used. +func Drop() Action { + return &textAction{ + action: actionDrop, + } +} + +// Flood outputs the packet on all switch ports other than the port on which it +// was received, which have flooding enabled. +func Flood() Action { + return &textAction{ + action: actionFlood, + } +} + +// InPort outputs the packet on the port from which it was received. +func InPort() Action { + return &textAction{ + action: actionInPort, + } +} + +// Local outputs the packet on the local port, which corresponds to +// the network device that has the same name as the bridge. +func Local() Action { + return &textAction{ + action: actionLocal, + } +} + +// Normal subjects the packet to the device's normal L2/L3 processing. +func Normal() Action { + return &textAction{ + action: actionNormal, + } +} + +// StripVLAN strips the VLAN tag from a packet, if one is present. 
+func StripVLAN() Action { + return &textAction{ + action: actionStripVLAN, + } +} + +// printf-style patterns for marshaling and unmarshaling actions. +const ( + patConnectionTracking = "ct(%s)" + patConjunction = "conjunction(%d,%d/%d)" + patModDataLinkDestination = "mod_dl_dst:%s" + patModDataLinkSource = "mod_dl_src:%s" + patModNetworkDestination = "mod_nw_dst:%s" + patModNetworkSource = "mod_nw_src:%s" + patModTransportDestinationPort = "mod_tp_dst:%d" + patModTransportSourcePort = "mod_tp_src:%d" + patModVLANVID = "mod_vlan_vid:%d" + patOutput = "output:%d" + patOutputField = "output:%s" + patResubmitPort = "resubmit:%s" + patResubmitPortTable = "resubmit(%s,%s)" + patLearn = "learn(%s)" +) + +// ConnectionTracking sends a packet through the host's connection tracker. +func ConnectionTracking(args string) Action { + return &ctAction{ + args: args, + } +} + +// A ctAction is an Action which is used by ConneectionTracking. +type ctAction struct { + // TODO(mdlayher): implement arguments type for ct() actions + args string +} + +// MarshalText implements Action. +func (a *ctAction) MarshalText() ([]byte, error) { + if a.args == "" { + return nil, errCTNoArguments + } + + return bprintf(patConnectionTracking, a.args), nil +} + +// GoString implements Action. +func (a *ctAction) GoString() string { + return fmt.Sprintf("ovs.ConnectionTracking(%q)", a.args) +} + +// ModDataLinkDestination modifies the data link destination of a packet. +func ModDataLinkDestination(addr net.HardwareAddr) Action { + return &modDataLinkAction{ + srcdst: destination, + addr: addr, + } +} + +// ModDataLinkSource modifies the data link source of a packet. +func ModDataLinkSource(addr net.HardwareAddr) Action { + return &modDataLinkAction{ + srcdst: source, + addr: addr, + } +} + +// A modDataLinkAction is an Action which is used by +// ModDataLink{Source,Destination}. +type modDataLinkAction struct { + srcdst string + addr net.HardwareAddr +} + +// MarshalText implements Action. +func (a *modDataLinkAction) MarshalText() ([]byte, error) { + if len(a.addr) != ethernetAddrLen { + return nil, fmt.Errorf("hardware address must be %d octets, but got %d", + ethernetAddrLen, len(a.addr)) + } + + if a.srcdst == source { + return bprintf(patModDataLinkSource, a.addr.String()), nil + } + + return bprintf(patModDataLinkDestination, a.addr.String()), nil +} + +// GoString implements Action. +func (a *modDataLinkAction) GoString() string { + if a.srcdst == source { + return fmt.Sprintf("ovs.ModDataLinkSource(%s)", hwAddrGoString(a.addr)) + } + + return fmt.Sprintf("ovs.ModDataLinkDestination(%s)", hwAddrGoString(a.addr)) +} + +// ModNetworkDestination modifies the destination IPv4 address of a packet. +func ModNetworkDestination(ip net.IP) Action { + return &modNetworkAction{ + srcdst: destination, + ip: ip.To4(), + } +} + +// ModNetworkSource modifies the source IPv4 address of a packet. +func ModNetworkSource(ip net.IP) Action { + return &modNetworkAction{ + srcdst: source, + ip: ip.To4(), + } +} + +// A modNetworkAction is an Action which is used by +// ModNetwork{Source,Destination}. +type modNetworkAction struct { + srcdst string + ip net.IP +} + +// MarshalText implements Action. +func (a *modNetworkAction) MarshalText() ([]byte, error) { + if a.ip == nil { + return nil, errors.New("invalid IPv4 address for ModNetwork action") + } + + if a.srcdst == source { + return bprintf(patModNetworkSource, a.ip.String()), nil + } + + return bprintf(patModNetworkDestination, a.ip.String()), nil +} + +// GoString implements Action. 
+func (a *modNetworkAction) GoString() string { + if a.srcdst == source { + return fmt.Sprintf("ovs.ModNetworkSource(%s)", ipv4GoString(a.ip)) + } + + return fmt.Sprintf("ovs.ModNetworkDestination(%s)", ipv4GoString(a.ip)) +} + +// ModTransportDestinationPort modifies the destination port of a packet. +func ModTransportDestinationPort(port uint16) Action { + return &modTransportPortAction{ + srcdst: destination, + port: port, + } +} + +// ModTransportSourcePort modifies the source port of a packet. +func ModTransportSourcePort(port uint16) Action { + return &modTransportPortAction{ + srcdst: source, + port: port, + } +} + +// A modTransportPortAction is an Action which is used by +// ModTransport{Source,Destination}Port. +type modTransportPortAction struct { + srcdst string + port uint16 +} + +// MarshalText implements Action. +func (a *modTransportPortAction) MarshalText() ([]byte, error) { + if a.srcdst == source { + return bprintf(patModTransportSourcePort, a.port), nil + } + + return bprintf(patModTransportDestinationPort, a.port), nil +} + +// GoString implements Action. +func (a *modTransportPortAction) GoString() string { + if a.srcdst == source { + return fmt.Sprintf("ovs.ModTransportSourcePort(%d)", a.port) + } + + return fmt.Sprintf("ovs.ModTransportDestinationPort(%d)", a.port) +} + +// ModVLANVID modifies the VLAN ID (VID) on a packet. It adds a VLAN +// tag if one is not already present. vid must be a valid VLAN VID, within +// the range of 0 to 4095. +func ModVLANVID(vid int) Action { + return &modVLANVIDAction{ + vid: vid, + } +} + +// A modVLANVIDAction is an Action which is used by ModVLANVID. +type modVLANVIDAction struct { + vid int +} + +// MarshalText implements Action. +func (a *modVLANVIDAction) MarshalText() ([]byte, error) { + if !validVLANVID(a.vid) { + return nil, errInvalidVLANVID + } + + return bprintf(patModVLANVID, a.vid), nil +} + +// GoString implements Action. +func (a *modVLANVIDAction) GoString() string { + return fmt.Sprintf("ovs.ModVLANVID(%d)", a.vid) +} + +// Output outputs the packet to the specified switch port. Use +// InPortLocal to output the packet to the LOCAL port. port must either +// be a non-negative integer. +func Output(port int) Action { + return &outputAction{ + port: port, + } +} + +// An outputAction is an Action which is used by Output. +type outputAction struct { + port int +} + +// MarshalText implements Action. +func (a *outputAction) MarshalText() ([]byte, error) { + if a.port < 0 { + return nil, errOutputNegativePort + } + + return bprintf(patOutput, a.port), nil +} + +// GoString implements Action. +func (a *outputAction) GoString() string { + return fmt.Sprintf("ovs.Output(%d)", a.port) +} + +// OutputField outputs the packet to the switch port described by the specified field. +// For example, when the `field` value is "in_port", the packet is output to the port +// it came in on. +func OutputField(field string) Action { + return &outputFieldAction{ + field: field, + } +} + +// An outputFieldAction is an Action which is used by OutputField. +type outputFieldAction struct { + field string +} + +// MarshalText implements Action. +func (a *outputFieldAction) MarshalText() ([]byte, error) { + if a.field == "" { + return nil, errOutputFieldEmpty + } + + return bprintf(patOutputField, a.field), nil +} + +// GoString implements Action. 
+func (a *outputFieldAction) GoString() string {
+	return fmt.Sprintf("ovs.OutputField(%q)", a.field)
+}
+
+// Conjunction associates a flow with a certain conjunction ID to match on more than
+// one dimension across multiple set matches.
+func Conjunction(id int, dimensionNumber int, dimensionSize int) Action {
+	return &conjunctionAction{
+		id:              id,
+		dimensionNumber: dimensionNumber,
+		dimensionSize:   dimensionSize,
+	}
+}
+
+// A conjunctionAction is an Action which is used by Conjunction.
+type conjunctionAction struct {
+	id              int
+	dimensionNumber int
+	dimensionSize   int
+}
+
+// MarshalText implements Action.
+func (a *conjunctionAction) MarshalText() ([]byte, error) {
+	if a.dimensionNumber > a.dimensionSize {
+		return nil, errDimensionTooLarge
+	}
+
+	return bprintf(patConjunction, a.id, a.dimensionNumber, a.dimensionSize), nil
+}
+
+// GoString implements Action.
+func (a *conjunctionAction) GoString() string {
+	return fmt.Sprintf("ovs.Conjunction(%d, %d, %d)", a.id, a.dimensionNumber, a.dimensionSize)
+}
+
+// Resubmit resubmits a packet for further processing by matching
+// flows with the specified port and table. If port or table are zero,
+// they are set to empty in the output Action. If both are zero, an
+// error is returned.
+func Resubmit(port int, table int) Action {
+	return &resubmitAction{
+		port:  port,
+		table: table,
+	}
+}
+
+// A resubmitAction is an Action which is used by Resubmit.
+type resubmitAction struct {
+	port  int
+	table int
+}
+
+// ResubmitPort resubmits a packet into the current table with its context modified
+// to look like it originated from the specified openflow port ID.
+func ResubmitPort(port int) Action {
+	return &resubmitPortAction{
+		port: port,
+	}
+}
+
+// A resubmitPortAction is an Action which is used by ResubmitPort.
+type resubmitPortAction struct {
+	port int
+}
+
+// MarshalText implements Action.
+func (a *resubmitPortAction) MarshalText() ([]byte, error) {
+	// Largest valid port ID is 0xfffeff per openflow spec.
+	if a.port < 0 || a.port > 0xfffeff {
+		return nil, errResubmitPortInvalid
+	}
+
+	p := strconv.Itoa(a.port)
+
+	return bprintf(patResubmitPort, p), nil
+}
+
+// GoString implements Action.
+func (a *resubmitPortAction) GoString() string {
+	return fmt.Sprintf("ovs.ResubmitPort(%d)", a.port)
+}
+
+// MarshalText implements Action.
+func (a *resubmitAction) MarshalText() ([]byte, error) {
+	if a.port == 0 && a.table == 0 {
+		return nil, errResubmitPortTableZero
+	}
+
+	p := ""
+	if a.port != 0 {
+		p = strconv.Itoa(a.port)
+	}
+
+	t := ""
+	if a.table != 0 {
+		t = strconv.Itoa(a.table)
+	}
+
+	return bprintf(patResubmitPortTable, p, t), nil
+}
+
+// GoString implements Action.
+func (a *resubmitAction) GoString() string {
+	return fmt.Sprintf("ovs.Resubmit(%d, %d)", a.port, a.table)
+}
+
+// SetField overwrites the specified field with the specified value.
+// If either string is empty, an error is returned.
+func SetField(value string, field string) Action {
+	return &loadSetFieldAction{
+		value: value,
+		field: field,
+		typ:   actionSetField,
+	}
+}
+
+// Load loads the specified value into the specified field.
+// If either string is empty, an error is returned.
+func Load(value string, field string) Action {
+	return &loadSetFieldAction{
+		value: value,
+		field: field,
+		typ:   actionLoad,
+	}
+}
+
+// Specifies whether SetField or Load was called to construct a
+// loadSetFieldAction.
+const ( + actionSetField = iota + actionLoad +) + +// A loadSetFieldAction is an Action which is used by Load and SetField. +type loadSetFieldAction struct { + value string + field string + typ int +} + +// MarshalText implements Action. +func (a *loadSetFieldAction) MarshalText() ([]byte, error) { + if a.value == "" || a.field == "" { + return nil, errLoadSetFieldZero + } + + if a.typ == actionLoad { + return bprintf("load:%s->%s", a.value, a.field), nil + } + + return bprintf("set_field:%s->%s", a.value, a.field), nil +} + +// GoString implements Action. +func (a *loadSetFieldAction) GoString() string { + if a.typ == actionLoad { + return fmt.Sprintf("ovs.Load(%q, %q)", a.value, a.field) + } + + return fmt.Sprintf("ovs.SetField(%q, %q)", a.value, a.field) +} + +// SetTunnel sets the tunnel id, e.g. VNI if vxlan is the tunnel protocol. +func SetTunnel(tunnelID uint64) Action { + return &setTunnelAction{ + tunnelID: tunnelID, + } +} + +// A setTunnelAction is an Action used by SetTunnel. +type setTunnelAction struct { + tunnelID uint64 +} + +// GoString implements Action. +func (a *setTunnelAction) GoString() string { + return fmt.Sprintf("ovs.SetTunnel(%#x)", a.tunnelID) +} + +// MarshalText implements Action. +func (a *setTunnelAction) MarshalText() ([]byte, error) { + return bprintf("set_tunnel:%#x", a.tunnelID), nil +} + +// Move sets the value of the destination field to the value of the source field. +func Move(src, dst string) Action { + return &moveAction{ + src: src, + dst: dst, + } +} + +// A moveAction is an Action used by Move. +type moveAction struct { + src string + dst string +} + +// GoString implements Action. +func (a *moveAction) GoString() string { + return fmt.Sprintf("ovs.Move(%q, %q)", a.src, a.dst) +} + +// MarshalText implements Action. +func (a *moveAction) MarshalText() ([]byte, error) { + if a.src == "" || a.dst == "" { + return nil, errMoveEmpty + } + + return bprintf("move:%s->%s", a.src, a.dst), nil +} + +// Learn dynamically installs a LearnedFlow. +func Learn(learned *LearnedFlow) Action { + return &learnAction{ + learned: learned, + } +} + +// A learnAction is an Action used by Learn. +type learnAction struct { + learned *LearnedFlow +} + +// GoString implements Action. +func (a *learnAction) GoString() string { + return fmt.Sprintf("ovs.Learn(%#v)", a.learned) +} + +// MarshalText implements Action. +func (a *learnAction) MarshalText() ([]byte, error) { + if a.learned == nil { + return nil, errLearnedNil + } + + l, err := a.learned.MarshalText() + if err != nil { + return nil, err + } + + return bprintf(patLearn, l), nil +} + +// validARPOP indicates if an ARP OP is out of range. It should be in the range +// 1-4. +func validARPOP(op uint16) bool { + return 1 <= op && op <= 4 +} + +// validIPv6Label indicates if an IPv6 label is out of range. It should only +// use the first 20 bits of the 32 bit field. +func validIPv6Label(label uint32) bool { + return (label & 0xfff00000) == 0x00000000 +} + +// validVLANVID indicates if a VLAN VID falls within the valid range +// for a VLAN VID. +func validVLANVID(vid int) bool { + return vid >= 0x000 && vid <= 0xfff +} + +// validVLANVPCP indicates if a VLAN VID falls within the valid range +// for a VLAN VID. 
+func validVLANPCP(pcp int) bool { + return pcp >= 0 && pcp <= 7 +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/actionparser.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/actionparser.go new file mode 100644 index 0000000..ef2bbe8 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/actionparser.go @@ -0,0 +1,393 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net" + "regexp" + "strconv" + "strings" +) + +// An actionParser is a parser for OVS flow actions. +type actionParser struct { + r *bufio.Reader + s stack +} + +// newActionParser creates a new actionParser which wraps the input +// io.Reader. +func newActionParser(r io.Reader) *actionParser { + return &actionParser{ + r: bufio.NewReader(r), + s: make(stack, 0), + } +} + +// eof is a sentinel rune for end of file. +var eof = rune(0) + +// read reads a single rune from the wrapped io.Reader. It returns eof +// if no more runes are present. +func (p *actionParser) read() rune { + ch, _, err := p.r.ReadRune() + if err != nil { + return eof + } + return ch +} + +// Parse parses a slice of Actions using the wrapped io.Reader. The raw +// action strings are also returned for inspection if needed. +func (p *actionParser) Parse() ([]Action, []string, error) { + var actions []Action + var raw []string + + for { + a, r, err := p.parseAction() + if err != nil { + // No more actions remain + if err == io.EOF { + break + } + + return nil, nil, err + } + + actions = append(actions, a) + raw = append(raw, r) + } + + return actions, raw, nil +} + +// parseAction parses a single Action and its raw text from the wrapped +// io.Reader. +func (p *actionParser) parseAction() (Action, string, error) { + // Track runes encountered + var buf bytes.Buffer + + for { + ch := p.read() + + // If comma encountered and no open parentheses, at end of this + // action string + if ch == ',' && p.s.len() == 0 { + break + } + + // If EOF encountered, at end of string + if ch == eof { + // If no items in buffer, end of this action + if buf.Len() == 0 { + return nil, "", io.EOF + } + + // Parse action from buffer + break + } + + // Track open and closing parentheses using a stack to ensure + // that they are appropriately matched + switch ch { + case '(': + p.s.push() + case ')': + p.s.pop() + } + + _, _ = buf.WriteRune(ch) + } + + // Found an unmatched set of parentheses + if p.s.len() > 0 { + return nil, "", fmt.Errorf("invalid action: %q", buf.String()) + } + + s := buf.String() + act, err := parseAction(s) + return act, s, err +} + +// A stack is a basic stack with elements that have no value. +type stack []struct{} + +// len returns the current length of the stack. +func (s *stack) len() int { + return len(*s) +} + +// push adds an element to the stack. +func (s *stack) push() { + *s = append(*s, struct{}{}) +} + +// pop removes an element from the stack. 
+func (s *stack) pop() { + *s = (*s)[:s.len()-1] +} + +var ( + // resubmitRe is the regex used to match the resubmit action + // with port and table specified + resubmitRe = regexp.MustCompile(`resubmit\((\d*),(\d*)\)`) + + // resubmitPortRe is the regex used to match the resubmit action + // when only a port is specified + resubmitPortRe = regexp.MustCompile(`resubmit:(\d+)`) + + // ctRe is the regex used to match the ct action with its + // parameter list. + ctRe = regexp.MustCompile(`ct\((\S+)\)`) + + // loadRe is the regex used to match the load action + // with its parameters. + loadRe = regexp.MustCompile(`load:(\S+)->(\S+)`) + + // moveRe is the regex used to match the move action + // with its parameters. + moveRe = regexp.MustCompile(`move:(\S+)->(\S+)`) + + // setFieldRe is the regex used to match the set_field action + // with its parameters. + setFieldRe = regexp.MustCompile(`set_field:(\S+)->(\S+)`) +) + +// TODO(mdlayher): replace parsing regex with arguments parsers + +// parseAction creates an Action function from the input string. +func parseAction(s string) (Action, error) { + // Simple actions which match a basic string + switch strings.ToLower(s) { + case actionDrop: + return Drop(), nil + case actionFlood: + return Flood(), nil + case actionInPort: + return InPort(), nil + case actionLocal: + return Local(), nil + case actionNormal: + return Normal(), nil + case actionStripVLAN: + return StripVLAN(), nil + } + + // ActionCT, with its arguments + if ss := ctRe.FindAllStringSubmatch(s, 1); len(ss) > 0 && len(ss[0]) == 2 { + // Results are: + // - full string + // - arguments list + return ConnectionTracking(ss[0][1]), nil + } + + // ActionModDataLinkDestination, with its hardware address. + if strings.HasPrefix(s, patModDataLinkDestination[:len(patModDataLinkDestination)-2]) { + var addr string + n, err := fmt.Sscanf(s, patModDataLinkDestination, &addr) + if err != nil { + return nil, err + } + if n > 0 { + mac, err := net.ParseMAC(addr) + if err != nil { + return nil, err + } + + return ModDataLinkDestination(mac), nil + } + } + + // ActionModDataLinkSource, with its hardware address. + if strings.HasPrefix(s, patModDataLinkSource[:len(patModDataLinkSource)-2]) { + var addr string + n, err := fmt.Sscanf(s, patModDataLinkSource, &addr) + if err != nil { + return nil, err + } + if n > 0 { + mac, err := net.ParseMAC(addr) + if err != nil { + return nil, err + } + + return ModDataLinkSource(mac), nil + } + } + + // ActionModNetworkDestination, with it hardware address + if strings.HasPrefix(s, patModNetworkDestination[:len(patModNetworkDestination)-2]) { + var ip string + n, err := fmt.Sscanf(s, patModNetworkDestination, &ip) + if err != nil { + return nil, err + } + if n > 0 { + ip4 := net.ParseIP(ip).To4() + if ip4 == nil { + return nil, fmt.Errorf("invalid IPv4 address: %s", ip) + } + + return ModNetworkDestination(ip4), nil + } + } + + // ActionModNetworkSource, with it hardware address + if strings.HasPrefix(s, patModNetworkSource[:len(patModNetworkSource)-2]) { + var ip string + n, err := fmt.Sscanf(s, patModNetworkSource, &ip) + if err != nil { + return nil, err + } + if n > 0 { + ip4 := net.ParseIP(ip).To4() + if ip4 == nil { + return nil, fmt.Errorf("invalid IPv4 address: %s", ip) + } + + return ModNetworkSource(ip4), nil + } + } + + // ActionModTransportDestinationPort, with its port. 
+ if strings.HasPrefix(s, patModTransportDestinationPort[:len(patModTransportDestinationPort)-2]) { + var port uint16 + n, err := fmt.Sscanf(s, patModTransportDestinationPort, &port) + if err != nil { + return nil, err + } + if n > 0 { + return ModTransportDestinationPort(port), nil + } + } + + // ActionModTransportSourcePort, with its port. + if strings.HasPrefix(s, patModTransportSourcePort[:len(patModTransportSourcePort)-2]) { + var port uint16 + n, err := fmt.Sscanf(s, patModTransportSourcePort, &port) + if err != nil { + return nil, err + } + if n > 0 { + return ModTransportSourcePort(port), nil + } + } + + // ActionModVLANVID, with its VLAN ID + if strings.HasPrefix(s, patModVLANVID[:len(patModVLANVID)-2]) { + var vlan int + n, err := fmt.Sscanf(s, patModVLANVID, &vlan) + if err != nil { + return nil, err + } + if n > 0 { + return ModVLANVID(vlan), nil + } + } + + // ActionConjunction, with it's id, dimension number, and dimension size + if strings.HasPrefix(s, patConjunction[:len(patConjunction)-10]) { + var id, dimensionNumber, dimensionSize int + n, err := fmt.Sscanf(s, patConjunction, &id, &dimensionNumber, &dimensionSize) + if err != nil { + return nil, err + } + if n > 0 { + return Conjunction(id, dimensionNumber, dimensionSize), nil + } + } + + // ActionOutput, with its port number + if strings.HasPrefix(s, patOutput[:len(patOutput)-2]) { + var port int + n, err := fmt.Sscanf(s, patOutput, &port) + if err != nil { + return nil, err + } + if n > 0 { + return Output(port), nil + } + } + + // ActionResubmit, with both port number and table number + if ss := resubmitRe.FindAllStringSubmatch(s, 1); len(ss) > 0 && len(ss[0]) == 3 { + var ( + port int + table int + + err error + ) + + // Results are: + // - full string + // - port in parenthesis + // - table in parenthesis + + if s := ss[0][1]; s != "" { + port, err = strconv.Atoi(s) + if err != nil { + return nil, err + } + } + if s := ss[0][2]; s != "" { + table, err = strconv.Atoi(s) + if err != nil { + return nil, err + } + } + + return Resubmit(port, table), nil + } + + // ActionResubmitPort, with only a port number + if ss := resubmitPortRe.FindAllStringSubmatch(s, 1); len(ss) > 0 && len(ss[0]) == 2 { + port, err := strconv.Atoi(ss[0][1]) + if err != nil { + return nil, err + } + + return ResubmitPort(port), nil + } + + if ss := loadRe.FindAllStringSubmatch(s, 2); len(ss) > 0 && len(ss[0]) == 3 { + // Results are: + // - full string + // - value + // - field + return Load(ss[0][1], ss[0][2]), nil + } + + if ss := moveRe.FindAllStringSubmatch(s, 2); len(ss) > 0 && len(ss[0]) == 3 { + // Results are: + // - full string + // - value + // - field + return Move(ss[0][1], ss[0][2]), nil + } + + if ss := setFieldRe.FindAllStringSubmatch(s, 2); len(ss) > 0 && len(ss[0]) == 3 { + // Results are: + // - full string + // - value + // - field + return SetField(ss[0][1], ss[0][2]), nil + } + + return nil, fmt.Errorf("no action matched for %q", s) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/app.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/app.go new file mode 100644 index 0000000..53ef076 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/app.go @@ -0,0 +1,67 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "fmt" + "strings" +) + +// AppService runs commands that are available from ovs-appctl +type AppService struct { + c *Client +} + +// ProtoTrace runs ovs-appctl ofproto/trace on the given bridge and match flow +// with the possibility to pass extra parameters like `--ct-next` and returns a *ProtoTrace. +// Also returns err if there is any error parsing the output from ovs-appctl ofproto/trace. +func (a *AppService) ProtoTrace(bridge string, protocol Protocol, matches []Match, params ...string) (*ProtoTrace, error) { + matchFlows := []string{} + if protocol != "" { + matchFlows = append(matchFlows, string(protocol)) + } + + for _, match := range matches { + matchFlow, err := match.MarshalText() + if err != nil { + return nil, err + } + + matchFlows = append(matchFlows, string(matchFlow)) + } + + matchArg := strings.Join(matchFlows, ",") + args := []string{"ofproto/trace", bridge, matchArg} + args = append(args, params...) + out, err := a.exec(args...) + if err != nil { + return nil, err + } + + pt := &ProtoTrace{ + CommandStr: fmt.Sprintf("ovs-appctl %s", strings.Join(args, " ")), + } + err = pt.UnmarshalText(out) + if err != nil { + return nil, err + } + + return pt, nil +} + +// exec executes 'ovs-appctl' + args passed in +func (a *AppService) exec(args ...string) ([]byte, error) { + return a.c.exec("ovs-appctl", args...) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/client.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/client.go new file mode 100644 index 0000000..0dafe5b --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/client.go @@ -0,0 +1,357 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "os/exec" + "strings" +) + +// A Client is a client type which enables programmatic control of Open +// vSwitch. +type Client struct { + // OpenFlow wraps functionality of the 'ovs-ofctl' binary. + OpenFlow *OpenFlowService + + // App wraps functionality of the 'ovs-appctl' binary + App *AppService + + // VSwitch wraps functionality of the 'ovs-vsctl' binary. + VSwitch *VSwitchService + + // DataPath wraps functionality of the 'ovs-dpctl' binary + DataPath *DataPathService + + // Additional flags applied to all OVS actions, such as timeouts + // or retries. + flags []string + + // Additional flags applied to 'ovs-ofctl' commands. + ofctlFlags []string + + // Enable or disable debugging log messages for OVS commands. + debug bool + + // Prefix all commands with "sudo". 
+ sudo bool + + // Implementation of ExecFunc. + execFunc ExecFunc + + // Implementation of PipeFunc. + pipeFunc PipeFunc +} + +// An ExecFunc is a function which accepts input arguments and returns raw +// byte output and an error. ExecFuncs are swappable to enable testing +// without OVS installed. +type ExecFunc func(cmd string, args ...string) ([]byte, error) + +// shellExec is an ExecFunc which shells out to the binary cmd using the +// arguments args, and returns its combined stdout and stderr and any errors +// which may have occurred. +func shellExec(cmd string, args ...string) ([]byte, error) { + return exec.Command(cmd, args...).CombinedOutput() +} + +// exec executes an ExecFunc using the values from cmd and args. +// The ExecFunc may shell out to an appropriate binary, or may be swapped +// for testing. +func (c *Client) exec(cmd string, args ...string) ([]byte, error) { + // Prepend recurring flags before arguments + flags := append(c.flags, args...) + + // If needed, prefix sudo. + if c.sudo { + flags = append([]string{cmd}, flags...) + cmd = "sudo" + } + + c.debugf("exec: %s %v", cmd, flags) + + // Execute execFunc with all flags and clean up any whitespace or + // newlines from its output. + out, err := c.execFunc(cmd, flags...) + if out != nil { + out = bytes.TrimSpace(out) + c.debugf("exec: %q", string(out)) + } + if err != nil { + // Wrap errors in Error type for further introspection + return nil, &Error{ + Out: out, + Err: err, + } + } + + return out, nil +} + +// A PipeFunc is a function which accepts an input stdin stream, command, +// and arguments, and returns command output and an error. PipeFuncs are +// swappable to enable testing without OVS installed. +type PipeFunc func(stdin io.Reader, cmd string, args ...string) ([]byte, error) + +// shellPipe is a PipeFunc which shells out to the binary cmd using the arguments +// args, and writing to the command's stdin using stdin. +func shellPipe(stdin io.Reader, cmd string, args ...string) ([]byte, error) { + command := exec.Command(cmd, args...) + + stdout, err := command.StdoutPipe() + if err != nil { + return nil, err + } + stderr, err := command.StderrPipe() + if err != nil { + return nil, err + } + + wc, err := command.StdinPipe() + if err != nil { + return nil, err + } + + if err := command.Start(); err != nil { + return nil, err + } + + if _, err := io.Copy(wc, stdin); err != nil { + return nil, err + } + + // Needed to indicate to ovs-ofctl that stdin is done being read. + // "... if the command being run will not exit until standard input is + // closed, the caller must close the pipe." + // Reference: https://golang.org/pkg/os/exec/#Cmd.StdinPipe + if err := wc.Close(); err != nil { + return nil, err + } + + mr := io.MultiReader(stdout, stderr) + b, err := ioutil.ReadAll(mr) + if err != nil { + return nil, err + } + + return b, command.Wait() +} + +// pipe executes a PipeFunc using the values from stdin, cmd, and args. +// stdin is used to feed input data to the stdin of a forked process. +// The PipeFunc may shell out to an appropriate binary, or may be swapped +// for testing. +func (c *Client) pipe(stdin io.Reader, cmd string, args ...string) error { + // Prepend recurring flags before arguments + flags := append(c.flags, args...) + + // If needed, prefix sudo. + if c.sudo { + flags = append([]string{cmd}, flags...) 
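+		// Illustrative sketch, not part of the upstream file: this branch is what
+		// makes a Client built as, for example,
+		//
+		//	c := ovs.New(ovs.Sudo(), ovs.Timeout(5), ovs.Debug(true))
+		//
+		// invoke "sudo ovs-ofctl --timeout=5 ..." rather than calling the binary
+		// directly; the option values above are examples only.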
+ cmd = "sudo" + } + + c.debugf("pipe: %s %v", cmd, flags) + c.debugf("bundle:") + + tr := io.TeeReader(stdin, writerFunc(func(p []byte) (int, error) { + c.debugf("%s", string(p)) + return len(p), nil + })) + + if out, err := c.pipeFunc(tr, cmd, flags...); err != nil { + c.debugf("pipe error: %v: %q", err, string(out)) + return &pipeError{ + out: out, + err: err, + } + } + + return nil + +} + +// A pipeError is an error returned by Client.pipe, containing combined +// stdout/stderr from a process as well as its error. +type pipeError struct { + out []byte + err error +} + +// Error returns the string representation of a pipeError. +func (e *pipeError) Error() string { + return fmt.Sprintf("pipe error: %v: %q", e.err, string(e.out)) +} + +// debugf prints a logging debug message when debugging is enabled. +func (c *Client) debugf(format string, a ...interface{}) { + if !c.debug { + return + } + + log.Printf("ovs: "+format, a...) +} + +// New creates a new Client with zero or more OptionFunc configurations +// applied. +func New(options ...OptionFunc) *Client { + // Always execute and pipe using shell when created with New. + c := &Client{ + flags: make([]string, 0), + ofctlFlags: make([]string, 0), + execFunc: shellExec, + pipeFunc: shellPipe, + } + for _, o := range options { + o(c) + } + + vss := &VSwitchService{ + c: c, + } + vss.Get = &VSwitchGetService{ + v: vss, + } + vss.Set = &VSwitchSetService{ + v: vss, + } + c.VSwitch = vss + + ofs := &OpenFlowService{ + c: c, + } + c.OpenFlow = ofs + + app := &AppService{ + c: c, + } + c.App = app + + c.DataPath = &DataPathService{ + CLI: &DpCLI{ + c: c, + }, + } + + return c +} + +// An OptionFunc is a function which can apply configuration to a Client. +type OptionFunc func(c *Client) + +// Timeout returns an OptionFunc which sets a timeout in seconds for all +// Open vSwitch interactions. +func Timeout(seconds int) OptionFunc { + return func(c *Client) { + c.flags = append(c.flags, fmt.Sprintf("--timeout=%d", seconds)) + } +} + +// Debug returns an OptionFunc which enables debugging output for the Client +// type. +func Debug(enable bool) OptionFunc { + return func(c *Client) { + c.debug = enable + } +} + +// Exec returns an OptionFunc which sets an ExecFunc for use with a Client. +// This function should typically only be used in tests. +func Exec(fn ExecFunc) OptionFunc { + return func(c *Client) { + c.execFunc = fn + } +} + +// Pipe returns an OptionFunc which sets a PipeFunc for use with a Client. +// This function should typically only be used in tests. +func Pipe(fn PipeFunc) OptionFunc { + return func(c *Client) { + c.pipeFunc = fn + } +} + +const ( + // FlowFormatNXMTableID is a flow format which allows Nicira Extended match + // with the ability to place a flow in a specific table. + FlowFormatNXMTableID = "NXM+table_id" + + // FlowFormatOXMOpenFlow14 is a flow format which allows Open vSwitch + // extensible match. + FlowFormatOXMOpenFlow14 = "OXM-OpenFlow14" +) + +// FlowFormat specifies the flow format to be used when shelling to +// 'ovs-ofctl'. +func FlowFormat(format string) OptionFunc { + return func(c *Client) { + c.ofctlFlags = append(c.ofctlFlags, fmt.Sprintf("--flow-format=%s", format)) + } +} + +// Protocol constants for use with Protocols and BridgeOptions. 
+const ( + ProtocolOpenFlow10 = "OpenFlow10" + ProtocolOpenFlow11 = "OpenFlow11" + ProtocolOpenFlow12 = "OpenFlow12" + ProtocolOpenFlow13 = "OpenFlow13" + ProtocolOpenFlow14 = "OpenFlow14" + ProtocolOpenFlow15 = "OpenFlow15" +) + +// Protocols specifies one or more OpenFlow protocol versions to be used when shelling +// to 'ovs-ofctl'. +func Protocols(versions []string) OptionFunc { + return func(c *Client) { + c.ofctlFlags = append(c.ofctlFlags, + fmt.Sprintf("--protocols=%s", strings.Join(versions, ",")), + ) + } +} + +// SetSSLParam configures SSL authentication using a private key, certificate, +// and CA certificate for use with ovs-ofctl. +func SetSSLParam(pkey string, cert string, cacert string) OptionFunc { + return func(c *Client) { + c.ofctlFlags = append(c.ofctlFlags, fmt.Sprintf("--private-key=%s", pkey), + fmt.Sprintf("--certificate=%s", cert), fmt.Sprintf("--ca-cert=%s", cacert)) + } +} + +// SetTCPParam configures the OVSDB connection using a TCP format ip:port +// for use with all ovs-vsctl commands. +func SetTCPParam(addr string) OptionFunc { + return func(c *Client) { + c.flags = append(c.flags, fmt.Sprintf("--db=tcp:%s", addr)) + } +} + +// Sudo specifies that "sudo" should be prefixed to all OVS commands. +func Sudo() OptionFunc { + return func(c *Client) { + c.sudo = true + } +} + +// A writerFunc is an adapter for a function to be used as an io.Writer. +type writerFunc func(p []byte) (n int, err error) + +func (fn writerFunc) Write(p []byte) (int, error) { + return fn(p) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/codegen.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/codegen.go new file mode 100644 index 0000000..40377b1 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/codegen.go @@ -0,0 +1,62 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "fmt" + "net" + "strconv" +) + +// hwAddrGoString converts a net.HardwareAddr into its Go syntax representation. +func hwAddrGoString(addr net.HardwareAddr) string { + buf := bytes.NewBufferString("net.HardwareAddr{") + for i, b := range addr { + _, _ = buf.WriteString(fmt.Sprintf("0x%02x", b)) + + if i != len(addr)-1 { + _, _ = buf.WriteString(", ") + } + } + _, _ = buf.WriteString("}") + + return buf.String() +} + +// ipv4GoString converts a net.IP (IPv4 only) into its Go syntax representation. +func ipv4GoString(ip net.IP) string { + ip4 := ip.To4() + if ip4 == nil { + return `panic("invalid IPv4 address")` + } + + buf := bytes.NewBufferString("net.IPv4(") + for i, b := range ip4 { + _, _ = buf.WriteString(strconv.Itoa(int(b))) + + if i != len(ip4)-1 { + _, _ = buf.WriteString(", ") + } + } + _, _ = buf.WriteString(")") + + return buf.String() +} + +// bprintf is fmt.Sprintf, but it returns a byte slice instead of a string. 
+func bprintf(format string, a ...interface{}) []byte { + return []byte(fmt.Sprintf(format, a...)) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/datapath.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/datapath.go new file mode 100644 index 0000000..853e12a --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/datapath.go @@ -0,0 +1,338 @@ +// Copyright 2021 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + errMissingMandatoryDataPathName = errors.New("datapath name argument is mandatory") + errUninitializedClient = errors.New("client unitialized") + errMissingMandatoryZone = errors.New("at least 1 zone is mandatory") + errWrongArgumentNumber = errors.New("missing or too many arguments to setup ct limits") + errWrongDefaultArgument = errors.New("wrong argument while setting default ct limits") + errWrongZoneArgument = errors.New("wrong argument while setting zone ct limits") +) + +// CTLimit defines the type used to store a zone as it is returned +// by ovs-dpctl ct-*-limits commands +type CTLimit map[string]uint64 + +// ConntrackOutput is a type defined to store the output +// of ovs-dpctl ct-*-limits commands. 
For example, it stores
+// such cli output as:
+// # ovs-dpctl ct-get-limits system@ovs-system zone=2,3
+// default limit=0
+// zone=2,limit=0,count=0
+// zone=3,limit=0,count=0
+type ConntrackOutput struct {
+	// defaultLimit is used to store the global setting: default
+	defaultLimit CTLimit
+	// zoneLimits stores all remaining zone settings
+	zoneLimits []CTLimit
+}
+
+// DataPathReader is the interface defining the read operations
+// for the ovs DataPaths
+type DataPathReader interface {
+	// Version is the method used to get the version of ovs-dpctl
+	Version() (string, error)
+	// GetDataPath is the method that returns all DataPaths set up
+	// for an ovs switch
+	GetDataPath() ([]string, error)
+}
+
+// DataPathWriter is the interface defining the write operations
+// for the ovs DataPaths
+type DataPathWriter interface {
+	// AddDataPath is the method used to add a datapath to the switch
+	AddDataPath(string) error
+	// DelDataPath is the method used to remove a datapath from the switch
+	DelDataPath(string) error
+}
+
+// ConnTrackReader is the interface defining the read operations
+// of ovs conntrack
+type ConnTrackReader interface {
+	// GetCTLimits is the method used to query conntrack limits for a
+	// datapath on a switch
+	GetCTLimits(string, []uint64) (ConntrackOutput, error)
+}
+
+// ConnTrackWriter is the interface defining the write operations
+// of ovs conntrack
+type ConnTrackWriter interface {
+	// SetCTLimits is the method used to set up a limit for a zone
+	// belonging to a datapath of a switch
+	SetCTLimits(string) (string, error)
+	// DelCTLimits is the method used to remove a limit from a zone
+	// belonging to a datapath of a switch
+	DelCTLimits(string, []uint64) (string, error)
+}
+
+// CLI is an interface defining a contract for executing a command.
+// Implementation of the shell cli is done by the Client concrete type
+type CLI interface {
+	Exec(args ...string) ([]byte, error)
+}
+
+// DataPathService defines the concrete type used for DataPath operations
+// supported by the ovs-dpctl command
+type DataPathService struct {
+	// We define here a CLI interface, making it easier to mock the ovs-dpctl command
+	// as in github.com/danieldin95/go-openvswitch/ovs/datapath_test.go
+	CLI
+}
+
+// NewDataPathService is a builder for the DataPathService.
+// sudo is defined as a default option.
+func NewDataPathService() *DataPathService {
+	return &DataPathService{
+		CLI: &DpCLI{
+			c: New(Sudo()),
+		},
+	}
+}
+
+// Version returns the output of 'ovs-dpctl --version' for the currently installed binary
+func (dp *DataPathService) Version() (string, error) {
+	result, err := dp.CLI.Exec("--version")
+	if err != nil {
+		return "", err
+	}
+
+	return string(result), nil
+}
+
+// GetDataPaths returns the output of the command 'ovs-dpctl dump-dps'
+func (dp *DataPathService) GetDataPaths() ([]string, error) {
+	result, err := dp.CLI.Exec("dump-dps")
+	if err != nil {
+		return nil, err
+	}
+
+	return strings.Split(string(result), "\n"), nil
+}
+
+// AddDataPath creates a Datapath with the command 'ovs-dpctl add-dp '
+// It takes one argument, the required DataPath name, and returns an error
+// if it fails
+func (dp *DataPathService) AddDataPath(dpName string) error {
+	_, err := dp.CLI.Exec("add-dp", dpName)
+	return err
+}
+
+// DelDataPath deletes a Datapath with the command 'ovs-dpctl del-dp '
+// It takes one argument, the required DataPath name, and returns an error
+// if it fails
+func (dp *DataPathService) DelDataPath(dpName string) error {
+	_, err := dp.CLI.Exec("del-dp", dpName)
+
+	return err
+}
+
+// GetCTLimits returns the conntrack limits for a given datapath,
+// equivalent to running: 'sudo ovs-dpctl ct-get-limits zone=<#1>,<#2>,...'
+func (dp *DataPathService) GetCTLimits(dpName string, zones []uint64) (*ConntrackOutput, error) {
+	// Start by building the args
+	if dpName == "" {
+		return nil, errMissingMandatoryDataPathName
+	}
+
+	args := []string{"ct-get-limits", dpName}
+
+	zoneParam := getZoneString(zones)
+	if zoneParam != "" {
+		args = append(args, zoneParam)
+	}
+
+	// call the cli
+	results, err := dp.CLI.Exec(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Process the results
+	entries := strings.Split(string(results), "\n")
+	ctOut := &ConntrackOutput{}
+
+	r, err := regexp.Compile(`default`)
+	if err != nil {
+		return nil, err
+	}
+
+	// First, extract the default conntrack limit setup.
+	// If found, the default value is removed from the entries.
+	for i, entry := range entries {
+		if r.MatchString(entry) {
+			ctOut.defaultLimit = make(CTLimit)
+			limit, err := strconv.Atoi(strings.Split(entry, "=")[1])
+			if err != nil {
+				return nil, err
+			}
+			ctOut.defaultLimit["default"] = uint64(limit)
+			// As the default has been found, let's remove it
+			entries = append(entries[:i], entries[i+1:]...)
+		}
+	}
+
+	// Now process the zones setup
+	for _, entry := range entries {
+		fields := strings.Split(entry, ",")
+		z := make(CTLimit)
+		for _, field := range fields {
+			buf := strings.Split(field, "=")
+			val, _ := strconv.Atoi(buf[1])
+			z[buf[0]] = uint64(val)
+		}
+		ctOut.zoneLimits = append(ctOut.zoneLimits, z)
+	}
+
+	return ctOut, nil
+}
+
+// SetCTLimits sets the limit for a specific zone or globally.
+// Only one zone or the default can be set up at a time, as the cli allows.
+// Examples of commands it wraps:
+// sudo ovs-dpctl ct-set-limits system@ovs-system zone=331,limit=1000000
+// sudo ovs-dpctl ct-set-limits system@ovs-system default=1000000
+func (dp *DataPathService) SetCTLimits(dpName string, zone map[string]uint64) (string, error) {
+	// Sanitize the input
+	if dpName == "" {
+		return "", errMissingMandatoryDataPathName
+	}
+	argsStr, err := ctSetLimitsArgsToString(zone)
+	if err != nil {
+		return "", err
+	}
+	// call the cli
+	argsCLI := []string{"ct-set-limits", dpName, argsStr}
+	results, err := dp.CLI.Exec(argsCLI...)
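+	// Illustrative sketch, not part of the upstream file: from a caller's side
+	// this wrapper might be used roughly as below; the datapath name and the
+	// numbers are example values only.
+	//
+	//	dp := ovs.NewDataPathService()
+	//	out, err := dp.SetCTLimits("system@ovs-system", map[string]uint64{
+	//		"zone":  331,
+	//		"limit": 1000000,
+	//	})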
+ + return string(results), err +} + +// DelCTLimits deletes limits setup for zones. It takes the Datapath name +// and zones to delete the limits. +// sudo ovs-dpctl ct-del-limits system@ovs-system zone=40,4 +func (dp *DataPathService) DelCTLimits(dpName string, zones []uint64) (string, error) { + if dpName == "" { + return "", errMissingMandatoryDataPathName + } + if len(zones) < 1 { + return "", errMissingMandatoryZone + } + + var firstZone uint64 + firstZone, zones = zones[0], zones[1:] + zonesStr := "zone=" + strconv.FormatUint(firstZone, 10) + for _, z := range zones { + zonesStr += "," + strconv.FormatUint(z, 10) + } + + // call the cli + argsCLI := []string{"ct-del-limits", dpName, zonesStr} + results, err := dp.CLI.Exec(argsCLI...) + + return string(results), err +} + +// ctSetLimitsArgsToString helps formating and sanatizing an input +// It takes a map and output a string like this: +// - "zone=2,limit=10000" or "limit=10000,zone=2" +// - "default=10000" +func ctSetLimitsArgsToString(zone map[string]uint64) (string, error) { + defaultSetup := false + args := make([]string, 0) + for k, v := range zone { + if k == "default" { + args = append(args, k+"="+strconv.FormatUint(v, 10)) + defaultSetup = true + } else if k == "zone" || k == "limit" { + args = append(args, k+"="+strconv.FormatUint(v, 10)) + } + } + + // We need at most 2 arguments and at least 1 + if len(args) == 0 || len(args) > 2 { + return "", errWrongArgumentNumber + + } + // if we setup the default global setting we only need a single parameter + // like "default=100000" and nothing else + if defaultSetup && len(args) != 1 { + return "", errWrongDefaultArgument + } + // if we setup a limit for dedicated zone we need 2 params like + // "zone=3" and "limit=50000" + if !defaultSetup && len(args) != 2 { + return "", errWrongZoneArgument + } + + var argsStr string + argsStr, args = args[0], args[1:] + if len(args) > 0 { + for _, s := range args { + argsStr += "," + s + } + } + return argsStr, nil +} + +// getZoneString takes the zones as []uint64 to return a formated +// string usable in different ovs-dpctl commands +// Example a slice: var zones = []uint64{2, 3, 4} +// will output: "zone=2,3,4" +func getZoneString(z []uint64) string { + zonesStr := make([]string, 0) + for _, zone := range z { + zonesStr = append(zonesStr, strconv.FormatUint(zone, 10)) + } + + var sb strings.Builder + var firstZone string + if len(zonesStr) > 0 { + sb.WriteString("zone=") + firstZone, zonesStr = zonesStr[0], zonesStr[1:] + } + sb.WriteString(firstZone) + + for _, zone := range zonesStr { + sb.WriteString(",") + sb.WriteString(zone) + } + + return sb.String() +} + +// DpCLI implements the CLI interface by invoking the Client exec +// method. +type DpCLI struct { + // Wrapped client for ovs-dpctl + c *Client +} + +// Exec executes 'ovs-dpctl' + args passed in argument +func (cli *DpCLI) Exec(args ...string) ([]byte, error) { + if cli.c == nil { + return nil, errUninitializedClient + } + + return cli.c.exec("ovs-dpctl", args...) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/doc.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/doc.go new file mode 100644 index 0000000..1e98ec4 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ovs is a client library for Open vSwitch which enables programmatic +// control of the virtual switch. +package ovs diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/flow.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/flow.go new file mode 100644 index 0000000..0de9337 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/flow.go @@ -0,0 +1,492 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + // PortLOCAL is a special in_port value which refers to the local port + // of an OVS bridge. + PortLOCAL = -1 +) + +// Possible errors which may be encountered while marshaling or unmarshaling +// a Flow. +var ( + errActionsWithDrop = errors.New("Flow actions include drop, but multiple actions specified") + errInvalidActions = errors.New("invalid actions for Flow") + errNoActions = errors.New("no actions defined for Flow") + errNotEnoughElements = errors.New("not enough elements for valid Flow") + errInvalidLearnedActions = errors.New("invalid actions for LearnedFlow") +) + +// A Protocol is an OpenFlow protocol designation accepted by Open vSwitch. +type Protocol string + +// Protocol constants which can be used in OVS flow configurations. +const ( + ProtocolARP Protocol = "arp" + ProtocolICMPv4 Protocol = "icmp" + ProtocolICMPv6 Protocol = "icmp6" + ProtocolIPv4 Protocol = "ip" + ProtocolIPv6 Protocol = "ipv6" + ProtocolTCPv4 Protocol = "tcp" + ProtocolTCPv6 Protocol = "tcp6" + ProtocolUDPv4 Protocol = "udp" + ProtocolUDPv6 Protocol = "udp6" +) + +// A Flow is an OpenFlow flow meant for adding flows to a software bridge. It can be marshaled +// to and from its textual form for use with Open vSwitch. +type Flow struct { + Priority int + Protocol Protocol + InPort int + Matches []Match + Table int + IdleTimeout int + Cookie uint64 + Actions []Action +} + +// A LearnedFlow is defined as part of the Learn action. +type LearnedFlow struct { + Priority int + InPort int + Matches []Match + Table int + IdleTimeout int + Cookie uint64 + Actions []Action + + DeleteLearned bool + FinHardTimeout int + HardTimeout int + Limit int +} + +var _ error = &FlowError{} + +// A FlowError is an error encountered while marshaling or unmarshaling +// a Flow. +type FlowError struct { + // Str indicates the string, if any, that caused the flow to + // fail while unmarshaling. + Str string + + // Err indicates the error that halted flow marshaling or unmarshaling. 
+ Err error +} + +// Error returns the string representation of a FlowError. +func (e *FlowError) Error() string { + if e.Str == "" { + return e.Err.Error() + } + + return fmt.Sprintf("flow error due to string %q: %v", + e.Str, e.Err) +} + +// Constants and variables used repeatedly to reduce errors in code. +const ( + priorityString = "priority=" + + priority = "priority" + cookie = "cookie" + keyActions = "actions" + idleTimeout = "idle_timeout" + inPort = "in_port" + table = "table" + duration = "duration" + nPackets = "n_packets" + nBytes = "n_bytes" + hardAge = "hard_age" + idleAge = "idle_age" + + // Variables used in LearnedFlows only. + deleteLearned = "delete_learned" + finHardTimeout = "fin_hard_timeout" + hardTimeout = "hard_timeout" + limit = "limit" + + portLOCAL = "LOCAL" +) + +var ( + priorityBytes = []byte(priorityString) +) + +// MarshalText marshals a Flow into its textual form. +func (f *Flow) MarshalText() ([]byte, error) { + if len(f.Actions) == 0 { + return nil, &FlowError{ + Err: errNoActions, + } + } + + actions, err := marshalActions(f.Actions) + if err != nil { + return nil, err + } + + matches, err := marshalMatches(f.Matches) + if err != nil { + return nil, err + } + + // Action "drop" must only be specified by itself + if len(actions) > 1 { + for _, a := range actions { + if a == actionDrop { + return nil, &FlowError{ + Err: errActionsWithDrop, + } + } + } + } + + b := make([]byte, len(priorityBytes)) + copy(b, priorityBytes) + + b = strconv.AppendInt(b, int64(f.Priority), 10) + + if f.Protocol != "" { + b = append(b, ',') + b = append(b, f.Protocol...) + } + + if f.InPort != 0 { + b = append(b, ","+inPort+"="...) + + // Special case, InPortLOCAL is converted to the literal string LOCAL + if f.InPort == PortLOCAL { + b = append(b, portLOCAL...) + } else { + b = strconv.AppendInt(b, int64(f.InPort), 10) + } + } + + if len(matches) > 0 { + b = append(b, ","+strings.Join(matches, ",")...) + } + + b = append(b, ","+table+"="...) + b = strconv.AppendInt(b, int64(f.Table), 10) + + b = append(b, ","+idleTimeout+"="...) + b = strconv.AppendInt(b, int64(f.IdleTimeout), 10) + + if f.Cookie > 0 { + // Hexadecimal cookies are much easier to read. + b = append(b, ","+cookie+"="...) + b = append(b, paddedHexUint64(f.Cookie)...) + } + + b = append(b, ","+keyActions+"="+strings.Join(actions, ",")...) + + return b, nil +} + +// MarshalText marshals a LearnedFlow into its textual form. +func (f *LearnedFlow) MarshalText() ([]byte, error) { + if len(f.Actions) == 0 { + return nil, &FlowError{ + Err: errNoActions, + } + } + + // A learned flow can have a limited set of actions, namely `load` and `output:field`. + for _, a := range f.Actions { + switch a := a.(type) { + case *loadSetFieldAction: + if a.typ != actionLoad { + return nil, errInvalidLearnedActions + } + case *outputFieldAction: + default: + return nil, errInvalidLearnedActions + } + } + + actions, err := marshalActions(f.Actions) + if err != nil { + return nil, err + } + + matches, err := marshalMatches(f.Matches) + if err != nil { + return nil, err + } + + b := make([]byte, len(priorityBytes)) + copy(b, priorityBytes) + + b = strconv.AppendInt(b, int64(f.Priority), 10) + + if f.InPort != 0 { + b = append(b, ","+inPort+"="...) + + // Special case, InPortLOCAL is converted to the literal string LOCAL + if f.InPort == PortLOCAL { + b = append(b, portLOCAL...) + } else { + b = strconv.AppendInt(b, int64(f.InPort), 10) + } + } + + if len(matches) > 0 { + b = append(b, ","+strings.Join(matches, ",")...) 
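+		// Illustrative sketch, not part of the upstream file: a LearnedFlow is
+		// normally marshaled indirectly through the Learn action, roughly as
+		// below; the output field name and the numbers are example values only.
+		//
+		//	lf := &ovs.LearnedFlow{
+		//		Priority:    10,
+		//		Table:       20,
+		//		IdleTimeout: 60,
+		//		Actions:     []ovs.Action{ovs.OutputField("NXM_OF_IN_PORT[]")},
+		//	}
+		//	_ = ovs.Learn(lf) // marshals to learn(priority=10,...,output:NXM_OF_IN_PORT[])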
+ } + + b = append(b, ","+table+"="...) + b = strconv.AppendInt(b, int64(f.Table), 10) + + b = append(b, ","+idleTimeout+"="...) + b = strconv.AppendInt(b, int64(f.IdleTimeout), 10) + + b = append(b, ","+finHardTimeout+"="...) + b = strconv.AppendInt(b, int64(f.FinHardTimeout), 10) + + b = append(b, ","+hardTimeout+"="...) + b = strconv.AppendInt(b, int64(f.HardTimeout), 10) + + if f.Limit > 0 { + // On older version of OpenVSwitch, the limit option doesn't exist. + // Make sure we don't use it if the value is not set. + b = append(b, ","+limit+"="...) + b = strconv.AppendInt(b, int64(f.Limit), 10) + } + + if f.DeleteLearned { + b = append(b, ","+deleteLearned...) + } + + if f.Cookie > 0 { + // Hexadecimal cookies are much easier to read. + b = append(b, ","+cookie+"="...) + b = append(b, paddedHexUint64(f.Cookie)...) + } + + b = append(b, ","+strings.Join(actions, ",")...) + + return b, nil +} + +// UnmarshalText unmarshals flow text into a Flow. +func (f *Flow) UnmarshalText(b []byte) error { + // Make a copy per documentation for encoding.TextUnmarshaler. + // A string is easier to work with in this case. + s := string(b) + + // Must have one and only one actions=... field in the flow. + ss := strings.Split(s, keyActions+"=") + if len(ss) != 2 || ss[1] == "" { + return &FlowError{ + Err: errNoActions, + } + } + if len(ss) < 2 { + return &FlowError{ + Err: errNotEnoughElements, + } + } + matchers, actions := strings.TrimSpace(ss[0]), strings.TrimSpace(ss[1]) + + // Handle matchers first. + ss = strings.Split(matchers, ",") + f.Matches = make([]Match, 0) + for i := 0; i < len(ss); i++ { + if !strings.Contains(ss[i], "=") { + // that means this will be a protocol field. + if ss[i] != "" { + f.Protocol = Protocol(ss[i]) + } + continue + } + + // All remaining comma-separated values should be in key=value format, + // but parsing "actions" is done later because actions can use the form: + // actions=foo,bar,baz + kv := strings.Split(ss[i], "=") + if len(kv) != 2 { + continue + } + kv[1] = strings.TrimSpace(kv[1]) + + switch strings.TrimSpace(kv[0]) { + case priority: + // Parse priority into struct field. + pri, err := strconv.ParseInt(kv[1], 10, 0) + if err != nil { + return &FlowError{ + Str: kv[1], + Err: err, + } + } + f.Priority = int(pri) + continue + case cookie: + // Parse cookie into struct field. + cookie, err := strconv.ParseUint(kv[1], 0, 64) + if err != nil { + return &FlowError{ + Str: kv[1], + Err: err, + } + } + f.Cookie = cookie + continue + case keyActions: + // Parse actions outside this loop. + continue + case inPort: + // Parse in_port into struct field. + s := kv[1] + if strings.TrimSpace(s) == portLOCAL { + f.InPort = PortLOCAL + continue + } + + port, err := strconv.ParseInt(s, 10, 0) + if err != nil { + return &FlowError{ + Str: s, + Err: err, + } + } + f.InPort = int(port) + continue + case idleTimeout: + // Parse idle_timeout into struct field. + timeout, err := strconv.ParseInt(kv[1], 10, 0) + if err != nil { + return &FlowError{ + Str: kv[1], + Err: err, + } + } + f.IdleTimeout = int(timeout) + continue + case table: + // Parse table into struct field. + table, err := strconv.ParseInt(kv[1], 10, 0) + if err != nil { + return &FlowError{ + Str: kv[1], + Err: err, + } + } + f.Table = int(table) + continue + case duration, nPackets, nBytes, hardAge, idleAge: + // ignore those fields. + continue + } + + // All arbitrary key/value pairs that + // don't match the case above. 
+ match, err := parseMatch(kv[0], kv[1]) + if err != nil { + return err + } + // The keyword will be skipped if unknown, + // don't add a nil value + if match != nil { + f.Matches = append(f.Matches, match) + } + } + + // Parse all actions from the flow. + p := newActionParser(strings.NewReader(actions)) + out, raw, err := p.Parse() + if err != nil { + return &FlowError{ + Str: actions, + Err: errInvalidActions, + } + } + f.Actions = out + + // Action "drop" must only be specified by itself. + if len(raw) > 1 { + for _, a := range raw { + if a == actionDrop { + return &FlowError{ + Err: errActionsWithDrop, + } + } + } + } + + return nil +} + +// MatchFlow converts Flow into MatchFlow. +func (f *Flow) MatchFlow() *MatchFlow { + return &MatchFlow{ + Protocol: f.Protocol, + InPort: f.InPort, + Matches: f.Matches, + Table: f.Table, + Cookie: f.Cookie, + } +} + +// marshalActions marshals all provided Actions to their text form. +func marshalActions(aa []Action) ([]string, error) { + fns := make([]func() ([]byte, error), 0, len(aa)) + for _, fn := range aa { + fns = append(fns, fn.MarshalText) + } + + return marshalFunctions(fns) +} + +// marshalMatches marshals all provided Matches to their text form. +func marshalMatches(mm []Match) ([]string, error) { + fns := make([]func() ([]byte, error), 0, len(mm)) + for _, fn := range mm { + fns = append(fns, fn.MarshalText) + } + + return marshalFunctions(fns) +} + +// marshalFunctions marshals a slice of functions to their text form. +func marshalFunctions(fns []func() ([]byte, error)) ([]string, error) { + out := make([]string, 0, len(fns)) + for _, fn := range fns { + o, err := fn() + if err != nil { + return nil, err + } + + out = append(out, string(o)) + } + + return out, nil +} + +// paddedHexUint64 formats and displays a uint64 as a hexadecimal string +// prefixed with 0x and zero-padded up to 16 characters in length. +func paddedHexUint64(i uint64) string { + return fmt.Sprintf("%#016x", i) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/flowstats.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/flowstats.go new file mode 100644 index 0000000..c2c4b27 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/flowstats.go @@ -0,0 +1,96 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "errors" + "strconv" + "strings" +) + +var ( + // ErrInvalidFlowStats is returned when flow statistics from 'ovs-ofctl + // dump-aggregate' do not match the expected output format. + ErrInvalidFlowStats = errors.New("invalid flow statistics") +) + +// FlowStats contains a variety of statistics about an Open vSwitch port, +// including its port ID and numbers about packet receive and transmit +// operations. +type FlowStats struct { + PacketCount uint64 + ByteCount uint64 +} + +// UnmarshalText unmarshals a FlowStats from textual form. +func (f *FlowStats) UnmarshalText(b []byte) error { + // Make a copy per documentation for encoding.TextUnmarshaler. 
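+	// Illustrative note, not part of the upstream file: the text parsed below is
+	// the summary printed by 'ovs-ofctl dump-aggregate', which typically looks
+	// something like the following (the exact reply prefix can vary by OpenFlow
+	// version):
+	//
+	//	NXST_AGGREGATE reply (xid=0x4): packet_count=642800 byte_count=141379644 flow_count=2
+	//
+	// Only packet_count and byte_count are retained in FlowStats.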
+ s := string(b) + + // Constants only needed within this method, to avoid polluting the + // package namespace with generic names + const ( + packetCount = "packet_count" + byteCount = "byte_count" + flowCount = "flow_count" + ) + + // Find the index of packet count to find stats. + idx := strings.Index(s, packetCount) + if idx == -1 { + return ErrInvalidFlowStats + } + + // Assume the last three fields are packets, bytes, and flows, in that order. + ss := strings.Fields(s[idx:]) + fields := []string{ + packetCount, + byteCount, + flowCount, + } + + if len(ss) != len(fields) { + return ErrInvalidFlowStats + } + + var values []uint64 + for i := range ss { + // Split key from its integer value. + kv := strings.Split(ss[i], "=") + if len(kv) != 2 { + return ErrInvalidFlowStats + } + + // Verify keys appear in expected order. + if kv[0] != fields[i] { + return ErrInvalidFlowStats + } + + n, err := strconv.ParseUint(kv[1], 10, 64) + if err != nil { + return err + } + + values = append(values, n) + } + + *f = FlowStats{ + PacketCount: values[0], + ByteCount: values[1], + // Flow count unused. + } + + return nil +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/match.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/match.go new file mode 100644 index 0000000..69df910 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/match.go @@ -0,0 +1,1553 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "encoding" + "fmt" + "net" + "strings" +) + +// Constants for use in Match names. +const ( + source = "src" + destination = "dst" +) + +// Constants of full Match names. +const ( + arpOp = "arp_op" + arpSHA = "arp_sha" + arpSPA = "arp_spa" + arpTHA = "arp_tha" + arpTPA = "arp_tpa" + conjID = "conj_id" + ctMark = "ct_mark" + ctState = "ct_state" + ctZone = "ct_zone" + dlDST = "dl_dst" + dlSRC = "dl_src" + dlType = "dl_type" + dlVLAN = "dl_vlan" + dlVLANPCP = "dl_vlan_pcp" + icmp6Code = "icmpv6_code" + icmp6Type = "icmpv6_type" + icmpCode = "icmp_code" + icmpType = "icmp_type" + ipFrag = "ip_frag" + ipv6DST = "ipv6_dst" + ipv6Label = "ipv6_label" + ipv6SRC = "ipv6_src" + metadata = "metadata" + ndSLL = "nd_sll" + ndTarget = "nd_target" + ndTLL = "nd_tll" + nwDST = "nw_dst" + nwECN = "nw_ecn" + nwProto = "nw_proto" + nwSRC = "nw_src" + nwTOS = "nw_tos" + nwTTL = "nw_ttl" + tcpFlags = "tcp_flags" + tpDST = "tp_dst" + tpSRC = "tp_src" + tunDST = "tun_dst" + tunFlags = "tun_flags" + tunGbpFlags = "tun_gbp_flags" + tunGbpID = "tun_gbp_id" + tunID = "tun_id" + tunSRC = "tun_src" + tunTOS = "tun_tos" + tunTTL = "tun_ttl" + tunv6DST = "tun_ipv6_dst" + tunv6SRC = "tun_ipv6_src" + vlanTCI1 = "vlan_tci1" + vlanTCI = "vlan_tci" +) + +// A Match is a type which can be marshaled into an OpenFlow packet matching +// statement. Matches can be used with Flows to match specific packet types +// and fields. +// +// Matches must also implement fmt.GoStringer for code generation purposes. 
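+//
+// As an illustrative example, ovs.TCPFlags("+syn") marshals to the text
+// "tcp_flags=+syn" and its GoString is the expression `ovs.TCPFlags("+syn")`.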
+type Match interface { + encoding.TextMarshaler + fmt.GoStringer +} + +// DataLinkSource matches packets with a source hardware address and optional +// wildcard mask matching addr. +func DataLinkSource(addr string) Match { + return &dataLinkMatch{ + srcdst: source, + addr: addr, + } +} + +// DataLinkDestination matches packets with a destination hardware address +// and optional wildcard mask matching addr. +func DataLinkDestination(addr string) Match { + return &dataLinkMatch{ + srcdst: destination, + addr: addr, + } +} + +const ( + // ethernetAddrLen is the length in bytes of an ethernet hardware address. + ethernetAddrLen = 6 +) + +var _ Match = &dataLinkMatch{} + +// A dataLinkMatch is a Match returned by DataLink{Source,Destination}. +type dataLinkMatch struct { + srcdst string + addr string +} + +// GoString implements Match. +func (m *dataLinkMatch) GoString() string { + if m.srcdst == source { + return fmt.Sprintf("ovs.DataLinkSource(%q)", m.addr) + } + + return fmt.Sprintf("ovs.DataLinkDestination(%q)", m.addr) +} + +// MarshalText implements Match. +func (m *dataLinkMatch) MarshalText() ([]byte, error) { + // Split the string before possible wildcard mask + ss := strings.SplitN(m.addr, "/", 2) + + hwAddr, err := net.ParseMAC(ss[0]) + if err != nil { + return nil, err + } + if len(hwAddr) != ethernetAddrLen { + return nil, fmt.Errorf("hardware address must be %d octets, but got %d", + ethernetAddrLen, len(hwAddr)) + } + + if len(ss) == 1 { + // Address has no wildcard mask + return bprintf("dl_%s=%s", m.srcdst, hwAddr.String()), nil + } + + wildcard, err := net.ParseMAC(ss[1]) + if err != nil { + return nil, err + } + if len(wildcard) != ethernetAddrLen { + return nil, fmt.Errorf("wildcard mask must be %d octets, but got %d", + ethernetAddrLen, len(wildcard)) + } + + return bprintf("dl_%s=%s/%s", m.srcdst, hwAddr.String(), wildcard.String()), nil +} + +// DataLinkType matches packets with the specified EtherType. +func DataLinkType(etherType uint16) Match { + return &dataLinkTypeMatch{ + etherType: etherType, + } +} + +var _ Match = &dataLinkTypeMatch{} + +// A dataLinkTypeMatch is a Match returned by DataLinkType. +type dataLinkTypeMatch struct { + etherType uint16 +} + +// MarshalText implements Match. +func (m *dataLinkTypeMatch) MarshalText() ([]byte, error) { + return bprintf("%s=0x%04x", dlType, m.etherType), nil +} + +// GoString implements Match. +func (m *dataLinkTypeMatch) GoString() string { + return fmt.Sprintf("ovs.DataLinkType(0x%04x)", m.etherType) +} + +// VLANNone is a special value which indicates that DataLinkVLAN should only +// match packets with no VLAN tag specified. +const VLANNone = 0xffff + +// DataLinkVLAN matches packets with the specified VLAN ID matching vid. +func DataLinkVLAN(vid int) Match { + return &dataLinkVLANMatch{ + vid: vid, + } +} + +var _ Match = &dataLinkVLANMatch{} + +// A dataLinkVLANMatch is a Match returned by DataLinkVLAN. +type dataLinkVLANMatch struct { + vid int +} + +// MarshalText implements Match. +func (m *dataLinkVLANMatch) MarshalText() ([]byte, error) { + if !validVLANVID(m.vid) && m.vid != VLANNone { + return nil, errInvalidVLANVID + } + + if m.vid == VLANNone { + return bprintf("%s=0xffff", dlVLAN), nil + } + + return bprintf("%s=%d", dlVLAN, m.vid), nil +} + +// GoString implements Match. 
+func (m *dataLinkVLANMatch) GoString() string { + if m.vid == VLANNone { + return "ovs.DataLinkVLAN(ovs.VLANNone)" + } + + return fmt.Sprintf("ovs.DataLinkVLAN(%d)", m.vid) +} + +// DataLinkVLANPCP matches packets with the specified VLAN PCP matching pcp. +func DataLinkVLANPCP(pcp int) Match { + return &dataLinkVLANPCPMatch{ + pcp: pcp, + } +} + +var _ Match = &dataLinkVLANPCPMatch{} + +// A dataLinkVLANPCPMatch is a Match returned by DataLinkVLANPCP. +type dataLinkVLANPCPMatch struct { + pcp int +} + +// MarshalText implements Match. +func (m *dataLinkVLANPCPMatch) MarshalText() ([]byte, error) { + if !validVLANPCP(m.pcp) { + return nil, errInvalidVLANPCP + } + + return bprintf("%s=%d", dlVLANPCP, m.pcp), nil +} + +// GoString implements Match. +func (m *dataLinkVLANPCPMatch) GoString() string { + return fmt.Sprintf("ovs.DataLinkVLANPCP(%d)", m.pcp) +} + +// NetworkSource matches packets with a source IPv4 address or IPv4 CIDR +// block matching ip. +func NetworkSource(ip string) Match { + return &networkMatch{ + srcdst: source, + ip: ip, + } +} + +// NetworkDestination matches packets with a destination IPv4 address or +// IPv4 CIDR block matching ip. +func NetworkDestination(ip string) Match { + return &networkMatch{ + srcdst: destination, + ip: ip, + } +} + +var _ Match = &networkMatch{} + +// A networkMatch is a Match returned by Network{Source,Destination}. +type networkMatch struct { + srcdst string + ip string +} + +// MarshalText implements Match. +func (m *networkMatch) MarshalText() ([]byte, error) { + return matchIPv4AddressOrCIDR(fmt.Sprintf("nw_%s", m.srcdst), m.ip) +} + +// GoString implements Match. +func (m *networkMatch) GoString() string { + if m.srcdst == source { + return fmt.Sprintf("ovs.NetworkSource(%q)", m.ip) + } + + return fmt.Sprintf("ovs.NetworkDestination(%q)", m.ip) +} + +// NetworkECN creates a new networkECN +func NetworkECN(ecn int) Match { + return &networkECN{ + ecn: ecn, + } +} + +var _ Match = &networkECN{} + +// a networkECN is a match for network Explicit Congestion Notification +type networkECN struct { + ecn int +} + +// MarshalText implements Match. +func (e *networkECN) MarshalText() ([]byte, error) { + return bprintf("nw_ecn=%d", e.ecn), nil +} + +// GoString implements Match. +func (e *networkECN) GoString() string { + return fmt.Sprintf("ovs.NetworkECN(%d)", e.ecn) +} + +// NetworkTOS returns a new networkTOS type +func NetworkTOS(tos int) Match { + return &networkTOS{ + tos: tos, + } +} + +var _ Match = &networkTOS{} + +// networkTOS is a match for network type of service +type networkTOS struct { + tos int +} + +// MarshalText implements Match. +func (t *networkTOS) MarshalText() ([]byte, error) { + return bprintf("nw_tos=%d", t.tos), nil +} + +// GoString implements Match. +func (t *networkTOS) GoString() string { + return fmt.Sprintf("ovs.NetworkTOS(%d)", t.tos) +} + +// TunnelGBP returns a new tunnelGBP +func TunnelGBP(gbp int) Match { + return &tunnelGBP{ + gbp: gbp, + } +} + +var _ Match = &tunnelGBP{} + +// tunnelGBP is a match for tunnel GBP +type tunnelGBP struct { + gbp int +} + +// MarshalText implements Match. +func (t *tunnelGBP) MarshalText() ([]byte, error) { + return bprintf("tun_gbp_id=%d", t.gbp), nil +} + +// GoString implements Match. 
+func (t *tunnelGBP) GoString() string { + return fmt.Sprintf("ovs.TunnelGBP(%d)", t.gbp) +} + +// TunnelGbpFlags returns a new tunnelFlags +func TunnelGbpFlags(gbpFlags int) Match { + return &tunnelGbpFlags{ + gbpFlags: gbpFlags, + } +} + +var _ Match = &tunnelGbpFlags{} + +// tunnelGbpFlags is a match for tunnel Flags +type tunnelGbpFlags struct { + gbpFlags int +} + +// MarshalText implements Match. +func (t *tunnelGbpFlags) MarshalText() ([]byte, error) { + return bprintf("tun_gbp_flags=%d", t.gbpFlags), nil +} + +// GoString implements Match. +func (t *tunnelGbpFlags) GoString() string { + return fmt.Sprintf("ovs.TunnelGbpFlags(%d)", t.gbpFlags) +} + +// TunnelFlags returns a new tunnelFlags +func TunnelFlags(flags int) Match { + return &tunnelFlags{ + flags: flags, + } +} + +var _ Match = &tunnelFlags{} + +// tunnelFlags is a match for tunnel Flags +type tunnelFlags struct { + flags int +} + +// MarshalText implements Match. +func (t *tunnelFlags) MarshalText() ([]byte, error) { + return bprintf("tun_flags=%d", t.flags), nil +} + +// GoString implements Match. +func (t *tunnelFlags) GoString() string { + return fmt.Sprintf("ovs.TunnelFlags(%d)", t.flags) +} + +// NetworkTTL returns a new networkTTL +func NetworkTTL(ttl int) Match { + return &networkTTL{ + ttl: ttl, + } +} + +var _ Match = &networkTTL{} + +// networkTTL is a match for network time to live +type networkTTL struct { + ttl int +} + +// MarshalText implements Match. +func (t *networkTTL) MarshalText() ([]byte, error) { + return bprintf("nw_ttl=%d", t.ttl), nil +} + +// GoString implements Match. +func (t *networkTTL) GoString() string { + return fmt.Sprintf("ovs.NetworkTTL(%d)", t.ttl) +} + +// TunnelTTL returns a new tunnelTTL +func TunnelTTL(ttl int) Match { + return &tunnelTTL{ + ttl: ttl, + } +} + +var _ Match = &tunnelTTL{} + +// tunnelTTL is a match for a tunnel time to live +type tunnelTTL struct { + ttl int +} + +// MarshalText implements Match. +func (t *tunnelTTL) MarshalText() ([]byte, error) { + return bprintf("tun_ttl=%d", t.ttl), nil +} + +// GoString implements Match. +func (t *tunnelTTL) GoString() string { + return fmt.Sprintf("ovs.TunnelTTL(%d)", t.ttl) +} + +// ConjunctionID matches flows that have matched all dimension of a conjunction +// inside of the openflow table. +func ConjunctionID(id uint32) Match { + return &conjunctionIDMatch{ + id: id, + } +} + +// TunnelTOS returns a new tunnelTOS +func TunnelTOS(tos int) Match { + return &tunnelTOS{ + tos: tos, + } +} + +var _ Match = &tunnelTOS{} + +// tunnelTOS is a match for a tunnel type of service +type tunnelTOS struct { + tos int +} + +// MarshalText implements Match. +func (t *tunnelTOS) MarshalText() ([]byte, error) { + return bprintf("tun_tos=%d", t.tos), nil +} + +// GoString implements Match. +func (t *tunnelTOS) GoString() string { + return fmt.Sprintf("ovs.TunnelTOS(%d)", t.tos) +} + +// A conjunctionIDMatch is a Match returned by ConjunctionID +type conjunctionIDMatch struct { + id uint32 +} + +// MarshalText implements Match. +func (m *conjunctionIDMatch) MarshalText() ([]byte, error) { + return bprintf("conj_id=%v", m.id), nil +} + +// GoString implements Match. +func (m *conjunctionIDMatch) GoString() string { + return fmt.Sprintf("ovs.ConjunctionID(%v)", m.id) +} + +// NetworkProtocol matches packets with the specified IP or IPv6 protocol +// number matching num. For example, specifying 1 when a Flow's Protocol +// is IPv4 matches ICMP packets, or 58 when Protocol is IPv6 matches ICMPv6 +// packets. 
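+// Likewise, specifying 6 matches TCP segments under either Protocol
+// (illustrative).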
+func NetworkProtocol(num uint8) Match { + return &networkProtocolMatch{ + num: num, + } +} + +var _ Match = &networkProtocolMatch{} + +// A networkProtocolMatch is a Match returned by NetworkProtocol. +type networkProtocolMatch struct { + num uint8 +} + +// MarshalText implements Match. +func (m *networkProtocolMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", nwProto, m.num), nil +} + +// GoString implements Match. +func (m *networkProtocolMatch) GoString() string { + return fmt.Sprintf("ovs.NetworkProtocol(%d)", m.num) +} + +// IPv6Source matches packets with a source IPv6 address or IPv6 CIDR +// block matching ip. +func IPv6Source(ip string) Match { + return &ipv6Match{ + srcdst: source, + ip: ip, + } +} + +// IPv6Destination matches packets with a destination IPv6 address or +// IPv6 CIDR block matching ip. +func IPv6Destination(ip string) Match { + return &ipv6Match{ + srcdst: destination, + ip: ip, + } +} + +var _ Match = &ipv6Match{} + +// An ipv6Match is a Match returned by IPv6{Source,Destination}. +type ipv6Match struct { + srcdst string + ip string +} + +// MarshalText implements Match. +func (m *ipv6Match) MarshalText() ([]byte, error) { + return matchIPv6AddressOrCIDR(fmt.Sprintf("ipv6_%s", m.srcdst), m.ip) +} + +// GoString implements Match. +func (m *ipv6Match) GoString() string { + if m.srcdst == source { + return fmt.Sprintf("ovs.IPv6Source(%q)", m.ip) + } + + return fmt.Sprintf("ovs.IPv6Destination(%q)", m.ip) +} + +// ICMPType matches packets with the specified ICMP type matching typ. +func ICMPType(typ uint8) Match { + return &icmpTypeMatch{ + typ: typ, + } +} + +var _ Match = &icmpTypeMatch{} + +// An icmpTypeMatch is a Match returned by ICMPType. +type icmpTypeMatch struct { + typ uint8 +} + +// MarshalText implements Match. +func (m *icmpTypeMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", icmpType, m.typ), nil +} + +// GoString implements Match. +func (m *icmpTypeMatch) GoString() string { + return fmt.Sprintf("ovs.ICMPType(%d)", m.typ) +} + +// ICMPCode matches packets with the specified ICMP code. +func ICMPCode(code uint8) Match { + return &icmpCodeMatch{ + code: code, + } +} + +var _ Match = &icmpCodeMatch{} + +// An icmpCodeMatch is a Match returned by ICMPCode. +type icmpCodeMatch struct { + code uint8 +} + +// MarshalText implements Match. +func (m *icmpCodeMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", icmpCode, m.code), nil +} + +// GoString implements Match. +func (m *icmpCodeMatch) GoString() string { + return fmt.Sprintf("ovs.ICMPCode(%d)", m.code) +} + +// ICMP6Type matches packets with the specified ICMP type matching typ. +func ICMP6Type(typ uint8) Match { + return &icmp6TypeMatch{ + typ: typ, + } +} + +var _ Match = &icmp6TypeMatch{} + +// An icmp6TypeMatch is a Match returned by ICMP6Type. +type icmp6TypeMatch struct { + typ uint8 +} + +// MarshalText implements Match. +func (m *icmp6TypeMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", icmp6Type, m.typ), nil +} + +// GoString implements Match. +func (m *icmp6TypeMatch) GoString() string { + return fmt.Sprintf("ovs.ICMP6Type(%d)", m.typ) +} + +// ICMP6Code matches packets with the specified ICMP type matching typ. +func ICMP6Code(code uint8) Match { + return &icmp6CodeMatch{ + code: code, + } +} + +var _ Match = &icmp6CodeMatch{} + +// An icmp6CodeMatch is a Match returned by ICMP6Code. +type icmp6CodeMatch struct { + code uint8 +} + +// MarshalText implements Match. 
+func (m *icmp6CodeMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", icmp6Code, m.code), nil +} + +// GoString implements Match. +func (m *icmp6CodeMatch) GoString() string { + return fmt.Sprintf("ovs.ICMP6Code(%d)", m.code) +} + +// InPortMatch matches packets ingressing from a specified OVS port +func InPortMatch(port int) Match { + return &inPortMatch{ + port: port, + } +} + +var _ Match = &inPortMatch{} + +// inPort matches packets ingressing from a specified OVS port +type inPortMatch struct { + port int +} + +// MarshalText implements Match. +func (i *inPortMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", inPort, i.port), nil +} + +// GoString implements Match. +func (i *inPortMatch) GoString() string { + return fmt.Sprintf("ovs.InPort(%q)", i.port) +} + +// NeighborDiscoveryTarget matches packets with an IPv6 neighbor discovery target +// IPv6 address or IPv6 CIDR block matching ip. +func NeighborDiscoveryTarget(ip string) Match { + return &neighborDiscoveryTargetMatch{ + ip: ip, + } +} + +var _ Match = &neighborDiscoveryTargetMatch{} + +// A neighborDiscoveryTargetMatch is a Match returned by NeighborDiscoveryTarget. +type neighborDiscoveryTargetMatch struct { + ip string +} + +// MarshalText implements Match. +func (m *neighborDiscoveryTargetMatch) MarshalText() ([]byte, error) { + return matchIPv6AddressOrCIDR(ndTarget, m.ip) +} + +// GoString implements Match. +func (m *neighborDiscoveryTargetMatch) GoString() string { + return fmt.Sprintf("ovs.NeighborDiscoveryTarget(%q)", m.ip) +} + +// NeighborDiscoverySourceLinkLayer matches packets with an IPv6 neighbor +// solicitation source link-layer address matching addr. +func NeighborDiscoverySourceLinkLayer(addr net.HardwareAddr) Match { + return &neighborDiscoveryLinkLayerMatch{ + srctgt: source, + addr: addr, + } +} + +// NeighborDiscoveryTargetLinkLayer matches packets with an IPv6 neighbor +// solicitation target link-layer address matching addr. +func NeighborDiscoveryTargetLinkLayer(addr net.HardwareAddr) Match { + return &neighborDiscoveryLinkLayerMatch{ + srctgt: destination, + addr: addr, + } +} + +var _ Match = &neighborDiscoveryLinkLayerMatch{} + +// A neighborDiscoveryLinkLayerMatch is a Match returned by DataLinkVLAN. +type neighborDiscoveryLinkLayerMatch struct { + srctgt string + addr net.HardwareAddr +} + +// MarshalText implements Match. +func (m *neighborDiscoveryLinkLayerMatch) MarshalText() ([]byte, error) { + if m.srctgt == source { + return matchEthernetHardwareAddress(ndSLL, m.addr) + } + + return matchEthernetHardwareAddress(ndTLL, m.addr) +} + +// GoString implements Match. +func (m *neighborDiscoveryLinkLayerMatch) GoString() string { + syntax := hwAddrGoString(m.addr) + + if m.srctgt == source { + return fmt.Sprintf("ovs.NeighborDiscoverySourceLinkLayer(%s)", syntax) + } + + return fmt.Sprintf("ovs.NeighborDiscoveryTargetLinkLayer(%s)", syntax) +} + +// ARPOperation matches packets with the specified ARP operation matching oper. +func ARPOperation(oper uint16) Match { + return &arpOperationMatch{ + oper: oper, + } +} + +var _ Match = &arpOperationMatch{} + +// An arpOperationMatch is a Match returned by ARPOperation. +type arpOperationMatch struct { + oper uint16 +} + +// MarshalText implements Match. +func (m *arpOperationMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", arpOp, m.oper), nil +} + +// GoString implements Match. 
+func (m *arpOperationMatch) GoString() string { + return fmt.Sprintf("ovs.ARPOperation(%d)", m.oper) +} + +// ARPSourceHardwareAddress matches packets with an ARP source hardware address +// (SHA) matching addr. +func ARPSourceHardwareAddress(addr net.HardwareAddr) Match { + return &arpHardwareAddressMatch{ + srctgt: source, + addr: addr, + } +} + +// ARPTargetHardwareAddress matches packets with an ARP target hardware address +// (THA) matching addr. +func ARPTargetHardwareAddress(addr net.HardwareAddr) Match { + return &arpHardwareAddressMatch{ + srctgt: destination, + addr: addr, + } +} + +var _ Match = &arpHardwareAddressMatch{} + +// An arpHardwareAddressMatch is a Match returned by ARP{Source,Target}HardwareAddress. +type arpHardwareAddressMatch struct { + srctgt string + addr net.HardwareAddr +} + +// MarshalText implements Match. +func (m *arpHardwareAddressMatch) MarshalText() ([]byte, error) { + if m.srctgt == source { + return matchEthernetHardwareAddress(arpSHA, m.addr) + } + + return matchEthernetHardwareAddress(arpTHA, m.addr) +} + +// GoString implements Match. +func (m *arpHardwareAddressMatch) GoString() string { + syntax := hwAddrGoString(m.addr) + + if m.srctgt == source { + return fmt.Sprintf("ovs.ARPSourceHardwareAddress(%s)", syntax) + } + + return fmt.Sprintf("ovs.ARPTargetHardwareAddress(%s)", syntax) +} + +// ARPSourceProtocolAddress matches packets with an ARP source protocol address +// (SPA) IPv4 address or IPv4 CIDR block matching addr. +func ARPSourceProtocolAddress(ip string) Match { + return &arpProtocolAddressMatch{ + srctgt: source, + ip: ip, + } +} + +// ARPTargetProtocolAddress matches packets with an ARP target protocol address +// (TPA) IPv4 address or IPv4 CIDR block matching addr. +func ARPTargetProtocolAddress(ip string) Match { + return &arpProtocolAddressMatch{ + srctgt: destination, + ip: ip, + } +} + +var _ Match = &arpProtocolAddressMatch{} + +// An arpProtocolAddressMatch is a Match returned by ARP{Source,Target}ProtocolAddress. +type arpProtocolAddressMatch struct { + srctgt string + ip string +} + +// MarshalText implements Match. +func (m *arpProtocolAddressMatch) MarshalText() ([]byte, error) { + if m.srctgt == source { + return matchIPv4AddressOrCIDR(arpSPA, m.ip) + } + + return matchIPv4AddressOrCIDR(arpTPA, m.ip) +} + +// GoString implements Match. +func (m *arpProtocolAddressMatch) GoString() string { + if m.srctgt == source { + return fmt.Sprintf("ovs.ARPSourceProtocolAddress(%q)", m.ip) + } + + return fmt.Sprintf("ovs.ARPTargetProtocolAddress(%q)", m.ip) +} + +// TransportSourcePort matches packets with a transport layer (TCP/UDP) source +// port matching port. +func TransportSourcePort(port uint16) Match { + return &transportPortMatch{ + srcdst: source, + port: port, + mask: 0, + } +} + +// TransportDestinationPort matches packets with a transport layer (TCP/UDP) +// destination port matching port. +func TransportDestinationPort(port uint16) Match { + return &transportPortMatch{ + srcdst: destination, + port: port, + mask: 0, + } +} + +// TransportSourceMaskedPort matches packets with a transport layer (TCP/UDP) +// source port matching a masked port range. +func TransportSourceMaskedPort(port uint16, mask uint16) Match { + return &transportPortMatch{ + srcdst: source, + port: port, + mask: mask, + } +} + +// TransportDestinationMaskedPort matches packets with a transport layer (TCP/UDP) +// destination port matching a masked port range. 
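+//
+// For example (illustrative), a port of 0x03e8 (1000) with a mask of 0xfff8
+// covers ports 1000 through 1007.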
+func TransportDestinationMaskedPort(port uint16, mask uint16) Match { + return &transportPortMatch{ + srcdst: destination, + port: port, + mask: mask, + } +} + +// A transportPortMatch is a Match returned by Transport{Source,Destination}Port. +type transportPortMatch struct { + srcdst string + port uint16 + mask uint16 +} + +var _ Match = &transportPortMatch{} + +// A TransportPortRanger represents a port range that can be expressed as an array of bitwise matches. +type TransportPortRanger interface { + MaskedPorts() ([]Match, error) +} + +// A TransportPortRange reprsents the start and end values of a transport protocol port range. +type transportPortRange struct { + srcdst string + startPort uint16 + endPort uint16 +} + +// TransportDestinationPortRange represent a port range intended for a transport protocol destination port. +func TransportDestinationPortRange(startPort uint16, endPort uint16) TransportPortRanger { + return &transportPortRange{ + srcdst: destination, + startPort: startPort, + endPort: endPort, + } +} + +// TransportSourcePortRange represent a port range intended for a transport protocol source port. +func TransportSourcePortRange(startPort uint16, endPort uint16) TransportPortRanger { + return &transportPortRange{ + srcdst: source, + startPort: startPort, + endPort: endPort, + } +} + +// MaskedPorts returns the represented port ranges as an array of bitwise matches. +func (pr *transportPortRange) MaskedPorts() ([]Match, error) { + portRange := PortRange{ + Start: pr.startPort, + End: pr.endPort, + } + + bitRanges, err := portRange.BitwiseMatch() + if err != nil { + return nil, err + } + + var ports []Match + + for _, br := range bitRanges { + maskedPortRange := &transportPortMatch{ + srcdst: pr.srcdst, + port: br.Value, + mask: br.Mask, + } + ports = append(ports, maskedPortRange) + } + + return ports, nil +} + +// MarshalText implements Match. +func (m *transportPortMatch) MarshalText() ([]byte, error) { + return matchTransportPort(m.srcdst, m.port, m.mask) +} + +// GoString implements Match. +func (m *transportPortMatch) GoString() string { + if m.mask > 0 { + if m.srcdst == source { + return fmt.Sprintf("ovs.TransportSourceMaskedPort(%#x, %#x)", m.port, m.mask) + } + + return fmt.Sprintf("ovs.TransportDestinationMaskedPort(%#x, %#x)", m.port, m.mask) + } + + if m.srcdst == source { + return fmt.Sprintf("ovs.TransportSourcePort(%d)", m.port) + } + + return fmt.Sprintf("ovs.TransportDestinationPort(%d)", m.port) +} + +// A vlanTCIMatch is a Match returned by VLANTCI. +type vlanTCIMatch struct { + tci uint16 + mask uint16 +} + +// VLANTCI matches packets based on their VLAN tag control information, using +// the specified TCI and optional mask value. +func VLANTCI(tci, mask uint16) Match { + return &vlanTCIMatch{ + tci: tci, + mask: mask, + } +} + +// MarshalText implements Match. +func (m *vlanTCIMatch) MarshalText() ([]byte, error) { + if m.mask != 0 { + return bprintf("%s=0x%04x/0x%04x", vlanTCI, m.tci, m.mask), nil + } + + return bprintf("%s=0x%04x", vlanTCI, m.tci), nil +} + +// GoString implements Match. +func (m *vlanTCIMatch) GoString() string { + return fmt.Sprintf("ovs.VLANTCI(0x%04x, 0x%04x)", m.tci, m.mask) +} + +// A vlanTCI1Match is a Match returned by VLANTCI1. +type vlanTCI1Match struct { + tci uint16 + mask uint16 +} + +// VLANTCI1 matches packets based on their VLAN tag control information, using +// the specified TCI and optional mask value. 
+func VLANTCI1(tci, mask uint16) Match { + return &vlanTCI1Match{ + tci: tci, + mask: mask, + } +} + +// MarshalText implements Match. +func (m *vlanTCI1Match) MarshalText() ([]byte, error) { + if m.mask != 0 { + return bprintf("%s=0x%04x/0x%04x", vlanTCI1, m.tci, m.mask), nil + } + + return bprintf("%s=0x%04x", vlanTCI1, m.tci), nil +} + +// GoString implements Match. +func (m *vlanTCI1Match) GoString() string { + return fmt.Sprintf("ovs.VLANTCI1(0x%04x, 0x%04x)", m.tci, m.mask) +} + +// An ipv6LabelMatch is a Match returned by IPv6Label. +type ipv6LabelMatch struct { + label uint32 + mask uint32 +} + +// IPv6Label matches packets based on their IPv6 label information, using +// the specified label and optional mask value. +func IPv6Label(label, mask uint32) Match { + return &ipv6LabelMatch{ + label: label, + mask: mask, + } +} + +// MarshalText implements Match. +func (m *ipv6LabelMatch) MarshalText() ([]byte, error) { + if !validIPv6Label(m.label) || !validIPv6Label(m.mask) { + return nil, errInvalidIPv6Label + } + if m.mask != 0 { + return bprintf("%s=0x%05x/0x%05x", ipv6Label, m.label, m.mask), nil + } + + return bprintf("%s=0x%05x", ipv6Label, m.label), nil +} + +// GoString implements Match. +func (m *ipv6LabelMatch) GoString() string { + return fmt.Sprintf("ovs.IPv6Label(0x%04x, 0x%04x)", m.label, m.mask) +} + +// An arpOpMatch is a Match returned by ArpOp. +type arpOpMatch struct { + op uint16 +} + +// ArpOp matches packets based on their IPv6 label information, using +// the specified op. +func ArpOp(op uint16) Match { + return &arpOpMatch{ + op: op, + } +} + +// MarshalText implements Match. +func (m *arpOpMatch) MarshalText() ([]byte, error) { + if !validARPOP(m.op) { + return nil, errInvalidARPOP + } + + return bprintf("%s=%1d", arpOp, m.op), nil +} + +// GoString implements Match. +func (m *arpOpMatch) GoString() string { + return fmt.Sprintf("ovs.ArpOp(%01d)", m.op) +} + +// A connectionTrackingMarkMatch is a Match returned by ConnectionTrackingMark. +type connectionTrackingMarkMatch struct { + mark uint32 + mask uint32 +} + +// ConnectionTrackingMark matches a metadata associated with a connection tracking entry +func ConnectionTrackingMark(mark, mask uint32) Match { + return &connectionTrackingMarkMatch{ + mark: mark, + mask: mask, + } +} + +// MarshalText implements Match. +func (m *connectionTrackingMarkMatch) MarshalText() ([]byte, error) { + if m.mask != 0 { + return bprintf("%s=0x%08x/0x%08x", ctMark, m.mark, m.mask), nil + } + + return bprintf("%s=0x%08x", ctMark, m.mark), nil +} + +// GoString implements Match. +func (m *connectionTrackingMarkMatch) GoString() string { + return fmt.Sprintf("ovs.ConnectionTrackingMark(0x%08x, 0x%08x)", m.mark, m.mask) +} + +// A connectionTrackingZoneMatch is a Match returned by ConnectionTrackingZone. +type connectionTrackingZoneMatch struct { + zone uint16 +} + +// ConnectionTrackingZone is a mechanism to define separate connection tracking contexts. +func ConnectionTrackingZone(zone uint16) Match { + return &connectionTrackingZoneMatch{ + zone: zone, + } +} + +// MarshalText implements Match. +func (m *connectionTrackingZoneMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%d", ctZone, m.zone), nil +} + +// GoString implements Match. +func (m *connectionTrackingZoneMatch) GoString() string { + return fmt.Sprintf("ovs.ConnectionTrackingZone(%d)", m.zone) +} + +// ConnectionTrackingState matches packets using their connection state, when +// connection tracking is enabled on the host. 
Use the SetState and UnsetState +// functions to populate the parameter list for this function. +func ConnectionTrackingState(state ...string) Match { + return &connectionTrackingMatch{ + state: state, + } +} + +var _ Match = &connectionTrackingMatch{} + +// A connectionTrackingMatch is a Match returned by ConnectionTrackingState. +type connectionTrackingMatch struct { + state []string +} + +// MarshalText implements Match. +func (m *connectionTrackingMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%s", ctState, strings.Join(m.state, "")), nil +} + +// GoString implements Match. +func (m *connectionTrackingMatch) GoString() string { + buf := bytes.NewBuffer(nil) + for i, s := range m.state { + _, _ = buf.WriteString(fmt.Sprintf("%q", s)) + + if i != len(m.state)-1 { + _, _ = buf.WriteString(", ") + } + } + + return fmt.Sprintf("ovs.ConnectionTrackingState(%s)", buf.String()) +} + +// CTState is a connection tracking state, which can be used with the +// ConnectionTrackingState function. +type CTState string + +// List of common CTState constants available in OVS 2.5. Reference the +// ovs-ofctl man-page for a description of each one. +const ( + CTStateNew CTState = "new" + CTStateEstablished CTState = "est" + CTStateRelated CTState = "rel" + CTStateReply CTState = "rpl" + CTStateInvalid CTState = "inv" + CTStateTracked CTState = "trk" +) + +// SetState sets the specified CTState flag. This helper should be used +// with ConnectionTrackingState. +func SetState(state CTState) string { + return fmt.Sprintf("+%s", state) +} + +// UnsetState unsets the specified CTState flag. This helper should be used +// with ConnectionTrackingState. +func UnsetState(state CTState) string { + return fmt.Sprintf("-%s", state) +} + +// Metadata returns a Match that matches the given Metadata exactly. +func Metadata(id uint64) Match { + return &metadataMatch{ + data: id, + mask: 0, + } +} + +// MetadataWithMask returns a Match with specified Metadata and mask. +func MetadataWithMask(id, mask uint64) Match { + return &metadataMatch{ + data: id, + mask: mask, + } +} + +var _ Match = &metadataMatch{} + +// A metadataMatch is a Match against a Metadata field. +type metadataMatch struct { + data uint64 + mask uint64 +} + +// GoString implements Match. +func (m *metadataMatch) GoString() string { + if m.mask > 0 { + return fmt.Sprintf("ovs.MetadataWithMask(%#x, %#x)", m.data, m.mask) + } + + return fmt.Sprintf("ovs.Metadata(%#x)", m.data) +} + +// MarshalText implements Match. +func (m *metadataMatch) MarshalText() ([]byte, error) { + if m.mask == 0 { + return bprintf("%s=%#x", metadata, m.data), nil + } + + return bprintf("%s=%#x/%#x", metadata, m.data, m.mask), nil +} + +// TCPFlags matches packets using their enabled TCP flags, when matching TCP +// flags on a TCP segment. Use the SetTCPFlag and UnsetTCPFlag functions to +// populate the parameter list for this function. +func TCPFlags(flags ...string) Match { + return &tcpFlagsMatch{ + flags: flags, + } +} + +var _ Match = &tcpFlagsMatch{} + +// A tcpFlagsMatch is a Match returned by TCPFlags. +type tcpFlagsMatch struct { + flags []string +} + +// MarshalText implements Match. +func (m *tcpFlagsMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%s", tcpFlags, strings.Join(m.flags, "")), nil +} + +// GoString implements Match. 
+func (m *tcpFlagsMatch) GoString() string { + buf := bytes.NewBuffer(nil) + for i, s := range m.flags { + _, _ = buf.WriteString(fmt.Sprintf("%q", s)) + + if i != len(m.flags)-1 { + _, _ = buf.WriteString(", ") + } + } + + return fmt.Sprintf("ovs.TCPFlags(%s)", buf.String()) +} + +// TCPFlag represents a flag in the TCP header, which can be used with the +// TCPFlags function. +type TCPFlag string + +// RFC 793 TCP Flags +const ( + TCPFlagURG TCPFlag = "urg" + TCPFlagACK TCPFlag = "ack" + TCPFlagPSH TCPFlag = "psh" + TCPFlagRST TCPFlag = "rst" + TCPFlagSYN TCPFlag = "syn" + TCPFlagFIN TCPFlag = "fin" +) + +// SetTCPFlag sets the specified TCPFlag. This helper should be used +// with TCPFlags. +func SetTCPFlag(flag TCPFlag) string { + return fmt.Sprintf("+%s", flag) +} + +// UnsetTCPFlag unsets the specified TCPFlag. This helper should be used +// with TCPFlags. +func UnsetTCPFlag(flag TCPFlag) string { + return fmt.Sprintf("-%s", flag) +} + +// TunnelID returns a Match that matches the given ID exactly. +func TunnelID(id uint64) Match { + return &tunnelIDMatch{ + id: id, + mask: 0, + } +} + +// TunnelIDWithMask returns a Match with specified ID and mask. +func TunnelIDWithMask(id, mask uint64) Match { + return &tunnelIDMatch{ + id: id, + mask: mask, + } +} + +var _ Match = &tunnelIDMatch{} + +// A tunnelIDMatch is a Match against a tunnel ID. +type tunnelIDMatch struct { + id uint64 + mask uint64 +} + +// GoString implements Match. +func (m *tunnelIDMatch) GoString() string { + if m.mask > 0 { + return fmt.Sprintf("ovs.TunnelIDWithMask(%#x, %#x)", m.id, m.mask) + } + + return fmt.Sprintf("ovs.TunnelID(%#x)", m.id) +} + +// MarshalText implements Match. +func (m *tunnelIDMatch) MarshalText() ([]byte, error) { + if m.mask == 0 { + return bprintf("%s=%#x", tunID, m.id), nil + } + + return bprintf("%s=%#x/%#x", tunID, m.id, m.mask), nil +} + +// TunnelSrc returns a Match with specified Tunnel Source. +func TunnelSrc(addr string) Match { + return &tunnelMatch{ + srcdst: source, + ip: addr, + } +} + +// TunnelDst returns a Match with specified Tunnel Destination. +func TunnelDst(addr string) Match { + return &tunnelMatch{ + srcdst: destination, + ip: addr, + } +} + +var _ Match = &tunnelMatch{} + +// A tunnelMatch is a Match against a tunnel {source|destination}. +type tunnelMatch struct { + srcdst string + ip string +} + +// GoString implements Match. +func (m *tunnelMatch) GoString() string { + if m.srcdst == source { + return fmt.Sprintf("ovs.TunnelSrc(%q)", m.ip) + } + + return fmt.Sprintf("ovs.TunnelDst(%q)", m.ip) +} + +// MarshalText implements Match. +func (m *tunnelMatch) MarshalText() ([]byte, error) { + return matchIPv4AddressOrCIDR(fmt.Sprintf("tun_%s", m.srcdst), m.ip) +} + +// matchIPv4AddressOrCIDR attempts to create a Match using the specified key +// and input string, which could be interpreted as an IPv4 address or IPv4 +// CIDR block. 
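+//
+// For example (illustrative), ("nw_src", "192.0.2.0/24") yields
+// "nw_src=192.0.2.0/24", while an IPv6 input returns an error.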
+func matchIPv4AddressOrCIDR(key string, ip string) ([]byte, error) { + errInvalidIPv4 := fmt.Errorf("%q is not a valid IPv4 address or IPv4 CIDR block", ip) + + if ipAddr, _, err := net.ParseCIDR(ip); err == nil { + if ipAddr.To4() == nil { + return nil, errInvalidIPv4 + } + + return bprintf("%s=%s", key, ip), nil + } + + if ipAddr := net.ParseIP(ip); ipAddr != nil { + if ipAddr.To4() == nil { + return nil, errInvalidIPv4 + } + + return bprintf("%s=%s", key, ipAddr.String()), nil + } + + return nil, errInvalidIPv4 +} + +// matchIPv6AddressOrCIDR attempts to create a Match using the specified key +// and input string, which could be interpreted as an IPv6 address or IPv6 +// CIDR block. +func matchIPv6AddressOrCIDR(key string, ip string) ([]byte, error) { + errInvalidIPv6 := fmt.Errorf("%q is not a valid IPv6 address or IPv6 CIDR block", ip) + + if ipAddr, _, err := net.ParseCIDR(ip); err == nil { + if ipAddr.To16() == nil || ipAddr.To4() != nil { + return nil, errInvalidIPv6 + } + + return bprintf("%s=%s", key, ip), nil + } + + if ipAddr := net.ParseIP(ip); ipAddr != nil { + if ipAddr.To16() == nil || ipAddr.To4() != nil { + return nil, errInvalidIPv6 + } + + return bprintf("%s=%s", key, ipAddr.String()), nil + } + + return nil, errInvalidIPv6 +} + +// matchEthernetHardwareAddress attempts to create a Match using the specified +// key and input hardware address, which must be a 6-octet Ethernet hardware +// address. +func matchEthernetHardwareAddress(key string, addr net.HardwareAddr) ([]byte, error) { + if len(addr) != ethernetAddrLen { + return nil, fmt.Errorf("hardware address must be %d octets, but got %d", + ethernetAddrLen, len(addr)) + } + + return bprintf("%s=%s", key, addr.String()), nil +} + +// matchTransportPort is the common implementation for +// Transport{Source,Destination}Port. +func matchTransportPort(srcdst string, port uint16, mask uint16) ([]byte, error) { + // No mask specified + if mask == 0 { + return bprintf("tp_%s=%d", srcdst, port), nil + } + + return bprintf("tp_%s=0x%04x/0x%04x", srcdst, port, mask), nil +} + +// IPFragFlag is a string type which can be used with the IPFragMatch. +type IPFragFlag string + +// OvS IP frag flags. +// Source: http://www.openvswitch.org/support/dist-docs-2.5/ovs-ofctl.8.txt +const ( + IPFragFlagYes IPFragFlag = "yes" + IPFragFlagNo IPFragFlag = "no" + IPFragFlagFirst IPFragFlag = "first" + IPFragFlagLater IPFragFlag = "later" + IPFragFlagNotLater IPFragFlag = "not_later" +) + +// IPFrag returns an ipFragMatch. +func IPFrag(flag IPFragFlag) Match { + return &ipFragMatch{flag: flag} +} + +// ipFragMatch implements the Match interface and is a match against +// a packet fragmentation value. +type ipFragMatch struct { + flag IPFragFlag +} + +var _ Match = &ipFragMatch{} + +// GoString implements Match. +func (m *ipFragMatch) GoString() string { + return fmt.Sprintf("ovs.IpFrag(%v)", m.flag) +} + +// MarshalText implements Match. +func (m *ipFragMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%s", ipFrag, m.flag), nil +} + +// FieldMatch returns an fieldMatch. +func FieldMatch(field, srcOrValue string) Match { + return &fieldMatch{field: field, srcOrValue: srcOrValue} +} + +// fieldMatch implements the Match interface and +// matches a given field against another a value, e.g. "0x123" or "1.2.3.4", +// or against another src field in the packet, e.g "arp_tpa" or "NXM_OF_ARP_TPA[]". +type fieldMatch struct { + field string + srcOrValue string +} + +var _ Match = &fieldMatch{} + +// GoString implements Match. 
+func (m *fieldMatch) GoString() string { + return fmt.Sprintf("ovs.FieldMatch(%v,%v)", m.field, m.srcOrValue) +} + +// MarshalText implements Match. +func (m *fieldMatch) MarshalText() ([]byte, error) { + return bprintf("%s=%s", m.field, m.srcOrValue), nil +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/matchflow.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/matchflow.go new file mode 100644 index 0000000..445afe9 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/matchflow.go @@ -0,0 +1,162 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + // AnyTable is a special table value to match flows in any table. + AnyTable = -1 +) + +var ( + // errEmptyMatchFlow is returned when a MatchFlow has no arguments. + errEmptyMatchFlow = errors.New("match flow is empty") +) + +// TODO(mdlayher): it would be nice if MatchFlow was just a Flow. + +// A MatchFlow is an OpenFlow flow intended for flow deletion. It can be marshaled to its textual +// form for use with Open vSwitch. +type MatchFlow struct { + Protocol Protocol + InPort int + Matches []Match + Table int + + // Cookie indicates a cookie value to use when matching flows. + Cookie uint64 + + // CookieMask is a mask used alongside Cookie to enable matching flows + // which match a mask. If CookieMask is not set, Cookie will be matched + // exactly. + CookieMask uint64 +} + +var _ error = &MatchFlowError{} + +// A MatchFlowError is an error encountered while marshaling or unmarshaling +// a MatchFlow. +type MatchFlowError struct { + // Str indicates the string, if any, that caused the flow to + // fail while unmarshaling. + Str string + + // Err indicates the error that halted flow marshaling or unmarshaling. + Err error +} + +// Error returns the string representation of a MatchFlowError. +func (e *MatchFlowError) Error() string { + if e.Str == "" { + return e.Err.Error() + } + + return fmt.Sprintf("flow error due to string %q: %v", + e.Str, e.Err) +} + +// MarshalText marshals a MatchFlow into its textual form. +func (f *MatchFlow) MarshalText() ([]byte, error) { + matches, err := f.marshalMatches() + if err != nil { + return nil, err + } + + var b []byte + + if f.Protocol != "" { + b = append(b, f.Protocol...) + b = append(b, ',') + } + + if f.InPort != 0 { + b = append(b, inPort+"="...) + + // Special case, InPortLOCAL is converted to the literal string LOCAL + if f.InPort == PortLOCAL { + b = append(b, portLOCAL...) + } else { + b = strconv.AppendInt(b, int64(f.InPort), 10) + } + b = append(b, ',') + } + + if len(matches) > 0 { + b = append(b, strings.Join(matches, ",")...) + b = append(b, ',') + } + + if f.Cookie > 0 { + // Hexadecimal cookies and masks are much easier to read. + b = append(b, cookie+"="...) + b = append(b, paddedHexUint64(f.Cookie)...) + b = append(b, '/') + + if f.CookieMask == 0 { + b = append(b, "-1"...) 
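+			// "-1" is the all-ones mask accepted by ovs-ofctl, so an unset
+			// CookieMask still requests an exact cookie match.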
+ } else { + b = append(b, paddedHexUint64(f.CookieMask)...) + } + + b = append(b, ',') + } + + if f.Table != AnyTable { + b = append(b, table+"="...) + b = strconv.AppendInt(b, int64(f.Table), 10) + } + + b = bytes.Trim(b, ",") + + if len(b) == 0 { + return nil, &MatchFlowError{ + Err: errEmptyMatchFlow, + } + } + + return b, nil +} + +// marshalMatches marshals all Matches in a MatchFlow to their text form. +func (f *MatchFlow) marshalMatches() ([]string, error) { + fns := make([]func() ([]byte, error), 0, len(f.Matches)) + for _, fn := range f.Matches { + fns = append(fns, fn.MarshalText) + } + + return f.marshalFunctions(fns) +} + +// marshalFunctions marshals a slice of functions to their text form. +func (f *MatchFlow) marshalFunctions(fns []func() ([]byte, error)) ([]string, error) { + out := make([]string, 0, len(fns)) + for _, fn := range fns { + o, err := fn() + if err != nil { + return nil, err + } + + out = append(out, string(o)) + } + + return out, nil +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/matchparser.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/matchparser.go new file mode 100644 index 0000000..9307044 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/matchparser.go @@ -0,0 +1,574 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "errors" + "fmt" + "math" + "net" + "strconv" + "strings" +) + +// parseMatch creates a Match function from the input string. 
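+//
+// For example (illustrative), parseMatch("tp_dst", "80") produces the same
+// Match as TransportDestinationMaskedPort(80, 0); unknown keys return a nil
+// Match and a nil error so the caller can skip them.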
+func parseMatch(key string, value string) (Match, error) { + switch key { + case arpSHA, arpTHA, ndSLL, ndTLL: + return parseMACMatch(key, value) + case arpOp: + return parseArpOp(value) + case icmpType, icmpCode, icmp6Type, icmp6Code, nwProto: + return parseIntMatch(key, value, math.MaxUint8) + case ctZone: + return parseIntMatch(key, value, math.MaxUint16) + case tpSRC, tpDST: + return parsePort(key, value, math.MaxUint16) + case conjID: + return parseIntMatch(key, value, math.MaxInt32) + case arpSPA: + return ARPSourceProtocolAddress(value), nil + case arpTPA: + return ARPTargetProtocolAddress(value), nil + case ctState: + return parseCTState(value) + case tcpFlags: + return parseTCPFlags(value) + case dlSRC: + return DataLinkSource(value), nil + case dlDST: + return DataLinkDestination(value), nil + case dlType: + etherType, err := parseHexUint16(value) + if err != nil { + return nil, err + } + + return DataLinkType(etherType), nil + case dlVLANPCP: + return parseDataLinkVLANPCP(value) + case dlVLAN: + return parseDataLinkVLAN(value) + case ndTarget: + return NeighborDiscoveryTarget(value), nil + case nwECN: + return parseIntMatch(key, value, math.MaxInt32) + case nwTTL: + return parseIntMatch(key, value, math.MaxInt32) + case tunTTL: + return parseIntMatch(key, value, math.MaxInt32) + case tunTOS: + return parseIntMatch(key, value, math.MaxInt32) + case nwTOS: + return parseIntMatch(key, value, math.MaxInt32) + case tunGbpID: + return parseIntMatch(key, value, math.MaxInt32) + case tunGbpFlags: + return parseIntMatch(key, value, math.MaxInt32) + case tunFlags: + return parseIntMatch(key, value, math.MaxInt32) + case inPort: + return parseIntMatch(key, value, math.MaxInt32) + case ipv6SRC: + return IPv6Source(value), nil + case ipv6DST: + return IPv6Destination(value), nil + case metadata: + return parseMetadata(value) + case tunv6SRC: + return IPv6Source(value), nil + case tunv6DST: + return IPv6Destination(value), nil + case ipv6Label: + return parseIPv6Label(value) + case nwSRC: + return NetworkSource(value), nil + case tunSRC: + return NetworkSource(value), nil + case tunDST: + return NetworkDestination(value), nil + case nwDST: + return NetworkDestination(value), nil + case vlanTCI1: + return parseVLANTCI1(value) + case vlanTCI: + return parseVLANTCI(value) + case ctMark: + return parseCTMark(value) + case tunID: + return parseTunID(value) + } + + return nil, nil +} + +// parseClampInt calls strconv.Atoi on s, and then ensures that s is less than +// or equal to the integer specified by max. +func parseClampInt(s string, max int) (int, error) { + t, err := strconv.Atoi(s) + if err != nil { + return 0, err + } + if t > max { + return 0, fmt.Errorf("integer %d too large; %d > %d", t, t, max) + } + + return t, nil +} + +// parseIntMatch parses an integer Match value from the input key and value, +// with a maximum possible value of max. 
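+//
+// For example (illustrative), parseIntMatch(nwProto, "6", math.MaxUint8)
+// returns NetworkProtocol(6).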
+func parseIntMatch(key string, value string, max int) (Match, error) { + t, err := parseClampInt(value, max) + if err != nil { + return nil, err + } + + switch key { + case icmpType: + return ICMPType(uint8(t)), nil + case icmpCode: + return ICMPCode(uint8(t)), nil + case icmp6Type: + return ICMP6Type(uint8(t)), nil + case icmp6Code: + return ICMP6Code(uint8(t)), nil + case inPort: + return InPortMatch(int(t)), nil + case nwECN: + return NetworkECN(int(t)), nil + case nwTTL: + return NetworkTTL(int(t)), nil + case tunTTL: + return TunnelTTL(int(t)), nil + case tunTOS: + return TunnelTOS(int(t)), nil + case nwTOS: + return NetworkTOS(int(t)), nil + case tunGbpID: + return TunnelGBP(int(t)), nil + case tunGbpFlags: + return TunnelGbpFlags(int(t)), nil + case tunFlags: + return TunnelFlags(int(t)), nil + case nwProto: + return NetworkProtocol(uint8(t)), nil + case ctZone: + return ConnectionTrackingZone(uint16(t)), nil + case conjID: + return ConjunctionID(uint32(t)), nil + } + + return nil, fmt.Errorf("no action matched for %s=%s", key, value) +} + +// parsePort parses a port or port/mask Match value from the input key and value, +// with a maximum possible value of max. +func parsePort(key string, value string, max int) (Match, error) { + + var values []uint64 + //Split the string + ss := strings.Split(value, "/") + + //If input is just port + switch len(ss) { + case 1: + val, err := parseClampInt(value, max) + if err != nil { + return nil, err + } + values = append(values, uint64(val)) + values = append(values, 0) + // If input is port/mask + case 2: + for _, s := range ss { + val, err := parseHexUint64(s) + if err != nil { + return nil, err + } + // Return error if val > 65536 (uint16) + if val > uint64(max) { + return nil, fmt.Errorf("integer %d too large; %d > %d", val, val, max) + } + + values = append(values, val) + } + default: + return nil, fmt.Errorf("invalid value, no action matched for %s=%s", key, value) + } + + switch key { + case tpSRC: + return TransportSourceMaskedPort(uint16(values[0]), uint16(values[1])), nil + case tpDST: + return TransportDestinationMaskedPort(uint16(values[0]), uint16(values[1])), nil + } + // Return error if input is invalid + return nil, fmt.Errorf("no action matched for %s=%s", key, value) +} + +// parseMACMatch parses a MAC address Match value from the input key and value. +func parseMACMatch(key string, value string) (Match, error) { + mac, err := net.ParseMAC(value) + if err != nil { + return nil, err + } + + switch key { + case arpSHA: + return ARPSourceHardwareAddress(mac), nil + case arpTHA: + return ARPTargetHardwareAddress(mac), nil + case ndSLL: + return NeighborDiscoverySourceLinkLayer(mac), nil + case ndTLL: + return NeighborDiscoveryTargetLinkLayer(mac), nil + } + + return nil, fmt.Errorf("no action matched for %s=%s", key, value) +} + +// parseCTState parses a series of connection tracking values into a Match. 
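+//
+// For example (illustrative), both "+est+trk" and "est|trk" are normalized to
+// ConnectionTrackingState("+est", "+trk").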
+func parseCTState(value string) (Match, error) { + // If the format use bar: + // "est|trk|dnat" => "+est+trk+dnat" + if strings.Contains(value, "|") { + value = strings.ReplaceAll(value, "|", "+") + value = "+" + value + } + + // Add space between flags + // "+est+trk+dnat-snat" => "+est +trk +dnat -snat" + if strings.Contains(value, "+") || strings.Contains(value, "-") { + value = strings.ReplaceAll(value, "+", " +") + value = strings.ReplaceAll(value, "-", " -") + value = strings.Trim(value, " ") + } else { + // Assume only one state is specified: "ct_state=trk" + // "trk" => "+trk" + value = "+" + value + } + + states := strings.Fields(value) + return ConnectionTrackingState(states...), nil +} + +// parseTCPFlags parses a series of TCP flags into a Match. Open vSwitch's representation +// of These TCP flags are outlined in the ovs-field(7) man page, +func parseTCPFlags(value string) (Match, error) { + // tcp_flag can also be decimal number + if _, err := strconv.Atoi(value); err == nil { + return TCPFlags(value), nil + } + + if len(value)%4 != 0 { + return nil, errors.New("tcp_flags length must be divisible by 4") + } + + var buf bytes.Buffer + var flags []string + + for i, r := range value { + if i != 0 && i%4 == 0 { + flags = append(flags, buf.String()) + buf.Reset() + } + + _, _ = buf.WriteRune(r) + } + flags = append(flags, buf.String()) + + return TCPFlags(flags...), nil +} + +// hexPrefix denotes that a string integer is in hex format. +const hexPrefix = "0x" + +// parseDataLinkVLAN parses a DataLinkVLAN Match from value. +func parseDataLinkVLAN(value string) (Match, error) { + if !strings.HasPrefix(value, hexPrefix) { + vlan, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + + return DataLinkVLAN(vlan), nil + } + + vlan, err := parseHexUint16(value) + if err != nil { + return nil, err + } + + return DataLinkVLAN(int(vlan)), nil +} + +// parseDataLinkVLANPCP parses a DataLinkVLANPCP Match from value. +func parseDataLinkVLANPCP(value string) (Match, error) { + if !strings.HasPrefix(value, hexPrefix) { + pcp, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + + return DataLinkVLANPCP(pcp), nil + } + + pcp, err := parseHexUint16(value) + if err != nil { + return nil, err + } + + return DataLinkVLANPCP(int(pcp)), nil +} + +// parseVLANTCI parses a VLANTCI Match from value. +func parseVLANTCI(value string) (Match, error) { + var values []uint16 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint16(v)) + continue + } + + v, err := parseHexUint16(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return VLANTCI(values[0], 0), nil + case 2: + return VLANTCI(values[0], values[1]), nil + // Match had too many parts, e.g. "vlan_tci=10/10/10" + default: + return nil, fmt.Errorf("invalid vlan_tci match: %q", value) + } +} + +// parseVLANTCI1 parses a VLANTCI1 Match from value. 
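+//
+// For example (illustrative), "0x1000/0x1000" becomes
+// VLANTCI1(0x1000, 0x1000).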
+func parseVLANTCI1(value string) (Match, error) { + var values []uint16 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint16(v)) + continue + } + + v, err := parseHexUint16(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return VLANTCI1(values[0], 0), nil + case 2: + return VLANTCI1(values[0], values[1]), nil + // Match had too many parts, e.g. "vlan_tci1=10/10/10" + default: + return nil, fmt.Errorf("invalid vlan_tci1 match: %q", value) + } +} + +// parseIPv6Label parses a IPv6Label Match from value. +func parseIPv6Label(value string) (Match, error) { + var values []uint32 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint32(v)) + continue + } + + v, err := parseHexUint32(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return IPv6Label(values[0], 0), nil + case 2: + return IPv6Label(values[0], values[1]), nil + // Match had too many parts, e.g. "ipv6_label=10/10/10" + default: + return nil, fmt.Errorf("invalid ipv6_label match: %q", value) + } +} + +// parseArpOp parses a ArpOp Match from value. +func parseArpOp(value string) (Match, error) { + if !strings.HasPrefix(value, hexPrefix) { + parsed, err := strconv.ParseUint(value, 10, 16) + if err != nil { + return nil, err + } + return ArpOp(uint16(parsed)), nil + } + + v, err := parseHexUint16(value) + if err != nil { + return nil, err + } + return ArpOp(v), nil +} + +// parseCTMark parses a CTMark Match from value. +func parseCTMark(value string) (Match, error) { + var values []uint32 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint32(v)) + continue + } + + v, err := parseHexUint32(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return ConnectionTrackingMark(values[0], 0), nil + case 2: + return ConnectionTrackingMark(values[0], values[1]), nil + // Match had too many parts, e.g. "ct_mark=10/10/10" + default: + return nil, fmt.Errorf("invalid ct_mark match: %q", value) + } +} + +// parseMetadata parses a Metadata Match from value. +func parseMetadata(value string) (Match, error) { + var values []uint64 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint64(v)) + continue + } + + v, err := parseHexUint64(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return Metadata(values[0]), nil + case 2: + return MetadataWithMask(values[0], values[1]), nil + // Match had too many parts, e.g. "metadata=10/10/10" + default: + return nil, fmt.Errorf("invalid metadata match: %q", value) + } +} + +// parseTunID parses a tunID Match from value. 
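+//
+// For example (illustrative), "0x10/0xff" becomes TunnelIDWithMask(0x10, 0xff)
+// and a bare "16" becomes TunnelID(16).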
+func parseTunID(value string) (Match, error) { + var values []uint64 + for _, s := range strings.Split(value, "/") { + if !strings.HasPrefix(s, hexPrefix) { + v, err := strconv.Atoi(s) + if err != nil { + return nil, err + } + + values = append(values, uint64(v)) + continue + } + + v, err := parseHexUint64(s) + if err != nil { + return nil, err + } + + values = append(values, v) + } + + switch len(values) { + case 1: + return TunnelID(values[0]), nil + case 2: + return TunnelIDWithMask(values[0], values[1]), nil + // Match had too many parts, e.g. "tun_id=10/10/10" + default: + return nil, fmt.Errorf("invalid tun_id match: %q", value) + } +} + +// parseHexUint16 parses a uint16 value from a hexadecimal string. +func parseHexUint16(value string) (uint16, error) { + val, err := strconv.ParseUint(strings.TrimPrefix(value, hexPrefix), 16, 32) + if err != nil { + return 0, err + } + return uint16(val), nil +} + +// parseHexUint32 parses a uint32 value from a hexadecimal string. +func parseHexUint32(value string) (uint32, error) { + val, err := strconv.ParseUint(strings.TrimPrefix(value, hexPrefix), 16, 32) + if err != nil { + return 0, err + } + return uint32(val), nil +} + +// parseHexUint64 parses a uint64 value from a hexadecimal string. +func parseHexUint64(value string) (uint64, error) { + return strconv.ParseUint(strings.TrimPrefix(value, hexPrefix), 16, 64) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/openflow.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/openflow.go new file mode 100644 index 0000000..17458e4 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/openflow.go @@ -0,0 +1,475 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" +) + +var ( + // errMultipleValues is returned when a function should only retrieve a + // single value, but multiple values are found instead. + errMultipleValues = errors.New("multiple values returned") + + // errNotCommitted is returned when a flow bundle transaction is not + // committed, and thus, the contents of the bundle are discarded. + errNotCommitted = errors.New("flow bundle not committed, discarding flows") +) + +// An OpenFlowService is used in a Client to execute 'ovs-ofctl' commands. +type OpenFlowService struct { + // Wrapped Client for ExecFunc and debugging. + c *Client +} + +// AddFlow adds a Flow to a bridge attached to Open vSwitch. +func (o *OpenFlowService) AddFlow(bridge string, flow *Flow) error { + fb, err := flow.MarshalText() + if err != nil { + return err + } + + args := []string{"add-flow"} + args = append(args, o.c.ofctlFlags...) + args = append(args, []string{bridge, string(fb)}...) + + _, err = o.exec(args...) + return err +} + +// A FlowTransaction is a transaction used when adding or deleting +// multiple flows using an Open vSwitch flow bundle. 
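+// A typical use is inside OpenFlowService.AddFlowBundle (a sketch only; the
+// client value, bridge name, and flows are placeholders):
+//
+//	err := c.OpenFlow.AddFlowBundle("br0", func(tx *FlowTransaction) error {
+//		tx.Add(flows...)
+//		return tx.Commit()
+//	})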
+type FlowTransaction struct { + flows []flowDirective + committed bool + err error +} + +// A flowDirective is a directive and flow string pair, used to perform +// multiple operations within a Transaction. +type flowDirective struct { + directive string + flow string +} + +// Possible flowDirective directive values. +const ( + dirAdd = "add" + dirDelete = "delete" +) + +// Add pushes zero or more Flows on to the transaction, to be added by +// Open vSwitch. If any of the flows are invalid, Add becomes a no-op +// and the error will be surfaced when Commit is called. +func (tx *FlowTransaction) Add(flows ...*Flow) { + if tx.err != nil { + return + } + + tms := make([]encoding.TextMarshaler, 0, len(flows)) + for _, f := range flows { + tms = append(tms, f) + } + + tx.push(dirAdd, tms...) +} + +// Delete pushes zero or more MatchFlows on to the transaction, to be deleted +// by Open vSwitch. If any of the flows are invalid, Delete becomes a no-op +// and the error will be surfaced when Commit is called. +func (tx *FlowTransaction) Delete(flows ...*MatchFlow) { + if tx.err != nil { + return + } + + tms := make([]encoding.TextMarshaler, 0, len(flows)) + for _, f := range flows { + tms = append(tms, f) + } + + tx.push(dirDelete, tms...) +} + +// push pushes zero or more encoding.TextMarshalers on to the transaction +// (typically a Flow or MatchFlow). +func (tx *FlowTransaction) push(directive string, flows ...encoding.TextMarshaler) { + for _, f := range flows { + fb, err := f.MarshalText() + if err != nil { + tx.err = err + return + } + + tx.flows = append(tx.flows, flowDirective{ + directive: directive, + flow: string(fb), + }) + } +} + +// Commit finalizes an AddFlowTransaction, returning any errors that may +// have occurred while adding flows. Commit must be called at the end +// of a successful transaction, but may return an error if one was encountered +// during a call to Add. +func (tx *FlowTransaction) Commit() error { + if tx.err != nil { + return tx.err + } + + tx.committed = true + return nil +} + +// Discard discards the contents of an AddFlowTransaction, returning an +// error wrapping the input error. Discard should be called if any +// operations fail in the middle of an AddFlowTransaction function. +func (tx *FlowTransaction) Discard(err error) error { + tx.flows = make([]flowDirective, 0) + return fmt.Errorf("discarding add flow transaction: %v", err) +} + +// AddFlowBundle creates an Open vSwitch flow bundle and enables adding and +// removing flows to and from the specified bridge using a FlowTransaction. +// This function enables atomic addition and deletion of flows to and from +// Open vSwitch. +func (o *OpenFlowService) AddFlowBundle(bridge string, fn func(tx *FlowTransaction) error) error { + // Flows will be added to and read from an in-memory buffer. The buffer's + // contents are piped to 'ovs-ofctl' using stdin. + buf := bytes.NewBuffer(nil) + + tx := &FlowTransaction{} + if err := fn(tx); err != nil { + // Errors from "tx.Commit()" or "tx.Discard()" will be returned here. + return err + } + + // Require an explicit "tx.Commit()" to make changes. + if !tx.committed { + return errNotCommitted + } + + for _, flow := range tx.flows { + // Syntax for adding a flow in the file is: + // "add priority=10,ip,actions=drop\n" + s := fmt.Sprintf("%s %s\n", flow.directive, flow.flow) + if _, err := io.WriteString(buf, s); err != nil { + return err + } + } + + args := []string{"--bundle", "add-flow"} + args = append(args, o.c.ofctlFlags...) + // Read from stdin. 
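+	// Passing "-" as the trailing argument makes ovs-ofctl read the bundle
+	// from standard input, so the buffered directives above are piped in
+	// rather than written to a file. The payload then looks roughly like
+	// (illustrative flows):
+	//
+	//	add priority=10,ip,actions=drop
+	//	delete tcp,tp_dst=80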
+ args = append(args, bridge, "-") + + return o.pipe(buf, args...) +} + +// DelFlows removes flows that match MatchFlow from a bridge attached to Open vSwitch. +// +// If flow is nil, all flows will be deleted from the specified bridge. +func (o *OpenFlowService) DelFlows(bridge string, flow *MatchFlow) error { + if flow == nil { + // This means we'll flush the entire flows + // from the specifided bridge. + _, err := o.exec("del-flows", bridge) + return err + } + fb, err := flow.MarshalText() + if err != nil { + return err + } + + _, err = o.exec("del-flows", bridge, string(fb)) + return err +} + +// ModPort modifies the specified characteristics for the specified port. +func (o *OpenFlowService) ModPort(bridge string, port string, action PortAction) error { + _, err := o.exec("mod-port", bridge, string(port), string(action)) + return err +} + +// DumpPort retrieves statistics about the specified port attached to the +// specified bridge. +func (o *OpenFlowService) DumpPort(bridge string, port string) (*PortStats, error) { + stats, err := o.dumpPorts(bridge, port) + if err != nil { + return nil, err + } + if len(stats) != 1 { + return nil, errMultipleValues + } + + return stats[0], nil +} + +// DumpPorts retrieves statistics about all ports attached to the specified +// bridge. +func (o *OpenFlowService) DumpPorts(bridge string) ([]*PortStats, error) { + return o.dumpPorts(bridge, "") +} + +// DumpTables retrieves statistics about all tables for the specified bridge. +// If a table has no active flows and has not been used for a lookup or matched +// by an incoming packet, it is filtered from the output. +func (o *OpenFlowService) DumpTables(bridge string) ([]*Table, error) { + out, err := o.exec("dump-tables", bridge) + if err != nil { + return nil, err + } + + var tables []*Table + err = parseEach(out, dumpTablesPrefix, func(b []byte) error { + t := new(Table) + if err := t.UnmarshalText(b); err != nil { + return err + } + + // Ignore empty tables + if t.Active == 0 && t.Lookup == 0 && t.Matched == 0 { + return nil + } + + tables = append(tables, t) + return nil + }) + + return tables, err +} + +// DumpFlows retrieves statistics about all flows for the specified bridge. +// If a table has no active flows and has not been used for a lookup or matched +// by an incoming packet, it is filtered from the output. +func (o *OpenFlowService) DumpFlows(bridge string) ([]*Flow, error) { + out, err := o.exec("dump-flows", bridge) + if err != nil { + return nil, err + } + + var flows []*Flow + err = parseEachLine(out, dumpFlowsPrefix, func(b []byte) error { + // Do not attempt to parse NXST_FLOW messages. + if bytes.HasPrefix(b, dumpFlowsPrefix) { + return nil + } + + f := new(Flow) + if err := f.UnmarshalText(b); err != nil { + return err + } + + flows = append(flows, f) + return nil + }) + + return flows, err +} + +// DumpAggregate retrieves statistics about the specified flow attached to the +// specified bridge. +func (o *OpenFlowService) DumpAggregate(bridge string, flow *MatchFlow) (*FlowStats, error) { + stats, err := o.dumpAggregate(bridge, flow) + if err != nil { + return nil, err + } + + return stats, nil +} + +var ( + // dumpPortsPrefix is a sentinel value returned at the beginning of + // the output from 'ovs-ofctl dump-ports'. + dumpPortsPrefix = []byte("OFPST_PORT reply") + + // dumpTablesPrefix is a sentinel value returned at the beginning of + // the output from 'ovs-ofctl dump-tables'. 
+ dumpTablesPrefix = []byte("OFPST_TABLE reply") + + // dumpFlowsPrefix is a sentinel value returned at the beginning of + // the output from 'ovs-ofctl dump-flows'. + dumpFlowsPrefix = []byte("NXST_FLOW reply") + + // dumpAggregatePrefix is a sentinel value returned at the beginning of + // the output from "ovs-ofctl dump-aggregate" + //dumpAggregatePrefix = []byte("NXST_AGGREGATE reply") +) + +// dumpPorts calls 'ovs-ofctl dump-ports' with the specified arguments and +// parses the output into zero or more PortStats structs. +func (o *OpenFlowService) dumpPorts(bridge string, port string) ([]*PortStats, error) { + args := []string{ + "dump-ports", + bridge, + } + + args = append(o.c.ofctlFlags, args...) + + // Attach port argument only if non-empty. + if port != "" { + args = append(args, string(port)) + } + + out, err := o.exec(args...) + if err != nil { + return nil, err + } + + var stats []*PortStats + err = parseEach(out, dumpPortsPrefix, func(b []byte) error { + s := new(PortStats) + if err := s.UnmarshalText(b); err != nil { + return err + } + + stats = append(stats, s) + return nil + }) + + return stats, err +} + +// dumpAggregate calls 'ovs-ofctl dump-aggregate' with the specified arguments and +// parses the output into zero or more FlowStat structs. +func (o *OpenFlowService) dumpAggregate(bridge string, flow *MatchFlow) (*FlowStats, error) { + + flowText, err := flow.MarshalText() + if err != nil { + return nil, err + } + + args := []string{ + "dump-aggregate", + bridge, + string(flowText), + } + + args = append(o.c.ofctlFlags, args...) + + out, err := o.exec(args...) + if err != nil { + return nil, err + } + + var stats FlowStats + if err := stats.UnmarshalText(out); err != nil { + return nil, err + } + + return &stats, err +} + +// parseEachLine parses ovs-ofctl output from the input buffer, ensuring it has the +// specified prefix, and invoking the input function on each line scanned, +// so more complex structures can be parsed. +func parseEachLine(in []byte, prefix []byte, fn func(b []byte) error) error { + // First line must not be empty + scanner := bufio.NewScanner(bytes.NewReader(in)) + scanner.Split(bufio.ScanLines) + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + + // First line must contain prefix returned by OVS + if !bytes.HasPrefix(scanner.Bytes(), prefix) { + return io.ErrUnexpectedEOF + } + + // Scan every line to retrieve information needed to unmarshal + // a single Flow struct. + for scanner.Scan() { + b := make([]byte, len(scanner.Bytes())) + copy(b, scanner.Bytes()) + if err := fn(b); err != nil { + return err + } + } + + return scanner.Err() +} + +// parseEach parses ovs-ofctl output from the input buffer, ensuring it has the +// specified prefix, and invoking the input function on each two lines scanned, +// so more complex structures can be parsed. +func parseEach(in []byte, prefix []byte, fn func(b []byte) error) error { + // First line must not be empty + scanner := bufio.NewScanner(bytes.NewReader(in)) + scanner.Split(bufio.ScanLines) + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + + // First line must contain prefix returned by OVS + if !bytes.HasPrefix(scanner.Bytes(), prefix) { + return io.ErrUnexpectedEOF + } + + // OVS with OpenFlow 1.3+ returns an additional line with more metadata + // which must be ignored. A banner appears containing "(OF1.x)" which we + // detect here to discover if the last line should be discarded. 
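+	// A rough sketch of `ovs-ofctl -O OpenFlow13 dump-ports` output for a
+	// single port, showing the extra duration line (all values are
+	// placeholders):
+	//
+	//	port  1: rx pkts=10, bytes=640, drop=0, errs=0, frame=0, over=0, crc=0
+	//	         tx pkts=10, bytes=640, drop=0, errs=0, coll=0
+	//	         duration=5.5s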
+ hasDuration := bytes.Contains(scanner.Bytes(), []byte("(OF1.")) + // Detect if CUSTOM Statistics is present, to skip additional lines + hasCustomStats := bytes.Contains(in, []byte("CUSTOM")) + + // Scan every two lines to retrieve information needed to unmarshal + // a single PortStats struct. + for scanner.Scan() { + b := make([]byte, len(scanner.Bytes())) + copy(b, scanner.Bytes()) + + // Must always scan two lines + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + b = append(b, scanner.Bytes()...) + + if hasDuration { + // Discard the third line of information if applicable. + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + //Discard 4th & 5th lines if Custom stats are present + if hasCustomStats { + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + if !scanner.Scan() { + return io.ErrUnexpectedEOF + } + } + } + + if err := fn(b); err != nil { + return err + } + } + + return scanner.Err() +} + +// exec executes an ExecFunc using 'ovs-ofctl'. +func (o *OpenFlowService) exec(args ...string) ([]byte, error) { + return o.c.exec("ovs-ofctl", args...) +} + +// pipe executes a PipeFunc using 'ovs-ofctl'. +func (o *OpenFlowService) pipe(stdin io.Reader, args ...string) error { + return o.c.pipe(stdin, "ovs-ofctl", args...) +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/ovs.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/ovs.go new file mode 100644 index 0000000..c211a32 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/ovs.go @@ -0,0 +1,89 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "bytes" + "fmt" +) + +// A FailMode is a failure mode which Open vSwitch uses when it cannot +// contact a controller. +type FailMode string + +// FailMode constants which can be used in OVS configurations. +const ( + FailModeStandalone FailMode = "standalone" + FailModeSecure FailMode = "secure" +) + +// An InterfaceType is a network interface type recognized by Open vSwitch. +type InterfaceType string + +// InterfaceType constants which can be used in OVS configurations. +const ( + InterfaceTypeGRE InterfaceType = "gre" + InterfaceTypeInternal InterfaceType = "internal" + InterfaceTypePatch InterfaceType = "patch" + InterfaceTypeSTT InterfaceType = "stt" + InterfaceTypeVXLAN InterfaceType = "vxlan" +) + +// A PortAction is a port actions to change the port characteristics of the +// specific port through the ModPort API. +type PortAction string + +// PortAction constants for ModPort API. 
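+// Each value maps to an action keyword accepted by
+// `ovs-ofctl mod-port SWITCH PORT ACTION`, for example (illustrative):
+//
+//	ovs-ofctl mod-port br0 eth1 no-flood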
+const ( + PortActionUp PortAction = "up" + PortActionDown PortAction = "down" + PortActionSTP PortAction = "stp" + PortActionNoSTP PortAction = "no-stp" + PortActionReceive PortAction = "receive" + PortActionNoReceive PortAction = "no-receive" + PortActionReceiveSTP PortAction = "receive-stp" + PortActionNoReceiveSTP PortAction = "no-receive-stp" + PortActionForward PortAction = "forward" + PortActionNoForward PortAction = "no-forward" + PortActionFlood PortAction = "flood" + PortActionNoFlood PortAction = "no-flood" + PortActionPacketIn PortAction = "packet-in" + PortActionNoPacketIn PortAction = "no-packet-in" +) + +// An Error is an error returned when shelling out to an Open vSwitch control +// program. It captures the combined stdout and stderr as well as the exit +// code. +type Error struct { + Out []byte + Err error +} + +// Error returns the string representation of an Error. +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Err, string(e.Out)) +} + +// IsPortNotExist checks if err is of type Error and is caused by asking OVS for +// information regarding a non-existent port. +func IsPortNotExist(err error) bool { + oerr, ok := err.(*Error) + if !ok { + return false + } + + return bytes.HasPrefix(oerr.Out, []byte("ovs-vsctl: no port named ")) && + oerr.Err.Error() == "exit status 1" +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/portrange.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/portrange.go new file mode 100644 index 0000000..77b95f9 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/portrange.go @@ -0,0 +1,130 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "errors" + "math" +) + +var ( + // ErrInvalidPortRange is returned when there's a port range that invalid. + ErrInvalidPortRange = errors.New("invalid port range") +) + +// An PortRange represents a range of ports expressed in 16 bit integers. The start and +// end values of this range are inclusive. +type PortRange struct { + Start uint16 + End uint16 +} + +// A BitRange is a representation of a range of values from base value with a bitmask +// applied. +type BitRange struct { + Value uint16 + Mask uint16 +} + +// BitwiseMatch returns an array of BitRanges that represent the range of integers +// in the PortRange. +func (r *PortRange) BitwiseMatch() ([]BitRange, error) { + if r.Start <= 0 || r.End <= 0 { + return nil, ErrInvalidPortRange + } + if r.Start > r.End { + return nil, ErrInvalidPortRange + } + + if r.Start == r.End { + return []BitRange{ + {Value: r.Start, Mask: 0xffff}, + }, nil + } + + bitRanges := []BitRange{} + + // Find the largest window we can get on a binary boundary + window := (r.End - r.Start) + 1 + bitLength := uint(math.Floor(math.Log2(float64(window)))) + + rangeStart, rangeEnd := getRange(r.End, bitLength) + + // Decrement our mask until we fit inside the range we want from a binary boundary. 
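+	// For example, the range 10-13 should decompose into the masked values
+	// 10/0xfffe and 12/0xfffe (a sketch of the expected result; the recursion
+	// below emits the left remainder before the current range).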
+	for rangeEnd > r.End {
+		bitLength--
+		rangeStart, rangeEnd = getRange(r.End, bitLength)
+	}
+
+	current := BitRange{
+		Value: rangeStart,
+		Mask:  getMask(bitLength),
+	}
+
+	// The range we picked out was from the middle of our set, so we'll need to recurse on
+	// the remaining values for anything less than or greater than the current
+	// range.
+
+	if r.Start != rangeStart {
+		leftRemainder := PortRange{
+			Start: r.Start,
+			End:   rangeStart - 1,
+		}
+
+		leftRemainingBitRanges, err := leftRemainder.BitwiseMatch()
+		if err != nil {
+			return nil, err
+		}
+
+		bitRanges = append(bitRanges, leftRemainingBitRanges...)
+	}
+
+	// We append our current range here, so we're ordered properly.
+	bitRanges = append(bitRanges, current)
+
+	if r.End != rangeEnd {
+		rightRemainder := PortRange{
+			Start: rangeEnd + 1,
+			End:   r.End,
+		}
+
+		rightRemainingBitRanges, err := rightRemainder.BitwiseMatch()
+		if err != nil {
+			return nil, err
+		}
+
+		bitRanges = append(bitRanges, rightRemainingBitRanges...)
+	}
+
+	return bitRanges, nil
+}
+
+func getMask(bitLength uint) uint16 {
+	// All 1s for everything that doesn't change in the range
+	return math.MaxUint16 ^ uint16((1<<bitLength)-1)
+}
+	ctThawRegexp       = regexp.MustCompile(`thaw`)
+	ctResumeFromRegexp = regexp.MustCompile(`Resuming from table`)
+	ctResumeWithRegexp = regexp.MustCompile(`resume conntrack with`)
+	tunNative          = regexp.MustCompile(`native tunnel`)
+)
+
+const (
+	popvlan   = "popvlan"
+	pushvlan  = "pushvlan"
+	drop      = "drop"
+	localPort = 65534
+)
+
+// DataPathActions is a text unmarshaler for data path actions in ofproto/trace output
+type DataPathActions interface {
+	encoding.TextUnmarshaler
+}
+
+// NewDataPathActions returns an implementation of DataPathActions
+func NewDataPathActions(actions string) DataPathActions {
+	return &dataPathActions{
+		actions: actions,
+	}
+}
+
+type dataPathActions struct {
+	actions string
+}
+
+func (d *dataPathActions) UnmarshalText(b []byte) error {
+	d.actions = string(b)
+	return nil
+}
+
+// DataPathFlows represents the initial/final flows passed/returned from ofproto/trace
+type DataPathFlows struct {
+	Protocol Protocol
+	Matches  []Match
+}
+
+// UnmarshalText unmarshals the initial/final data path flows from ofproto/trace output
+func (df *DataPathFlows) UnmarshalText(b []byte) error {
+	matches := strings.Split(string(b), ",")
+
+	if len(matches) == 0 {
+		return errors.New("error unmarshalling text, no comma delimiter found")
+	}
+
+	for _, match := range matches {
+		switch Protocol(match) {
+		case ProtocolARP, ProtocolICMPv4, ProtocolICMPv6,
+			ProtocolIPv4, ProtocolIPv6, ProtocolTCPv4,
+			ProtocolTCPv6, ProtocolUDPv4, ProtocolUDPv6:
+			df.Protocol = Protocol(match)
+			continue
+		}
+
+		// We can safely skip these keywords
+		switch {
+		case match == "eth":
+			continue
+		case match == "unchanged":
+			continue
+		case recircIDRegexp.MatchString(match):
+			continue
+		}
+
+		kv := strings.Split(match, "=")
+		if len(kv) != 2 {
+			return fmt.Errorf("unexpected match format for match %q", match)
+		}
+
+		switch strings.TrimSpace(kv[0]) {
+		case inPort:
+			// Parse in_port=LOCAL into a new match.
+ if strings.TrimSpace(kv[1]) == portLOCAL { + df.Matches = append(df.Matches, InPortMatch(localPort)) + continue + } + } + + m, err := parseMatch(kv[0], kv[1]) + if err != nil { + return err + } + // The keyword will be skipped if unknown, + // don't add a nil value + if m != nil { + df.Matches = append(df.Matches, m) + } + } + + return nil +} + +// ProtoTrace is a type representing output from ovs-app-ctl ofproto/trace +type ProtoTrace struct { + CommandStr string + InputFlow *DataPathFlows + FinalFlow *DataPathFlows + DataPathActions DataPathActions + FlowActions []string +} + +// UnmarshalText unmarshals ProtoTrace text into a ProtoTrace type. +// Not implemented yet. +func (pt *ProtoTrace) UnmarshalText(b []byte) error { + lines := strings.Split(string(b), "\n") + for _, line := range lines { + if matches, matched := checkMatch(datapathActionsRegexp, line); matched { + + if recircRegexp.MatchString(line) { + pt.FlowActions = append(pt.FlowActions, "recirc") + } + + // first index is always the left most match, following + // are the actual matches + pt.DataPathActions = &dataPathActions{ + actions: matches[1], + } + + continue + } + + if matches, matched := checkMatch(initialFlowRegexp, line); matched { + flow := &DataPathFlows{} + err := flow.UnmarshalText([]byte(matches[1])) + if err != nil { + return err + } + + pt.InputFlow = flow + continue + } + + if matches, matched := checkMatch(finalFlowRegexp, line); matched { + flow := &DataPathFlows{} + err := flow.UnmarshalText([]byte(matches[1])) + if err != nil { + return err + } + + pt.FinalFlow = flow + continue + } + + if _, matched := checkMatch(megaFlowRegexp, line); matched { + continue + } + + if _, matched := checkMatch(traceStartRegexp, line); matched { + continue + } + + if _, matched := checkMatch(traceFlowRegexp, line); matched { + continue + } + + // We can safely skip these keywords + switch { + case ctCommentRegexp.MatchString(line): + continue + case ctThawRegexp.MatchString(line): + continue + case ctResumeFromRegexp.MatchString(line): + continue + case ctResumeWithRegexp.MatchString(line): + continue + case tunNative.MatchString(line): + continue + } + + if matches, matched := checkMatch(traceActionRegexp, line); matched { + pt.FlowActions = append(pt.FlowActions, matches[1]) + continue + } + } + + return nil +} + +func checkMatch(re *regexp.Regexp, s string) ([]string, bool) { + matches := re.FindStringSubmatch(s) + if len(matches) == 0 { + return matches, false + } + + return matches, true +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/table.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/table.go new file mode 100644 index 0000000..c77d3d5 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/table.go @@ -0,0 +1,99 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ovs + +import ( + "errors" + "strconv" + "strings" +) + +var ( + // ErrInvalidTable is returned when tables from 'ovs-ofctl dump-tables' + // do not match the expected output format. + ErrInvalidTable = errors.New("invalid openflow table") +) + +// A Table is an Open vSwitch table. +type Table struct { + ID int + Name string + Wild string + Max int + Active int + Lookup uint64 + Matched uint64 +} + +// UnmarshalText unmarshals a Table from textual form as output by +// 'ovs-ofctl dump-tables': +// 0: classifier: wild=0x3fffff, max=1000000, active=0 +// lookup=0, matched=0 +func (t *Table) UnmarshalText(b []byte) error { + // Make a copy per documentation for encoding.TextUnmarshaler. + s := string(b) + + ss := strings.Fields(s) + if len(ss) != 7 && len(ss) != 8 { + return ErrInvalidTable + } + + // ID has trailing colon which must be removed + id, err := strconv.ParseInt(strings.TrimSuffix(ss[0], ":"), 10, 0) + if err != nil { + return err + } + t.ID = int(id) + + // Numeric fields start at index 2 normally, but if the table name + // does not contain a trailing colon (OVS tables that aren't "classifier"), + // it will start at index 3 + idx := 2 + if !strings.HasSuffix(ss[1], ":") { + idx = 3 + } + // Name has trailing colon which must be removed + t.Name = strings.TrimSuffix(ss[1], ":") + + out := make([]uint64, 0, 4) + for i, str := range ss[idx:] { + // Strip trailing commas from key/value fields + str = strings.TrimSuffix(str, ",") + + pair := strings.Split(str, "=") + if len(pair) != 2 { + return ErrInvalidTable + } + + if i == 0 { + t.Wild = pair[1] + continue + } + + n, err := strconv.ParseUint(pair[1], 10, 64) + if err != nil { + return err + } + + out = append(out, n) + } + + t.Max = int(out[0]) + t.Active = int(out[1]) + t.Lookup = out[2] + t.Matched = out[3] + + return nil +} diff --git a/vendor/github.com/danieldin95/go-openvswitch/ovs/vswitch.go b/vendor/github.com/danieldin95/go-openvswitch/ovs/vswitch.go new file mode 100644 index 0000000..1e74674 --- /dev/null +++ b/vendor/github.com/danieldin95/go-openvswitch/ovs/vswitch.go @@ -0,0 +1,351 @@ +// Copyright 2017 DigitalOcean. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ovs + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + // DefaultIngressRatePolicing is used to disable the ingress policing, + // which is the default behavior. + DefaultIngressRatePolicing = int64(-1) + + // DefaultIngressBurstPolicing is to change the ingress policing + // burst to the default size, 1000 kb. + DefaultIngressBurstPolicing = int64(-1) +) + +// A VSwitchService is used in a Client to execute 'ovs-vsctl' commands. +type VSwitchService struct { + // Get wraps functionality of the 'ovs-vsctl get' subcommand. + Get *VSwitchGetService + + // Set wraps functionality of the 'ovs-vsctl set' subcommand. + Set *VSwitchSetService + + // Wrapped Client for ExecFunc and debugging. + c *Client +} + +// AddBridge attaches a bridge to Open vSwitch. The bridge may or may +// not already exist. 
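+// It is equivalent to running `ovs-vsctl --may-exist add-br BRIDGE`, which is
+// exactly what the exec call below issues.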
+func (v *VSwitchService) AddBridge(bridge string) error { + _, err := v.exec("--may-exist", "add-br", bridge) + return err +} + +// AddPort attaches a port to a bridge on Open vSwitch. The port may or may +// not already exist. +func (v *VSwitchService) AddPort(bridge string, port string) error { + _, err := v.exec("--may-exist", "add-port", bridge, string(port)) + return err +} + +// DeleteBridge detaches a bridge from Open vSwitch. The bridge may or may +// not already exist. +func (v *VSwitchService) DeleteBridge(bridge string) error { + _, err := v.exec("--if-exists", "del-br", bridge) + return err +} + +// DeletePort detaches a port from a bridge on Open vSwitch. The port may or may +// not already exist. +func (v *VSwitchService) DeletePort(bridge string, port string) error { + _, err := v.exec("--if-exists", "del-port", bridge, string(port)) + return err +} + +// ListPorts lists the ports in Open vSwitch. +func (v *VSwitchService) ListPorts(bridge string) ([]string, error) { + output, err := v.exec("list-ports", bridge) + if err != nil { + return nil, err + } + + // Do no ports exist? + if len(output) == 0 { + return nil, nil + } + + ports := strings.Split(strings.TrimSpace(string(output)), "\n") + return ports, nil +} + +// ListBridges lists the bridges in Open vSwitch. +func (v *VSwitchService) ListBridges() ([]string, error) { + output, err := v.exec("list-br") + if err != nil { + return nil, err + } + + // Do no bridges exist? + if len(output) == 0 { + return nil, nil + } + + bridges := strings.Split(strings.TrimSpace(string(output)), "\n") + return bridges, nil +} + +// PortToBridge attempts to determine which bridge a port is attached to. +// If port does not exist, an error will be returned, which can be checked +// using IsPortNotExist. +func (v *VSwitchService) PortToBridge(port string) (string, error) { + out, err := v.exec("port-to-br", string(port)) + if err != nil { + return "", err + } + + return string(out), nil +} + +// GetFailMode gets the FailMode for the specified bridge. +func (v *VSwitchService) GetFailMode(bridge string) (FailMode, error) { + out, err := v.exec("get-fail-mode", bridge) + if err != nil { + return "", err + } + + return FailMode(out), nil +} + +// SetFailMode sets the specified FailMode for the specified bridge. +func (v *VSwitchService) SetFailMode(bridge string, mode FailMode) error { + _, err := v.exec("set-fail-mode", bridge, string(mode)) + return err +} + +// SetController sets the controller for this bridge so that ovs-ofctl +// can use this address to communicate. +func (v *VSwitchService) SetController(bridge string, address string) error { + _, err := v.exec("set-controller", bridge, address) + return err +} + +// GetController gets the controller address for this bridge. +func (v *VSwitchService) GetController(bridge string) (string, error) { + address, err := v.exec("get-controller", bridge) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(address)), nil +} + +// exec executes an ExecFunc using 'ovs-vsctl'. +func (v *VSwitchService) exec(args ...string) ([]byte, error) { + return v.c.exec("ovs-vsctl", args...) +} + +// A VSwitchGetService is used in a VSwitchService to execute 'ovs-vsctl get' +// subcommands. +type VSwitchGetService struct { + // v provides the required exec method. + v *VSwitchService +} + +// Bridge gets configuration for a bridge and returns the values through +// a BridgeOptions struct. 
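+// Only the "protocols" column is currently read; under the hood this runs
+// `ovs-vsctl --format=json get bridge BRIDGE protocols` and unmarshals the
+// JSON array it returns.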
+func (v *VSwitchGetService) Bridge(bridge string) (BridgeOptions, error) { + // We only support the protocol option at this point. + args := []string{"--format=json", "get", "bridge", bridge, "protocols"} + out, err := v.v.exec(args...) + if err != nil { + return BridgeOptions{}, err + } + + var protocols []string + if err := json.Unmarshal(out, &protocols); err != nil { + return BridgeOptions{}, err + } + + return BridgeOptions{ + Protocols: protocols, + }, nil +} + +// A VSwitchSetService is used in a VSwitchService to execute 'ovs-vsctl set' +// subcommands. +type VSwitchSetService struct { + // v provides the required exec method. + v *VSwitchService +} + +// Bridge sets configuration for a bridge using the values from a BridgeOptions +// struct. +func (v *VSwitchSetService) Bridge(bridge string, options BridgeOptions) error { + // Prepend command line arguments before expanding options slice + // and appending it + args := []string{"set", "bridge", bridge} + args = append(args, options.slice()...) + + _, err := v.v.exec(args...) + return err +} + +// An BridgeOptions enables configuration of a bridge. +type BridgeOptions struct { + // Protocols specifies the OpenFlow protocols the bridge should use. + Protocols []string +} + +// slice creates a string slice containing any non-zero option values from the +// struct in the format expected by Open vSwitch. +func (o BridgeOptions) slice() []string { + var s []string + + if len(o.Protocols) > 0 { + s = append(s, fmt.Sprintf("protocols=%s", strings.Join(o.Protocols, ","))) + } + + return s +} + +// Interface sets configuration for an interface using the values from an +// InterfaceOptions struct. +func (v *VSwitchSetService) Interface(ifi string, options InterfaceOptions) error { + // Prepend command line arguments before expanding options slice + // and appending it + args := []string{"set", "interface", ifi} + args = append(args, options.slice()...) + + _, err := v.v.exec(args...) + return err +} + +// An InterfaceOptions struct enables configuration of an Interface. +type InterfaceOptions struct { + // Type specifies the Open vSwitch interface type. + Type InterfaceType + + // Indicate whether enable bfd. + BfdEnable bool + + // Peer specifies an interface to peer with when creating a patch interface. + Peer string + + // MTURequest specifies the maximum transmission unit associated with an + // interface. + MTURequest int + + // Ingress Policing + // + // These settings control ingress policing for packets received on this + // interface. On a physical interface, this limits the rate at which + // traffic is allowed into the system from the outside; on a virtual + // interface (one connected to a virtual machine), this limits the rate + // at which the VM is able to transmit. + + // IngressRatePolicing specifies the maximum rate for data received on + // this interface in kbps. Data received faster than this rate is dropped. + // Set to 0 (the default) to disable policing. + IngressRatePolicing int64 + + // IngressBurstPolicing specifies the maximum burst size for data received on + // this interface in kb. The default burst size if set to 0 is 1000 kb. + // This value has no effect if IngressRatePolicing is set to 0. Specifying + // a larger burst size lets the algorithm be more forgiving, which is important + // for protocols like TCP that react severely to dropped packets. The burst + // size should be at least the size of the interface's MTU. 
Specifying a + // value that is numerically at least as large as 10% of IngressRatePolicing + // helps TCP come closer to achieving the full rate. + IngressBurstPolicing int64 + + // RemoteIP can be populated when the interface is a tunnel interface type + // for example "stt" or "vxlan". It specifies the remote IP address with which to + // form tunnels when traffic is sent to this port. Optionally it could be set to + // "flow" which expects the flow to set tunnel destination. + RemoteIP string + + // Key can be populated when the interface is a tunnel interface type + // for example "stt" or "vxlan". It specifies the tunnel ID to attach to + // tunneled traffic leaving this interface. Optionally it could be set to + // "flow" which expects the flow to set tunnel ID. + Key string + + // Specifies the usage of the Don't Fragment flag (DF) bit in outgoing packets + // with IPv4 headers. The value inherit causes the bit to be copied from + // the original IP header. The values unset and set cause the bit to be always unset + // or always set, respectively. By default, the bit is not set. + DfDefault string + + // Specifies the source IP address to use in outgoing packets. + LocalIP string + + // Specifies the UDP destination port to communicate to the remote + // VXLAN tunnel endpoint. + DstPort uint32 +} + +// slice creates a string slice containing any non-zero option values from the +// struct in the format expected by Open vSwitch. +func (i InterfaceOptions) slice() []string { + var s []string + + if i.Type != "" { + s = append(s, fmt.Sprintf("type=%s", i.Type)) + } + + if i.BfdEnable { + s = append(s, "bfd:enable=true") + } + + if i.Peer != "" { + s = append(s, fmt.Sprintf("options:peer=%s", i.Peer)) + } + + if i.MTURequest > 0 { + s = append(s, fmt.Sprintf("mtu_request=%d", i.MTURequest)) + } + + if i.IngressRatePolicing == DefaultIngressRatePolicing { + // Set to 0 (the default) to disable policing. + s = append(s, "ingress_policing_rate=0") + } else if i.IngressRatePolicing > 0 { + s = append(s, fmt.Sprintf("ingress_policing_rate=%d", i.IngressRatePolicing)) + } + + if i.IngressBurstPolicing == DefaultIngressBurstPolicing { + // Set to 0 (the default) to the default burst size. + s = append(s, "ingress_policing_burst=0") + } else if i.IngressBurstPolicing > 0 { + s = append(s, fmt.Sprintf("ingress_policing_burst=%d", i.IngressBurstPolicing)) + } + + if i.RemoteIP != "" { + s = append(s, fmt.Sprintf("options:remote_ip=%s", i.RemoteIP)) + } + + if i.Key != "" { + s = append(s, fmt.Sprintf("options:key=%s", i.Key)) + } + + if i.DfDefault != "" { + s = append(s, fmt.Sprintf("options:df_default=%s", i.DfDefault)) + } + + if i.LocalIP != "" { + s = append(s, fmt.Sprintf("options:local_ip=%s", i.LocalIP)) + } + + if i.DstPort > 0 { + s = append(s, fmt.Sprintf("options:dst_port=%d", i.DstPort)) + } + + return s +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. 
It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. 
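+	// In this build (js, appengine, the "safe" tag, or Go < 1.4) the unsafe
+	// package is never used, so the constant is fixed to true.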
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. 
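+	// The digits were written right to left above, so for p == 0xff the
+	// buffer currently ends in "ff"; stepping i back twice and writing 'x'
+	// then '0' produces "0xff" (illustrative value).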
+ i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. 
+func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. 
+ // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. 
It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//	Indent: " "
+//	MaxDepth: 0
+//	DisableMethods: false
+//	DisablePointerMethods: false
+//	ContinueOnMethod: false
+//	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
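+
+For instance, dumping a small struct (the type and values here are purely
+illustrative and not part of this package) is a one-liner:
+
+	type server struct {
+		Addr  string
+		Ports []int
+	}
+	spew.Dump(server{Addr: "127.0.0.1", Ports: []int{80, 443}})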
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. 
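+
+A rough sketch of that setup (inferred from the output below; the real example
+source may differ) is:
+
+	// Flag is assumed to have a String method (not shown), which is why it
+	// prints as flagTwo rather than as a number.
+	type Flag int
+
+	type Bar struct {
+		flag Flag
+		data uintptr
+	}
+
+	type Foo struct {
+		unexportedField *Bar
+		ExportedField   map[interface{}]interface{}
+	}
+
+Dumping a Foo value built from those types produces output of this shape: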
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. 
We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. +*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/go-ldap/ldap/.gitignore b/vendor/github.com/go-ldap/ldap/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/go-ldap/ldap/.travis.yml b/vendor/github.com/go-ldap/ldap/.travis.yml new file mode 100644 index 0000000..bb899b2 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/.travis.yml @@ -0,0 +1,31 @@ +sudo: false +language: go +go: + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - tip + +git: + depth: 1 + +matrix: + fast_finish: true + allow_failures: + - go: tip +go_import_path: gopkg.in/ldap.v3 +install: + - go get gopkg.in/asn1-ber.v1 + - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover + - go get github.com/golang/lint/golint || go get golang.org/x/lint/golint || true + - go build -v ./... +script: + - make test + - make fmt + - make vet + - make lint diff --git a/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md b/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md new file mode 100644 index 0000000..a788523 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contribution Guidelines + +We welcome contribution and improvements. 
+ +## Guiding Principles + +To begin with here is a draft from an email exchange: + + * take compatibility seriously (our semvers, compatibility with older go versions, etc) + * don't tag untested code for release + * beware of baking in implicit behavior based on other libraries/tools choices + * be as high-fidelity as possible in plumbing through LDAP data (don't mask errors or reduce power of someone using the library) diff --git a/vendor/github.com/go-ldap/ldap/LICENSE b/vendor/github.com/go-ldap/ldap/LICENSE new file mode 100644 index 0000000..6c0ed4b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ldap/ldap/Makefile b/vendor/github.com/go-ldap/ldap/Makefile new file mode 100644 index 0000000..c496647 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/Makefile @@ -0,0 +1,82 @@ +.PHONY: default install build test quicktest fmt vet lint + +# List of all release tags "supported" by our current Go version +# E.g. ":go1.1:go1.2:go1.3:go1.4:go1.5:go1.6:go1.7:go1.8:go1.9:go1.10:go1.11:go1.12:" +GO_RELEASE_TAGS := $(shell go list -f ':{{join (context.ReleaseTags) ":"}}:' runtime) + +# Only use the `-race` flag on newer versions of Go (version 1.3 and newer) +ifeq (,$(findstring :go1.3:,$(GO_RELEASE_TAGS))) + RACE_FLAG := +else + RACE_FLAG := -race -cpu 1,2,4 +endif + +# Run `go vet` on Go 1.12 and newer. For Go 1.5-1.11, use `go tool vet` +ifneq (,$(findstring :go1.12:,$(GO_RELEASE_TAGS))) + GO_VET := go vet \ + -atomic \ + -bool \ + -copylocks \ + -nilfunc \ + -printf \ + -rangeloops \ + -unreachable \ + -unsafeptr \ + -unusedresult \ + . +else ifneq (,$(findstring :go1.5:,$(GO_RELEASE_TAGS))) + GO_VET := go tool vet \ + -atomic \ + -bool \ + -copylocks \ + -nilfunc \ + -printf \ + -shadow \ + -rangeloops \ + -unreachable \ + -unsafeptr \ + -unusedresult \ + . +else + GO_VET := @echo "go vet skipped -- not supported on this version of Go" +endif + +default: fmt vet lint build quicktest + +install: + go get -t -v ./... + +build: + go build -v ./... + +test: + go test -v $(RACE_FLAG) -cover ./... + +quicktest: + go test ./... + +# Capture output and force failure when there is non-empty output +fmt: + @echo gofmt -l . + @OUTPUT=`gofmt -l . 
2>&1`; \ + if [ "$$OUTPUT" ]; then \ + echo "gofmt must be run on the following files:"; \ + echo "$$OUTPUT"; \ + exit 1; \ + fi + +vet: + $(GO_VET) + +# https://github.com/golang/lint +# go get github.com/golang/lint/golint +# Capture output and force failure when there is non-empty output +# Only run on go1.5+ +lint: + @echo golint ./... + @OUTPUT=`command -v golint >/dev/null 2>&1 && golint ./... 2>&1`; \ + if [ "$$OUTPUT" ]; then \ + echo "golint errors:"; \ + echo "$$OUTPUT"; \ + exit 1; \ + fi diff --git a/vendor/github.com/go-ldap/ldap/README.md b/vendor/github.com/go-ldap/ldap/README.md new file mode 100644 index 0000000..25cf730 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/README.md @@ -0,0 +1,54 @@ +[![GoDoc](https://godoc.org/gopkg.in/ldap.v3?status.svg)](https://godoc.org/gopkg.in/ldap.v3) +[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap) + +# Basic LDAP v3 functionality for the GO programming language. + +## Install + +For the latest version use: + + go get gopkg.in/ldap.v3 + +Import the latest version with: + + import "gopkg.in/ldap.v3" + +## Required Libraries: + + - gopkg.in/asn1-ber.v1 + +## Features: + + - Connecting to LDAP server (non-TLS, TLS, STARTTLS) + - Binding to LDAP server + - Searching for entries + - Filter Compile / Decompile + - Paging Search Results + - Modify Requests / Responses + - Add Requests / Responses + - Delete Requests / Responses + - Modify DN Requests / Responses + +## Examples: + + - search + - modify + +## Contributing: + +Bug reports and pull requests are welcome! + +Before submitting a pull request, please make sure tests and verification scripts pass: +``` +make all +``` + +To set up a pre-push hook to run the tests and verify scripts before pushing: +``` +ln -s ../../.githooks/pre-push .git/hooks/pre-push +``` + +--- +The Go gopher was designed by Renee French. (http://reneefrench.blogspot.com/) +The design is licensed under the Creative Commons 3.0 Attributions license. 
+Read this article for more details: http://blog.golang.org/gopher diff --git a/vendor/github.com/go-ldap/ldap/add.go b/vendor/github.com/go-ldap/ldap/add.go new file mode 100644 index 0000000..19bce1b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/add.go @@ -0,0 +1,119 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// AddRequest ::= [APPLICATION 8] SEQUENCE { +// entry LDAPDN, +// attributes AttributeList } +// +// AttributeList ::= SEQUENCE OF attribute Attribute + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Attribute represents an LDAP attribute +type Attribute struct { + // Type is the name of the LDAP attribute + Type string + // Vals are the LDAP attribute values + Vals []string +} + +func (a *Attribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range a.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// AddRequest represents an LDAP AddRequest operation +type AddRequest struct { + // DN identifies the entry being added + DN string + // Attributes list the attributes of the new entry + Attributes []Attribute + // Controls hold optional controls to send with the request + Controls []Control +} + +func (a AddRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationAddRequest, nil, "Add Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, a.DN, "DN")) + attributes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range a.Attributes { + attributes.AppendChild(attribute.encode()) + } + request.AppendChild(attributes) + return request +} + +// Attribute adds an attribute with the given type and values +func (a *AddRequest) Attribute(attrType string, attrVals []string) { + a.Attributes = append(a.Attributes, Attribute{Type: attrType, Vals: attrVals}) +} + +// NewAddRequest returns an AddRequest for the given DN, with no attributes +func NewAddRequest(dn string, controls []Control) *AddRequest { + return &AddRequest{ + DN: dn, + Controls: controls, + } + +} + +// Add performs the given AddRequest +func (l *Conn) Add(addRequest *AddRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(addRequest.encode()) + if len(addRequest.Controls) > 0 { + packet.AppendChild(encodeControls(addRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) 
+ } + + if packet.Children[1].Tag == ApplicationAddResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/bind.go b/vendor/github.com/go-ldap/ldap/bind.go new file mode 100644 index 0000000..59c3f5e --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/bind.go @@ -0,0 +1,135 @@ +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// SimpleBindRequest represents a username/password bind operation +type SimpleBindRequest struct { + // Username is the name of the Directory object that the client wishes to bind as + Username string + // Password is the credentials to bind with + Password string + // Controls are optional controls to send with the bind request + Controls []Control + // AllowEmptyPassword sets whether the client allows binding with an empty password + // (normally used for unauthenticated bind). + AllowEmptyPassword bool +} + +// SimpleBindResult contains the response from the server +type SimpleBindResult struct { + Controls []Control +} + +// NewSimpleBindRequest returns a bind request +func NewSimpleBindRequest(username string, password string, controls []Control) *SimpleBindRequest { + return &SimpleBindRequest{ + Username: username, + Password: password, + Controls: controls, + AllowEmptyPassword: false, + } +} + +func (bindRequest *SimpleBindRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationBindRequest, nil, "Bind Request") + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, 3, "Version")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, bindRequest.Username, "User Name")) + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, bindRequest.Password, "Password")) + + return request +} + +// SimpleBind performs the simple bind operation defined in the given request +func (l *Conn) SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) { + if simpleBindRequest.Password == "" && !simpleBindRequest.AllowEmptyPassword { + return nil, NewError(ErrorEmptyPassword, errors.New("ldap: empty password not allowed by the client")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + encodedBindRequest := simpleBindRequest.encode() + packet.AppendChild(encodedBindRequest) + if len(simpleBindRequest.Controls) > 0 { + packet.AppendChild(encodeControls(simpleBindRequest.Controls)) + } + + if l.Debug { + ber.PrintPacket(packet) + } + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err = addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + result := &SimpleBindResult{ + Controls: make([]Control, 0), + } + + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + 
decodedChild, decodeErr := DecodeControl(child) + if decodeErr != nil { + return nil, fmt.Errorf("failed to decode child control: %s", decodeErr) + } + result.Controls = append(result.Controls, decodedChild) + } + } + + err = GetLDAPError(packet) + return result, err +} + +// Bind performs a bind with the given username and password. +// +// It does not allow unauthenticated bind (i.e. empty password). Use the UnauthenticatedBind method +// for that. +func (l *Conn) Bind(username, password string) error { + req := &SimpleBindRequest{ + Username: username, + Password: password, + AllowEmptyPassword: false, + } + _, err := l.SimpleBind(req) + return err +} + +// UnauthenticatedBind performs an unauthenticated bind. +// +// A username may be provided for trace (e.g. logging) purpose only, but it is normally not +// authenticated or otherwise validated by the LDAP server. +// +// See https://tools.ietf.org/html/rfc4513#section-5.1.2 . +// See https://tools.ietf.org/html/rfc4513#section-6.3.1 . +func (l *Conn) UnauthenticatedBind(username string) error { + req := &SimpleBindRequest{ + Username: username, + Password: "", + AllowEmptyPassword: true, + } + _, err := l.SimpleBind(req) + return err +} diff --git a/vendor/github.com/go-ldap/ldap/client.go b/vendor/github.com/go-ldap/ldap/client.go new file mode 100644 index 0000000..c7f41f6 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/client.go @@ -0,0 +1,28 @@ +package ldap + +import ( + "crypto/tls" + "time" +) + +// Client knows how to interact with an LDAP server +type Client interface { + Start() + StartTLS(config *tls.Config) error + Close() + SetTimeout(time.Duration) + + Bind(username, password string) error + SimpleBind(simpleBindRequest *SimpleBindRequest) (*SimpleBindResult, error) + + Add(addRequest *AddRequest) error + Del(delRequest *DelRequest) error + Modify(modifyRequest *ModifyRequest) error + ModifyDN(modifyDNRequest *ModifyDNRequest) error + + Compare(dn, attribute, value string) (bool, error) + PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) + + Search(searchRequest *SearchRequest) (*SearchResult, error) + SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) +} diff --git a/vendor/github.com/go-ldap/ldap/compare.go b/vendor/github.com/go-ldap/ldap/compare.go new file mode 100644 index 0000000..5b5013c --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/compare.go @@ -0,0 +1,83 @@ +// File contains Compare functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// CompareRequest ::= [APPLICATION 14] SEQUENCE { +// entry LDAPDN, +// ava AttributeValueAssertion } +// +// AttributeValueAssertion ::= SEQUENCE { +// attributeDesc AttributeDescription, +// assertionValue AssertionValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// Compare checks to see if the attribute of the dn matches value. Returns true if it does otherwise +// false with any error that occurs if any. 
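
The Compare operation documented above maps onto the RFC 4511 compare request. A minimal sketch of calling it through the public API, assuming the `gopkg.in/ldap.v3` import path from the README; the server address, DN, attribute, and value are placeholders:

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v3"
)

func main() {
	// Hypothetical server and entry, for illustration only.
	l, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Compare asks the server whether the attribute of the DN has the given value.
	ok, err := l.Compare("uid=jdoe,ou=people,dc=example,dc=org", "mail", "jdoe@example.org")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("attribute matches:", ok)
}
```

In practice a Bind would normally precede the Compare so the server evaluates it with the caller's privileges.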
+func (l *Conn) Compare(dn, attribute, value string) (bool, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationCompareRequest, nil, "Compare Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, dn, "DN")) + + ava := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "AttributeValueAssertion") + ava.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "AttributeDesc")) + ava.AppendChild(ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "AssertionValue")) + request.AppendChild(ava) + packet.AppendChild(request) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return false, err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return false, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return false, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return false, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationCompareResponse { + err := GetLDAPError(packet) + + switch { + case IsErrorWithCode(err, LDAPResultCompareTrue): + return true, nil + case IsErrorWithCode(err, LDAPResultCompareFalse): + return false, nil + default: + return false, err + } + } + return false, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag) +} diff --git a/vendor/github.com/go-ldap/ldap/conn.go b/vendor/github.com/go-ldap/ldap/conn.go new file mode 100644 index 0000000..c20471f --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/conn.go @@ -0,0 +1,516 @@ +package ldap + +import ( + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "net/url" + "sync" + "sync/atomic" + "time" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // MessageQuit causes the processMessages loop to exit + MessageQuit = 0 + // MessageRequest sends a request to the server + MessageRequest = 1 + // MessageResponse receives a response from the server + MessageResponse = 2 + // MessageFinish indicates the client considers a particular message ID to be finished + MessageFinish = 3 + // MessageTimeout indicates the client-specified timeout for a particular message ID has been reached + MessageTimeout = 4 +) + +const ( + // DefaultLdapPort default ldap port for pure TCP connection + DefaultLdapPort = "389" + // DefaultLdapsPort default ldap port for SSL connection + DefaultLdapsPort = "636" +) + +// PacketResponse contains the packet or error encountered reading a response +type PacketResponse struct { + // Packet is the packet read from the server + Packet *ber.Packet + // Error is an error encountered while reading + Error error +} + +// ReadPacket returns the packet or an error +func (pr *PacketResponse) ReadPacket() (*ber.Packet, error) { + if (pr == nil) || (pr.Packet == nil && pr.Error == nil) { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve response")) + } + return pr.Packet, pr.Error +} + +type messageContext struct { + id int64 + // close(done) should only be 
called from finishMessage() + done chan struct{} + // close(responses) should only be called from processMessages(), and only sent to from sendResponse() + responses chan *PacketResponse +} + +// sendResponse should only be called within the processMessages() loop which +// is also responsible for closing the responses channel. +func (msgCtx *messageContext) sendResponse(packet *PacketResponse) { + select { + case msgCtx.responses <- packet: + // Successfully sent packet to message handler. + case <-msgCtx.done: + // The request handler is done and will not receive more + // packets. + } +} + +type messagePacket struct { + Op int + MessageID int64 + Packet *ber.Packet + Context *messageContext +} + +type sendMessageFlags uint + +const ( + startTLS sendMessageFlags = 1 << iota +) + +// Conn represents an LDAP Connection +type Conn struct { + // requestTimeout is loaded atomically + // so we need to ensure 64-bit alignment on 32-bit platforms. + requestTimeout int64 + conn net.Conn + isTLS bool + closing uint32 + closeErr atomic.Value + isStartingTLS bool + Debug debugging + chanConfirm chan struct{} + messageContexts map[int64]*messageContext + chanMessage chan *messagePacket + chanMessageID chan int64 + wgClose sync.WaitGroup + outstandingRequests uint + messageMutex sync.Mutex +} + +var _ Client = &Conn{} + +// DefaultTimeout is a package-level variable that sets the timeout value +// used for the Dial and DialTLS methods. +// +// WARNING: since this is a package-level variable, setting this value from +// multiple places will probably result in undesired behaviour. +var DefaultTimeout = 60 * time.Second + +// Dial connects to the given address on the given network using net.Dial +// and then returns a new Conn for the connection. +func Dial(network, addr string) (*Conn, error) { + c, err := net.DialTimeout(network, addr, DefaultTimeout) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, false) + conn.Start() + return conn, nil +} + +// DialTLS connects to the given address on the given network using tls.Dial +// and then returns a new Conn for the connection. +func DialTLS(network, addr string, config *tls.Config) (*Conn, error) { + c, err := tls.DialWithDialer(&net.Dialer{Timeout: DefaultTimeout}, network, addr, config) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + conn := NewConn(c, true) + conn.Start() + return conn, nil +} + +// DialURL connects to the given ldap URL vie TCP using tls.Dial or net.Dial if ldaps:// +// or ldap:// specified as protocol. On success a new Conn for the connection +// is returned. +func DialURL(addr string) (*Conn, error) { + + lurl, err := url.Parse(addr) + if err != nil { + return nil, NewError(ErrorNetwork, err) + } + + host, port, err := net.SplitHostPort(lurl.Host) + if err != nil { + // we asume that error is due to missing port + host = lurl.Host + port = "" + } + + switch lurl.Scheme { + case "ldap": + if port == "" { + port = DefaultLdapPort + } + return Dial("tcp", net.JoinHostPort(host, port)) + case "ldaps": + if port == "" { + port = DefaultLdapsPort + } + tlsConf := &tls.Config{ + ServerName: host, + } + return DialTLS("tcp", net.JoinHostPort(host, port), tlsConf) + } + + return nil, NewError(ErrorNetwork, fmt.Errorf("Unknown scheme '%s'", lurl.Scheme)) +} + +// NewConn returns a new Conn using conn for network I/O. 
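
NewConn is the low-level constructor that Dial and DialTLS build on. A hedged sketch of wrapping an already-established net.Conn yourself (for instance one obtained through a custom dialer) and starting the message loops; the address and credentials are placeholders:

```
package main

import (
	"log"
	"net"

	"gopkg.in/ldap.v3"
)

func main() {
	// Establish the TCP connection directly (hypothetical address),
	// then hand it to NewConn and start the reader/processor goroutines.
	c, err := net.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}

	conn := ldap.NewConn(c, false) // false: the socket is not already TLS
	conn.Start()
	defer conn.Close()

	if err := conn.Bind("cn=admin,dc=example,dc=org", "secret"); err != nil {
		log.Fatal(err)
	}
}
```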
+func NewConn(conn net.Conn, isTLS bool) *Conn { + return &Conn{ + conn: conn, + chanConfirm: make(chan struct{}), + chanMessageID: make(chan int64), + chanMessage: make(chan *messagePacket, 10), + messageContexts: map[int64]*messageContext{}, + requestTimeout: 0, + isTLS: isTLS, + } +} + +// Start initializes goroutines to read responses and process messages +func (l *Conn) Start() { + go l.reader() + go l.processMessages() + l.wgClose.Add(1) +} + +// IsClosing returns whether or not we're currently closing. +func (l *Conn) IsClosing() bool { + return atomic.LoadUint32(&l.closing) == 1 +} + +// setClosing sets the closing value to true +func (l *Conn) setClosing() bool { + return atomic.CompareAndSwapUint32(&l.closing, 0, 1) +} + +// Close closes the connection. +func (l *Conn) Close() { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + + if l.setClosing() { + l.Debug.Printf("Sending quit message and waiting for confirmation") + l.chanMessage <- &messagePacket{Op: MessageQuit} + <-l.chanConfirm + close(l.chanMessage) + + l.Debug.Printf("Closing network connection") + if err := l.conn.Close(); err != nil { + log.Println(err) + } + + l.wgClose.Done() + } + l.wgClose.Wait() +} + +// SetTimeout sets the time after a request is sent that a MessageTimeout triggers +func (l *Conn) SetTimeout(timeout time.Duration) { + if timeout > 0 { + atomic.StoreInt64(&l.requestTimeout, int64(timeout)) + } +} + +// Returns the next available messageID +func (l *Conn) nextMessageID() int64 { + if messageID, ok := <-l.chanMessageID; ok { + return messageID + } + return 0 +} + +// StartTLS sends the command to start a TLS session and then creates a new TLS Client +func (l *Conn) StartTLS(config *tls.Config) error { + if l.isTLS { + return NewError(ErrorNetwork, errors.New("ldap: already encrypted")) + } + + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Start TLS") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, "1.3.6.1.4.1.1466.20037", "TLS Extended Command")) + packet.AppendChild(request) + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessageWithFlags(packet, startTLS) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + l.Close() + return err + } + ber.PrintPacket(packet) + } + + if err := GetLDAPError(packet); err == nil { + conn := tls.Client(l.conn, config) + + if connErr := conn.Handshake(); connErr != nil { + l.Close() + return NewError(ErrorNetwork, fmt.Errorf("TLS handshake failed (%v)", connErr)) + } + + l.isTLS = true + l.conn = conn + } else { + return err + } + go l.reader() + + return nil +} + +// TLSConnectionState returns the client's TLS connection state. +// The return values are their zero values if StartTLS did +// not succeed. 
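
StartTLS above upgrades an existing plain-text connection in place, and TLSConnectionState lets the caller confirm the negotiated parameters afterwards. A minimal sketch, with a hypothetical host name and default certificate verification:

```
package main

import (
	"crypto/tls"
	"fmt"
	"log"

	"gopkg.in/ldap.v3"
)

func main() {
	l, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Issue the StartTLS extended operation and wrap the socket in TLS.
	if err := l.StartTLS(&tls.Config{ServerName: "ldap.example.org"}); err != nil {
		log.Fatal(err)
	}

	// Inspect the negotiated state; ok is false if StartTLS did not succeed.
	if state, ok := l.TLSConnectionState(); ok {
		fmt.Printf("TLS version 0x%x, cipher suite 0x%x\n", state.Version, state.CipherSuite)
	}
}
```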
+func (l *Conn) TLSConnectionState() (state tls.ConnectionState, ok bool) { + tc, ok := l.conn.(*tls.Conn) + if !ok { + return + } + return tc.ConnectionState(), true +} + +func (l *Conn) sendMessage(packet *ber.Packet) (*messageContext, error) { + return l.sendMessageWithFlags(packet, 0) +} + +func (l *Conn) sendMessageWithFlags(packet *ber.Packet, flags sendMessageFlags) (*messageContext, error) { + if l.IsClosing() { + return nil, NewError(ErrorNetwork, errors.New("ldap: connection closed")) + } + l.messageMutex.Lock() + l.Debug.Printf("flags&startTLS = %d", flags&startTLS) + if l.isStartingTLS { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: connection is in startls phase")) + } + if flags&startTLS != 0 { + if l.outstandingRequests != 0 { + l.messageMutex.Unlock() + return nil, NewError(ErrorNetwork, errors.New("ldap: cannot StartTLS with outstanding requests")) + } + l.isStartingTLS = true + } + l.outstandingRequests++ + + l.messageMutex.Unlock() + + responses := make(chan *PacketResponse) + messageID := packet.Children[0].Value.(int64) + message := &messagePacket{ + Op: MessageRequest, + MessageID: messageID, + Packet: packet, + Context: &messageContext{ + id: messageID, + done: make(chan struct{}), + responses: responses, + }, + } + l.sendProcessMessage(message) + return message.Context, nil +} + +func (l *Conn) finishMessage(msgCtx *messageContext) { + close(msgCtx.done) + + if l.IsClosing() { + return + } + + l.messageMutex.Lock() + l.outstandingRequests-- + if l.isStartingTLS { + l.isStartingTLS = false + } + l.messageMutex.Unlock() + + message := &messagePacket{ + Op: MessageFinish, + MessageID: msgCtx.id, + } + l.sendProcessMessage(message) +} + +func (l *Conn) sendProcessMessage(message *messagePacket) bool { + l.messageMutex.Lock() + defer l.messageMutex.Unlock() + if l.IsClosing() { + return false + } + l.chanMessage <- message + return true +} + +func (l *Conn) processMessages() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in processMessages: %v", err) + } + for messageID, msgCtx := range l.messageContexts { + // If we are closing due to an error, inform anyone who + // is waiting about the error. + if l.IsClosing() && l.closeErr.Load() != nil { + msgCtx.sendResponse(&PacketResponse{Error: l.closeErr.Load().(error)}) + } + l.Debug.Printf("Closing channel for MessageID %d", messageID) + close(msgCtx.responses) + delete(l.messageContexts, messageID) + } + close(l.chanMessageID) + close(l.chanConfirm) + }() + + var messageID int64 = 1 + for { + select { + case l.chanMessageID <- messageID: + messageID++ + case message := <-l.chanMessage: + switch message.Op { + case MessageQuit: + l.Debug.Printf("Shutting down - quit message received") + return + case MessageRequest: + // Add to message list and write to network + l.Debug.Printf("Sending message %d", message.MessageID) + + buf := message.Packet.Bytes() + _, err := l.conn.Write(buf) + if err != nil { + l.Debug.Printf("Error Sending Message: %s", err.Error()) + message.Context.sendResponse(&PacketResponse{Error: fmt.Errorf("unable to send request: %s", err)}) + close(message.Context.responses) + break + } + + // Only add to messageContexts if we were able to + // successfully write the message. 
+ l.messageContexts[message.MessageID] = message.Context + + // Add timeout if defined + requestTimeout := time.Duration(atomic.LoadInt64(&l.requestTimeout)) + if requestTimeout > 0 { + go func() { + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in RequestTimeout: %v", err) + } + }() + time.Sleep(requestTimeout) + timeoutMessage := &messagePacket{ + Op: MessageTimeout, + MessageID: message.MessageID, + } + l.sendProcessMessage(timeoutMessage) + }() + } + case MessageResponse: + l.Debug.Printf("Receiving message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + msgCtx.sendResponse(&PacketResponse{message.Packet, nil}) + } else { + log.Printf("Received unexpected message %d, %v", message.MessageID, l.IsClosing()) + ber.PrintPacket(message.Packet) + } + case MessageTimeout: + // Handle the timeout by closing the channel + // All reads will return immediately + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + l.Debug.Printf("Receiving message timeout for %d", message.MessageID) + msgCtx.sendResponse(&PacketResponse{message.Packet, errors.New("ldap: connection timed out")}) + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + case MessageFinish: + l.Debug.Printf("Finished message %d", message.MessageID) + if msgCtx, ok := l.messageContexts[message.MessageID]; ok { + delete(l.messageContexts, message.MessageID) + close(msgCtx.responses) + } + } + } + } +} + +func (l *Conn) reader() { + cleanstop := false + defer func() { + if err := recover(); err != nil { + log.Printf("ldap: recovered panic in reader: %v", err) + } + if !cleanstop { + l.Close() + } + }() + + for { + if cleanstop { + l.Debug.Printf("reader clean stopping (without closing the connection)") + return + } + packet, err := ber.ReadPacket(l.conn) + if err != nil { + // A read error is expected here if we are closing the connection... 
+ if !l.IsClosing() { + l.closeErr.Store(fmt.Errorf("unable to read LDAP response packet: %s", err)) + l.Debug.Printf("reader error: %s", err.Error()) + } + return + } + addLDAPDescriptions(packet) + if len(packet.Children) == 0 { + l.Debug.Printf("Received bad ldap packet") + continue + } + l.messageMutex.Lock() + if l.isStartingTLS { + cleanstop = true + } + l.messageMutex.Unlock() + message := &messagePacket{ + Op: MessageResponse, + MessageID: packet.Children[0].Value.(int64), + Packet: packet, + } + if !l.sendProcessMessage(message) { + return + } + } +} diff --git a/vendor/github.com/go-ldap/ldap/control.go b/vendor/github.com/go-ldap/ldap/control.go new file mode 100644 index 0000000..3f18191 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/control.go @@ -0,0 +1,499 @@ +package ldap + +import ( + "fmt" + "strconv" + + "gopkg.in/asn1-ber.v1" +) + +const ( + // ControlTypePaging - https://www.ietf.org/rfc/rfc2696.txt + ControlTypePaging = "1.2.840.113556.1.4.319" + // ControlTypeBeheraPasswordPolicy - https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 + ControlTypeBeheraPasswordPolicy = "1.3.6.1.4.1.42.2.27.8.5.1" + // ControlTypeVChuPasswordMustChange - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordMustChange = "2.16.840.1.113730.3.4.4" + // ControlTypeVChuPasswordWarning - https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 + ControlTypeVChuPasswordWarning = "2.16.840.1.113730.3.4.5" + // ControlTypeManageDsaIT - https://tools.ietf.org/html/rfc3296 + ControlTypeManageDsaIT = "2.16.840.1.113730.3.4.2" + + // ControlTypeMicrosoftNotification - https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx + ControlTypeMicrosoftNotification = "1.2.840.113556.1.4.528" + // ControlTypeMicrosoftShowDeleted - https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx + ControlTypeMicrosoftShowDeleted = "1.2.840.113556.1.4.417" +) + +// ControlTypeMap maps controls to text descriptions +var ControlTypeMap = map[string]string{ + ControlTypePaging: "Paging", + ControlTypeBeheraPasswordPolicy: "Password Policy - Behera Draft", + ControlTypeManageDsaIT: "Manage DSA IT", + ControlTypeMicrosoftNotification: "Change Notification - Microsoft", + ControlTypeMicrosoftShowDeleted: "Show Deleted Objects - Microsoft", +} + +// Control defines an interface controls provide to encode and describe themselves +type Control interface { + // GetControlType returns the OID + GetControlType() string + // Encode returns the ber packet representation + Encode() *ber.Packet + // String returns a human-readable description + String() string +} + +// ControlString implements the Control interface for simple controls +type ControlString struct { + ControlType string + Criticality bool + ControlValue string +} + +// GetControlType returns the OID +func (c *ControlString) GetControlType() string { + return c.ControlType +} + +// Encode returns the ber packet representation +func (c *ControlString) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, c.ControlType, "Control Type ("+ControlTypeMap[c.ControlType]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + if c.ControlValue != "" { + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, 
string(c.ControlValue), "Control Value")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlString) String() string { + return fmt.Sprintf("Control Type: %s (%q) Criticality: %t Control Value: %s", ControlTypeMap[c.ControlType], c.ControlType, c.Criticality, c.ControlValue) +} + +// ControlPaging implements the paging control described in https://www.ietf.org/rfc/rfc2696.txt +type ControlPaging struct { + // PagingSize indicates the page size + PagingSize uint32 + // Cookie is an opaque value returned by the server to track a paging cursor + Cookie []byte +} + +// GetControlType returns the OID +func (c *ControlPaging) GetControlType() string { + return ControlTypePaging +} + +// Encode returns the ber packet representation +func (c *ControlPaging) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypePaging, "Control Type ("+ControlTypeMap[ControlTypePaging]+")")) + + p2 := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Control Value (Paging)") + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Search Control Value") + seq.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(c.PagingSize), "Paging Size")) + cookie := ber.Encode(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, nil, "Cookie") + cookie.Value = c.Cookie + cookie.Data.Write(c.Cookie) + seq.AppendChild(cookie) + p2.AppendChild(seq) + + packet.AppendChild(p2) + return packet +} + +// String returns a human-readable description +func (c *ControlPaging) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t PagingSize: %d Cookie: %q", + ControlTypeMap[ControlTypePaging], + ControlTypePaging, + false, + c.PagingSize, + c.Cookie) +} + +// SetCookie stores the given cookie in the paging control +func (c *ControlPaging) SetCookie(cookie []byte) { + c.Cookie = cookie +} + +// ControlBeheraPasswordPolicy implements the control described in https://tools.ietf.org/html/draft-behera-ldap-password-policy-10 +type ControlBeheraPasswordPolicy struct { + // Expire contains the number of seconds before a password will expire + Expire int64 + // Grace indicates the remaining number of times a user will be allowed to authenticate with an expired password + Grace int64 + // Error indicates the error code + Error int8 + // ErrorString is a human readable error + ErrorString string +} + +// GetControlType returns the OID +func (c *ControlBeheraPasswordPolicy) GetControlType() string { + return ControlTypeBeheraPasswordPolicy +} + +// Encode returns the ber packet representation +func (c *ControlBeheraPasswordPolicy) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeBeheraPasswordPolicy, "Control Type ("+ControlTypeMap[ControlTypeBeheraPasswordPolicy]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlBeheraPasswordPolicy) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %d Grace: %d Error: %d, ErrorString: %s", + ControlTypeMap[ControlTypeBeheraPasswordPolicy], + ControlTypeBeheraPasswordPolicy, + false, + c.Expire, + c.Grace, + c.Error, + c.ErrorString) +} + 
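
The Behera password-policy control above is typically attached to a bind request and then read back from the response controls. A hedged sketch using only identifiers defined in this package; the host, DN, and password are placeholders:

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v3"
)

func main() {
	l, err := ldap.Dial("tcp", "ldap.example.org:389")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	// Ask the server to return password-policy information with the bind response.
	req := ldap.NewSimpleBindRequest(
		"uid=jdoe,ou=people,dc=example,dc=org",
		"secret",
		[]ldap.Control{ldap.NewControlBeheraPasswordPolicy()},
	)
	res, err := l.SimpleBind(req)
	if err != nil {
		log.Fatal(err)
	}

	// FindControl returns nil when the server did not include the control.
	if c := ldap.FindControl(res.Controls, ldap.ControlTypeBeheraPasswordPolicy); c != nil {
		ppolicy := c.(*ldap.ControlBeheraPasswordPolicy)
		fmt.Printf("expires in %d s, %d grace binds left, error: %s\n",
			ppolicy.Expire, ppolicy.Grace, ppolicy.ErrorString)
	}
}
```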
+// ControlVChuPasswordMustChange implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordMustChange struct { + // MustChange indicates if the password is required to be changed + MustChange bool +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordMustChange) GetControlType() string { + return ControlTypeVChuPasswordMustChange +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordMustChange) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordMustChange) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t MustChange: %v", + ControlTypeMap[ControlTypeVChuPasswordMustChange], + ControlTypeVChuPasswordMustChange, + false, + c.MustChange) +} + +// ControlVChuPasswordWarning implements the control described in https://tools.ietf.org/html/draft-vchu-ldap-pwd-policy-00 +type ControlVChuPasswordWarning struct { + // Expire indicates the time in seconds until the password expires + Expire int64 +} + +// GetControlType returns the OID +func (c *ControlVChuPasswordWarning) GetControlType() string { + return ControlTypeVChuPasswordWarning +} + +// Encode returns the ber packet representation +func (c *ControlVChuPasswordWarning) Encode() *ber.Packet { + return nil +} + +// String returns a human-readable description +func (c *ControlVChuPasswordWarning) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t Expire: %b", + ControlTypeMap[ControlTypeVChuPasswordWarning], + ControlTypeVChuPasswordWarning, + false, + c.Expire) +} + +// ControlManageDsaIT implements the control described in https://tools.ietf.org/html/rfc3296 +type ControlManageDsaIT struct { + // Criticality indicates if this control is required + Criticality bool +} + +// GetControlType returns the OID +func (c *ControlManageDsaIT) GetControlType() string { + return ControlTypeManageDsaIT +} + +// Encode returns the ber packet representation +func (c *ControlManageDsaIT) Encode() *ber.Packet { + //FIXME + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeManageDsaIT, "Control Type ("+ControlTypeMap[ControlTypeManageDsaIT]+")")) + if c.Criticality { + packet.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, c.Criticality, "Criticality")) + } + return packet +} + +// String returns a human-readable description +func (c *ControlManageDsaIT) String() string { + return fmt.Sprintf( + "Control Type: %s (%q) Criticality: %t", + ControlTypeMap[ControlTypeManageDsaIT], + ControlTypeManageDsaIT, + c.Criticality) +} + +// NewControlManageDsaIT returns a ControlManageDsaIT control +func NewControlManageDsaIT(Criticality bool) *ControlManageDsaIT { + return &ControlManageDsaIT{Criticality: Criticality} +} + +// ControlMicrosoftNotification implements the control described in https://msdn.microsoft.com/en-us/library/aa366983(v=vs.85).aspx +type ControlMicrosoftNotification struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftNotification) GetControlType() string { + return ControlTypeMicrosoftNotification +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftNotification) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + 
packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftNotification, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftNotification]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftNotification) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftNotification], + ControlTypeMicrosoftNotification) +} + +// NewControlMicrosoftNotification returns a ControlMicrosoftNotification control +func NewControlMicrosoftNotification() *ControlMicrosoftNotification { + return &ControlMicrosoftNotification{} +} + +// ControlMicrosoftShowDeleted implements the control described in https://msdn.microsoft.com/en-us/library/aa366989(v=vs.85).aspx +type ControlMicrosoftShowDeleted struct{} + +// GetControlType returns the OID +func (c *ControlMicrosoftShowDeleted) GetControlType() string { + return ControlTypeMicrosoftShowDeleted +} + +// Encode returns the ber packet representation +func (c *ControlMicrosoftShowDeleted) Encode() *ber.Packet { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Control") + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, ControlTypeMicrosoftShowDeleted, "Control Type ("+ControlTypeMap[ControlTypeMicrosoftShowDeleted]+")")) + + return packet +} + +// String returns a human-readable description +func (c *ControlMicrosoftShowDeleted) String() string { + return fmt.Sprintf( + "Control Type: %s (%q)", + ControlTypeMap[ControlTypeMicrosoftShowDeleted], + ControlTypeMicrosoftShowDeleted) +} + +// NewControlMicrosoftShowDeleted returns a ControlMicrosoftShowDeleted control +func NewControlMicrosoftShowDeleted() *ControlMicrosoftShowDeleted { + return &ControlMicrosoftShowDeleted{} +} + +// FindControl returns the first control of the given type in the list, or nil +func FindControl(controls []Control, controlType string) Control { + for _, c := range controls { + if c.GetControlType() == controlType { + return c + } + } + return nil +} + +// DecodeControl returns a control read from the given packet, or nil if no recognized control can be made +func DecodeControl(packet *ber.Packet) (Control, error) { + var ( + ControlType = "" + Criticality = false + value *ber.Packet + ) + + switch len(packet.Children) { + case 0: + // at least one child is required for control type + return nil, fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + case 2: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := packet.Children[1].Value.(bool); ok { + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + } else { + packet.Children[1].Description = "Control Value" + value = packet.Children[1] + } + + case 3: + packet.Children[0].Description = "Control Type (" + ControlTypeMap[ControlType] + ")" + ControlType = packet.Children[0].Value.(string) + + packet.Children[1].Description = "Criticality" + Criticality = packet.Children[1].Value.(bool) + + packet.Children[2].Description = "Control Value" + value = 
packet.Children[2] + + default: + // more than 3 children is invalid + return nil, fmt.Errorf("more than 3 children is invalid for controls") + } + + switch ControlType { + case ControlTypeManageDsaIT: + return NewControlManageDsaIT(Criticality), nil + case ControlTypePaging: + value.Description += " (Paging)" + c := new(ControlPaging) + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + value = value.Children[0] + value.Description = "Search Control Value" + value.Children[0].Description = "Paging Size" + value.Children[1].Description = "Cookie" + c.PagingSize = uint32(value.Children[0].Value.(int64)) + c.Cookie = value.Children[1].Data.Bytes() + value.Children[1].Value = c.Cookie + return c, nil + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera)" + c := NewControlBeheraPasswordPolicy() + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + + sequence := value.Children[0] + + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + c.Expire = val + warningPacket.Value = c.Expire + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + c.Grace = val + warningPacket.Value = c.Grace + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + // what to do? 
+ val = -1 + } + c.Error = val + child.Value = c.Error + c.ErrorString = BeheraPasswordPolicyErrorMap[c.Error] + } + } + return c, nil + case ControlTypeVChuPasswordMustChange: + c := &ControlVChuPasswordMustChange{MustChange: true} + return c, nil + case ControlTypeVChuPasswordWarning: + c := &ControlVChuPasswordWarning{Expire: -1} + expireStr := ber.DecodeString(value.Data.Bytes()) + + expire, err := strconv.ParseInt(expireStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse value as int: %s", err) + } + c.Expire = expire + value.Value = c.Expire + + return c, nil + case ControlTypeMicrosoftNotification: + return NewControlMicrosoftNotification(), nil + case ControlTypeMicrosoftShowDeleted: + return NewControlMicrosoftShowDeleted(), nil + default: + c := new(ControlString) + c.ControlType = ControlType + c.Criticality = Criticality + if value != nil { + c.ControlValue = value.Value.(string) + } + return c, nil + } +} + +// NewControlString returns a generic control +func NewControlString(controlType string, criticality bool, controlValue string) *ControlString { + return &ControlString{ + ControlType: controlType, + Criticality: criticality, + ControlValue: controlValue, + } +} + +// NewControlPaging returns a paging control +func NewControlPaging(pagingSize uint32) *ControlPaging { + return &ControlPaging{PagingSize: pagingSize} +} + +// NewControlBeheraPasswordPolicy returns a ControlBeheraPasswordPolicy +func NewControlBeheraPasswordPolicy() *ControlBeheraPasswordPolicy { + return &ControlBeheraPasswordPolicy{ + Expire: -1, + Grace: -1, + Error: -1, + } +} + +func encodeControls(controls []Control) *ber.Packet { + packet := ber.Encode(ber.ClassContext, ber.TypeConstructed, 0, nil, "Controls") + for _, control := range controls { + packet.AppendChild(control.Encode()) + } + return packet +} diff --git a/vendor/github.com/go-ldap/ldap/debug.go b/vendor/github.com/go-ldap/ldap/debug.go new file mode 100644 index 0000000..7279fc2 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/debug.go @@ -0,0 +1,24 @@ +package ldap + +import ( + "log" + + "gopkg.in/asn1-ber.v1" +) + +// debugging type +// - has a Printf method to write the debug output +type debugging bool + +// write debug output +func (debug debugging) Printf(format string, args ...interface{}) { + if debug { + log.Printf(format, args...) 
+ } +} + +func (debug debugging) PrintPacket(packet *ber.Packet) { + if debug { + ber.PrintPacket(packet) + } +} diff --git a/vendor/github.com/go-ldap/ldap/del.go b/vendor/github.com/go-ldap/ldap/del.go new file mode 100644 index 0000000..6f78beb --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/del.go @@ -0,0 +1,84 @@ +// +// https://tools.ietf.org/html/rfc4511 +// +// DelRequest ::= [APPLICATION 10] LDAPDN + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// DelRequest implements an LDAP deletion request +type DelRequest struct { + // DN is the name of the directory entry to delete + DN string + // Controls hold optional controls to send with the request + Controls []Control +} + +func (d DelRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypePrimitive, ApplicationDelRequest, d.DN, "Del Request") + request.Data.Write([]byte(d.DN)) + return request +} + +// NewDelRequest creates a delete request for the given DN and controls +func NewDelRequest(DN string, + Controls []Control) *DelRequest { + return &DelRequest{ + DN: DN, + Controls: Controls, + } +} + +// Del executes the given delete request +func (l *Conn) Del(delRequest *DelRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(delRequest.encode()) + if len(delRequest.Controls) > 0 { + packet.AppendChild(encodeControls(delRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationDelResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/dn.go b/vendor/github.com/go-ldap/ldap/dn.go new file mode 100644 index 0000000..f89e73a --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/dn.go @@ -0,0 +1,247 @@ +// File contains DN parsing functionality +// +// https://tools.ietf.org/html/rfc4514 +// +// distinguishedName = [ relativeDistinguishedName +// *( COMMA relativeDistinguishedName ) ] +// relativeDistinguishedName = attributeTypeAndValue +// *( PLUS attributeTypeAndValue ) +// attributeTypeAndValue = attributeType EQUALS attributeValue +// attributeType = descr / numericoid +// attributeValue = string / hexstring +// +// ; The following characters are to be escaped when they appear +// ; in the value to be encoded: ESC, one of , leading +// ; SHARP or SPACE, trailing SPACE, and NULL. 
+// string = [ ( leadchar / pair ) [ *( stringchar / pair ) +// ( trailchar / pair ) ] ] +// +// leadchar = LUTF1 / UTFMB +// LUTF1 = %x01-1F / %x21 / %x24-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// trailchar = TUTF1 / UTFMB +// TUTF1 = %x01-1F / %x21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// stringchar = SUTF1 / UTFMB +// SUTF1 = %x01-21 / %x23-2A / %x2D-3A / +// %x3D / %x3F-5B / %x5D-7F +// +// pair = ESC ( ESC / special / hexpair ) +// special = escaped / SPACE / SHARP / EQUALS +// escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE +// hexstring = SHARP 1*hexpair +// hexpair = HEX HEX +// +// where the productions , , , , +// , , , , , , , , +// , , and are defined in [RFC4512]. +// + +package ldap + +import ( + "bytes" + enchex "encoding/hex" + "errors" + "fmt" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// AttributeTypeAndValue represents an attributeTypeAndValue from https://tools.ietf.org/html/rfc4514 +type AttributeTypeAndValue struct { + // Type is the attribute type + Type string + // Value is the attribute value + Value string +} + +// RelativeDN represents a relativeDistinguishedName from https://tools.ietf.org/html/rfc4514 +type RelativeDN struct { + Attributes []*AttributeTypeAndValue +} + +// DN represents a distinguishedName from https://tools.ietf.org/html/rfc4514 +type DN struct { + RDNs []*RelativeDN +} + +// ParseDN returns a distinguishedName or an error +func ParseDN(str string) (*DN, error) { + dn := new(DN) + dn.RDNs = make([]*RelativeDN, 0) + rdn := new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + buffer := bytes.Buffer{} + attribute := new(AttributeTypeAndValue) + escaping := false + + unescapedTrailingSpaces := 0 + stringFromBuffer := func() string { + s := buffer.String() + s = s[0 : len(s)-unescapedTrailingSpaces] + buffer.Reset() + unescapedTrailingSpaces = 0 + return s + } + + for i := 0; i < len(str); i++ { + char := str[i] + switch { + case escaping: + unescapedTrailingSpaces = 0 + escaping = false + switch char { + case ' ', '"', '#', '+', ',', ';', '<', '=', '>', '\\': + buffer.WriteByte(char) + continue + } + // Not a special character, assume hex encoded octet + if len(str) == i+1 { + return nil, errors.New("got corrupted escaped character") + } + + dst := []byte{0} + n, err := enchex.Decode([]byte(dst), []byte(str[i:i+2])) + if err != nil { + return nil, fmt.Errorf("failed to decode escaped character: %s", err) + } else if n != 1 { + return nil, fmt.Errorf("expected 1 byte when un-escaping, got %d", n) + } + buffer.WriteByte(dst[0]) + i++ + case char == '\\': + unescapedTrailingSpaces = 0 + escaping = true + case char == '=': + attribute.Type = stringFromBuffer() + // Special case: If the first character in the value is # the + // following data is BER encoded so we can just fast forward + // and decode. 
+ if len(str) > i+1 && str[i+1] == '#' { + i += 2 + index := strings.IndexAny(str[i:], ",+") + data := str + if index > 0 { + data = str[i : i+index] + } else { + data = str[i:] + } + rawBER, err := enchex.DecodeString(data) + if err != nil { + return nil, fmt.Errorf("failed to decode BER encoding: %s", err) + } + packet, err := ber.DecodePacketErr(rawBER) + if err != nil { + return nil, fmt.Errorf("failed to decode BER packet: %s", err) + } + buffer.WriteString(packet.Data.String()) + i += len(data) - 1 + } + case char == ',' || char == '+': + // We're done with this RDN or value, push it + if len(attribute.Type) == 0 { + return nil, errors.New("incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + attribute = new(AttributeTypeAndValue) + if char == ',' { + dn.RDNs = append(dn.RDNs, rdn) + rdn = new(RelativeDN) + rdn.Attributes = make([]*AttributeTypeAndValue, 0) + } + case char == ' ' && buffer.Len() == 0: + // ignore unescaped leading spaces + continue + default: + if char == ' ' { + // Track unescaped spaces in case they are trailing and we need to remove them + unescapedTrailingSpaces++ + } else { + // Reset if we see a non-space char + unescapedTrailingSpaces = 0 + } + buffer.WriteByte(char) + } + } + if buffer.Len() > 0 { + if len(attribute.Type) == 0 { + return nil, errors.New("DN ended with incomplete type, value pair") + } + attribute.Value = stringFromBuffer() + rdn.Attributes = append(rdn.Attributes, attribute) + dn.RDNs = append(dn.RDNs, rdn) + } + return dn, nil +} + +// Equal returns true if the DNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Returns true if they have the same number of relative distinguished names +// and corresponding relative distinguished names (by position) are the same. +func (d *DN) Equal(other *DN) bool { + if len(d.RDNs) != len(other.RDNs) { + return false + } + for i := range d.RDNs { + if !d.RDNs[i].Equal(other.RDNs[i]) { + return false + } + } + return true +} + +// AncestorOf returns true if the other DN consists of at least one RDN followed by all the RDNs of the current DN. +// "ou=widgets,o=acme.com" is an ancestor of "ou=sprockets,ou=widgets,o=acme.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=sprockets,ou=widgets,o=foo.com" +// "ou=widgets,o=acme.com" is not an ancestor of "ou=widgets,o=acme.com" +func (d *DN) AncestorOf(other *DN) bool { + if len(d.RDNs) >= len(other.RDNs) { + return false + } + // Take the last `len(d.RDNs)` RDNs from the other DN to compare against + otherRDNs := other.RDNs[len(other.RDNs)-len(d.RDNs):] + for i := range d.RDNs { + if !d.RDNs[i].Equal(otherRDNs[i]) { + return false + } + } + return true +} + +// Equal returns true if the RelativeDNs are equal as defined by rfc4517 4.2.15 (distinguishedNameMatch). +// Relative distinguished names are the same if and only if they have the same number of AttributeTypeAndValues +// and each attribute of the first RDN is the same as the attribute of the second RDN with the same attribute type. +// The order of attributes is not significant. +// Case of attribute types is not significant. 
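
ParseDN and the Equal/AncestorOf helpers above implement RFC 4514 parsing and RFC 4517 distinguishedNameMatch semantics. A small illustrative sketch, reusing the DNs from the AncestorOf documentation:

```
package main

import (
	"fmt"
	"log"

	"gopkg.in/ldap.v3"
)

func main() {
	parent, err := ldap.ParseDN("ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}
	child, err := ldap.ParseDN("ou=sprockets,ou=widgets,o=acme.com")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(parent.Equal(child))      // false: different number of RDNs
	fmt.Println(parent.AncestorOf(child)) // true: child ends with all of parent's RDNs
}
```

Note that attribute types are compared case-insensitively while attribute values are compared literally.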
+func (r *RelativeDN) Equal(other *RelativeDN) bool { + if len(r.Attributes) != len(other.Attributes) { + return false + } + return r.hasAllAttributes(other.Attributes) && other.hasAllAttributes(r.Attributes) +} + +func (r *RelativeDN) hasAllAttributes(attrs []*AttributeTypeAndValue) bool { + for _, attr := range attrs { + found := false + for _, myattr := range r.Attributes { + if myattr.Equal(attr) { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +// Equal returns true if the AttributeTypeAndValue is equivalent to the specified AttributeTypeAndValue +// Case of the attribute type is not significant +func (a *AttributeTypeAndValue) Equal(other *AttributeTypeAndValue) bool { + return strings.EqualFold(a.Type, other.Type) && a.Value == other.Value +} diff --git a/vendor/github.com/go-ldap/ldap/doc.go b/vendor/github.com/go-ldap/ldap/doc.go new file mode 100644 index 0000000..f20d39b --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/doc.go @@ -0,0 +1,4 @@ +/* +Package ldap provides basic LDAP v3 functionality. +*/ +package ldap diff --git a/vendor/github.com/go-ldap/ldap/error.go b/vendor/github.com/go-ldap/ldap/error.go new file mode 100644 index 0000000..639ed82 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/error.go @@ -0,0 +1,234 @@ +package ldap + +import ( + "fmt" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Result Codes +const ( + LDAPResultSuccess = 0 + LDAPResultOperationsError = 1 + LDAPResultProtocolError = 2 + LDAPResultTimeLimitExceeded = 3 + LDAPResultSizeLimitExceeded = 4 + LDAPResultCompareFalse = 5 + LDAPResultCompareTrue = 6 + LDAPResultAuthMethodNotSupported = 7 + LDAPResultStrongAuthRequired = 8 + LDAPResultReferral = 10 + LDAPResultAdminLimitExceeded = 11 + LDAPResultUnavailableCriticalExtension = 12 + LDAPResultConfidentialityRequired = 13 + LDAPResultSaslBindInProgress = 14 + LDAPResultNoSuchAttribute = 16 + LDAPResultUndefinedAttributeType = 17 + LDAPResultInappropriateMatching = 18 + LDAPResultConstraintViolation = 19 + LDAPResultAttributeOrValueExists = 20 + LDAPResultInvalidAttributeSyntax = 21 + LDAPResultNoSuchObject = 32 + LDAPResultAliasProblem = 33 + LDAPResultInvalidDNSyntax = 34 + LDAPResultIsLeaf = 35 + LDAPResultAliasDereferencingProblem = 36 + LDAPResultInappropriateAuthentication = 48 + LDAPResultInvalidCredentials = 49 + LDAPResultInsufficientAccessRights = 50 + LDAPResultBusy = 51 + LDAPResultUnavailable = 52 + LDAPResultUnwillingToPerform = 53 + LDAPResultLoopDetect = 54 + LDAPResultSortControlMissing = 60 + LDAPResultOffsetRangeError = 61 + LDAPResultNamingViolation = 64 + LDAPResultObjectClassViolation = 65 + LDAPResultNotAllowedOnNonLeaf = 66 + LDAPResultNotAllowedOnRDN = 67 + LDAPResultEntryAlreadyExists = 68 + LDAPResultObjectClassModsProhibited = 69 + LDAPResultResultsTooLarge = 70 + LDAPResultAffectsMultipleDSAs = 71 + LDAPResultVirtualListViewErrorOrControlError = 76 + LDAPResultOther = 80 + LDAPResultServerDown = 81 + LDAPResultLocalError = 82 + LDAPResultEncodingError = 83 + LDAPResultDecodingError = 84 + LDAPResultTimeout = 85 + LDAPResultAuthUnknown = 86 + LDAPResultFilterError = 87 + LDAPResultUserCanceled = 88 + LDAPResultParamError = 89 + LDAPResultNoMemory = 90 + LDAPResultConnectError = 91 + LDAPResultNotSupported = 92 + LDAPResultControlNotFound = 93 + LDAPResultNoResultsReturned = 94 + LDAPResultMoreResultsToReturn = 95 + LDAPResultClientLoop = 96 + LDAPResultReferralLimitExceeded = 97 + LDAPResultInvalidResponse = 100 + LDAPResultAmbiguousResponse = 101 + LDAPResultTLSNotSupported = 112 + 
LDAPResultIntermediateResponse = 113 + LDAPResultUnknownType = 114 + LDAPResultCanceled = 118 + LDAPResultNoSuchOperation = 119 + LDAPResultTooLate = 120 + LDAPResultCannotCancel = 121 + LDAPResultAssertionFailed = 122 + LDAPResultAuthorizationDenied = 123 + LDAPResultSyncRefreshRequired = 4096 + + ErrorNetwork = 200 + ErrorFilterCompile = 201 + ErrorFilterDecompile = 202 + ErrorDebugging = 203 + ErrorUnexpectedMessage = 204 + ErrorUnexpectedResponse = 205 + ErrorEmptyPassword = 206 +) + +// LDAPResultCodeMap contains string descriptions for LDAP error codes +var LDAPResultCodeMap = map[uint16]string{ + LDAPResultSuccess: "Success", + LDAPResultOperationsError: "Operations Error", + LDAPResultProtocolError: "Protocol Error", + LDAPResultTimeLimitExceeded: "Time Limit Exceeded", + LDAPResultSizeLimitExceeded: "Size Limit Exceeded", + LDAPResultCompareFalse: "Compare False", + LDAPResultCompareTrue: "Compare True", + LDAPResultAuthMethodNotSupported: "Auth Method Not Supported", + LDAPResultStrongAuthRequired: "Strong Auth Required", + LDAPResultReferral: "Referral", + LDAPResultAdminLimitExceeded: "Admin Limit Exceeded", + LDAPResultUnavailableCriticalExtension: "Unavailable Critical Extension", + LDAPResultConfidentialityRequired: "Confidentiality Required", + LDAPResultSaslBindInProgress: "Sasl Bind In Progress", + LDAPResultNoSuchAttribute: "No Such Attribute", + LDAPResultUndefinedAttributeType: "Undefined Attribute Type", + LDAPResultInappropriateMatching: "Inappropriate Matching", + LDAPResultConstraintViolation: "Constraint Violation", + LDAPResultAttributeOrValueExists: "Attribute Or Value Exists", + LDAPResultInvalidAttributeSyntax: "Invalid Attribute Syntax", + LDAPResultNoSuchObject: "No Such Object", + LDAPResultAliasProblem: "Alias Problem", + LDAPResultInvalidDNSyntax: "Invalid DN Syntax", + LDAPResultIsLeaf: "Is Leaf", + LDAPResultAliasDereferencingProblem: "Alias Dereferencing Problem", + LDAPResultInappropriateAuthentication: "Inappropriate Authentication", + LDAPResultInvalidCredentials: "Invalid Credentials", + LDAPResultInsufficientAccessRights: "Insufficient Access Rights", + LDAPResultBusy: "Busy", + LDAPResultUnavailable: "Unavailable", + LDAPResultUnwillingToPerform: "Unwilling To Perform", + LDAPResultLoopDetect: "Loop Detect", + LDAPResultSortControlMissing: "Sort Control Missing", + LDAPResultOffsetRangeError: "Result Offset Range Error", + LDAPResultNamingViolation: "Naming Violation", + LDAPResultObjectClassViolation: "Object Class Violation", + LDAPResultResultsTooLarge: "Results Too Large", + LDAPResultNotAllowedOnNonLeaf: "Not Allowed On Non Leaf", + LDAPResultNotAllowedOnRDN: "Not Allowed On RDN", + LDAPResultEntryAlreadyExists: "Entry Already Exists", + LDAPResultObjectClassModsProhibited: "Object Class Mods Prohibited", + LDAPResultAffectsMultipleDSAs: "Affects Multiple DSAs", + LDAPResultVirtualListViewErrorOrControlError: "Failed because of a problem related to the virtual list view", + LDAPResultOther: "Other", + LDAPResultServerDown: "Cannot establish a connection", + LDAPResultLocalError: "An error occurred", + LDAPResultEncodingError: "LDAP encountered an error while encoding", + LDAPResultDecodingError: "LDAP encountered an error while decoding", + LDAPResultTimeout: "LDAP timeout while waiting for a response from the server", + LDAPResultAuthUnknown: "The auth method requested in a bind request is unknown", + LDAPResultFilterError: "An error occurred while encoding the given search filter", + LDAPResultUserCanceled: "The user canceled the 
operation", + LDAPResultParamError: "An invalid parameter was specified", + LDAPResultNoMemory: "Out of memory error", + LDAPResultConnectError: "A connection to the server could not be established", + LDAPResultNotSupported: "An attempt has been made to use a feature not supported LDAP", + LDAPResultControlNotFound: "The controls required to perform the requested operation were not found", + LDAPResultNoResultsReturned: "No results were returned from the server", + LDAPResultMoreResultsToReturn: "There are more results in the chain of results", + LDAPResultClientLoop: "A loop has been detected. For example when following referrals", + LDAPResultReferralLimitExceeded: "The referral hop limit has been exceeded", + LDAPResultCanceled: "Operation was canceled", + LDAPResultNoSuchOperation: "Server has no knowledge of the operation requested for cancellation", + LDAPResultTooLate: "Too late to cancel the outstanding operation", + LDAPResultCannotCancel: "The identified operation does not support cancellation or the cancel operation cannot be performed", + LDAPResultAssertionFailed: "An assertion control given in the LDAP operation evaluated to false causing the operation to not be performed", + LDAPResultSyncRefreshRequired: "Refresh Required", + LDAPResultInvalidResponse: "Invalid Response", + LDAPResultAmbiguousResponse: "Ambiguous Response", + LDAPResultTLSNotSupported: "Tls Not Supported", + LDAPResultIntermediateResponse: "Intermediate Response", + LDAPResultUnknownType: "Unknown Type", + LDAPResultAuthorizationDenied: "Authorization Denied", + + ErrorNetwork: "Network Error", + ErrorFilterCompile: "Filter Compile Error", + ErrorFilterDecompile: "Filter Decompile Error", + ErrorDebugging: "Debugging Error", + ErrorUnexpectedMessage: "Unexpected Message", + ErrorUnexpectedResponse: "Unexpected Response", + ErrorEmptyPassword: "Empty password not allowed by the client", +} + +// Error holds LDAP error information +type Error struct { + // Err is the underlying error + Err error + // ResultCode is the LDAP error code + ResultCode uint16 + // MatchedDN is the matchedDN returned if any + MatchedDN string +} + +func (e *Error) Error() string { + return fmt.Sprintf("LDAP Result Code %d %q: %s", e.ResultCode, LDAPResultCodeMap[e.ResultCode], e.Err.Error()) +} + +// GetLDAPError creates an Error out of a BER packet representing a LDAPResult +// The return is an error object. It can be casted to a Error structure. +// This function returns nil if resultCode in the LDAPResult sequence is success(0). 
+func GetLDAPError(packet *ber.Packet) error { + if packet == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty packet")} + } else if len(packet.Children) >= 2 { + response := packet.Children[1] + if response == nil { + return &Error{ResultCode: ErrorUnexpectedResponse, Err: fmt.Errorf("Empty response in packet")} + } + if response.ClassType == ber.ClassApplication && response.TagType == ber.TypeConstructed && len(response.Children) >= 3 { + resultCode := uint16(response.Children[0].Value.(int64)) + if resultCode == 0 { // No error + return nil + } + return &Error{ResultCode: resultCode, MatchedDN: response.Children[1].Value.(string), + Err: fmt.Errorf("%s", response.Children[2].Value.(string))} + } + } + + return &Error{ResultCode: ErrorNetwork, Err: fmt.Errorf("Invalid packet format")} +} + +// NewError creates an LDAP error with the given code and underlying error +func NewError(resultCode uint16, err error) error { + return &Error{ResultCode: resultCode, Err: err} +} + +// IsErrorWithCode returns true if the given error is an LDAP error with the given result code +func IsErrorWithCode(err error, desiredResultCode uint16) bool { + if err == nil { + return false + } + + serverError, ok := err.(*Error) + if !ok { + return false + } + + return serverError.ResultCode == desiredResultCode +} diff --git a/vendor/github.com/go-ldap/ldap/filter.go b/vendor/github.com/go-ldap/ldap/filter.go new file mode 100644 index 0000000..4cc4207 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/filter.go @@ -0,0 +1,465 @@ +package ldap + +import ( + "bytes" + hexpac "encoding/hex" + "errors" + "fmt" + "strings" + "unicode/utf8" + + "gopkg.in/asn1-ber.v1" +) + +// Filter choices +const ( + FilterAnd = 0 + FilterOr = 1 + FilterNot = 2 + FilterEqualityMatch = 3 + FilterSubstrings = 4 + FilterGreaterOrEqual = 5 + FilterLessOrEqual = 6 + FilterPresent = 7 + FilterApproxMatch = 8 + FilterExtensibleMatch = 9 +) + +// FilterMap contains human readable descriptions of Filter choices +var FilterMap = map[uint64]string{ + FilterAnd: "And", + FilterOr: "Or", + FilterNot: "Not", + FilterEqualityMatch: "Equality Match", + FilterSubstrings: "Substrings", + FilterGreaterOrEqual: "Greater Or Equal", + FilterLessOrEqual: "Less Or Equal", + FilterPresent: "Present", + FilterApproxMatch: "Approx Match", + FilterExtensibleMatch: "Extensible Match", +} + +// SubstringFilter options +const ( + FilterSubstringsInitial = 0 + FilterSubstringsAny = 1 + FilterSubstringsFinal = 2 +) + +// FilterSubstringsMap contains human readable descriptions of SubstringFilter choices +var FilterSubstringsMap = map[uint64]string{ + FilterSubstringsInitial: "Substrings Initial", + FilterSubstringsAny: "Substrings Any", + FilterSubstringsFinal: "Substrings Final", +} + +// MatchingRuleAssertion choices +const ( + MatchingRuleAssertionMatchingRule = 1 + MatchingRuleAssertionType = 2 + MatchingRuleAssertionMatchValue = 3 + MatchingRuleAssertionDNAttributes = 4 +) + +// MatchingRuleAssertionMap contains human readable descriptions of MatchingRuleAssertion choices +var MatchingRuleAssertionMap = map[uint64]string{ + MatchingRuleAssertionMatchingRule: "Matching Rule Assertion Matching Rule", + MatchingRuleAssertionType: "Matching Rule Assertion Type", + MatchingRuleAssertionMatchValue: "Matching Rule Assertion Match Value", + MatchingRuleAssertionDNAttributes: "Matching Rule Assertion DN Attributes", +} + +// CompileFilter converts a string representation of a filter into a BER-encoded packet +func CompileFilter(filter 
string) (*ber.Packet, error) { + if len(filter) == 0 || filter[0] != '(' { + return nil, NewError(ErrorFilterCompile, errors.New("ldap: filter does not start with an '('")) + } + packet, pos, err := compileFilter(filter, 1) + if err != nil { + return nil, err + } + switch { + case pos > len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + case pos < len(filter): + return nil, NewError(ErrorFilterCompile, errors.New("ldap: finished compiling filter with extra at end: "+fmt.Sprint(filter[pos:]))) + } + return packet, nil +} + +// DecompileFilter converts a packet representation of a filter into a string representation +func DecompileFilter(packet *ber.Packet) (ret string, err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterDecompile, errors.New("ldap: error decompiling filter")) + } + }() + ret = "(" + err = nil + childStr := "" + + switch packet.Tag { + case FilterAnd: + ret += "&" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterOr: + ret += "|" + for _, child := range packet.Children { + childStr, err = DecompileFilter(child) + if err != nil { + return + } + ret += childStr + } + case FilterNot: + ret += "!" + childStr, err = DecompileFilter(packet.Children[0]) + if err != nil { + return + } + ret += childStr + + case FilterSubstrings: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + for i, child := range packet.Children[1].Children { + if i == 0 && child.Tag != FilterSubstringsInitial { + ret += "*" + } + ret += EscapeFilter(ber.DecodeString(child.Data.Bytes())) + if child.Tag != FilterSubstringsFinal { + ret += "*" + } + } + case FilterEqualityMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterGreaterOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += ">=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterLessOrEqual: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "<=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterPresent: + ret += ber.DecodeString(packet.Data.Bytes()) + ret += "=*" + case FilterApproxMatch: + ret += ber.DecodeString(packet.Children[0].Data.Bytes()) + ret += "~=" + ret += EscapeFilter(ber.DecodeString(packet.Children[1].Data.Bytes())) + case FilterExtensibleMatch: + attr := "" + dnAttributes := false + matchingRule := "" + value := "" + + for _, child := range packet.Children { + switch child.Tag { + case MatchingRuleAssertionMatchingRule: + matchingRule = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionType: + attr = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionMatchValue: + value = ber.DecodeString(child.Data.Bytes()) + case MatchingRuleAssertionDNAttributes: + dnAttributes = child.Value.(bool) + } + } + + if len(attr) > 0 { + ret += attr + } + if dnAttributes { + ret += ":dn" + } + if len(matchingRule) > 0 { + ret += ":" + ret += matchingRule + } + ret += ":=" + ret += EscapeFilter(value) + } + + ret += ")" + return +} + +func compileFilterSet(filter string, pos int, parent *ber.Packet) (int, error) { + for pos < len(filter) && filter[pos] == '(' { + child, newPos, err := compileFilter(filter, pos+1) + if err != nil { + return pos, err + } + pos = newPos + parent.AppendChild(child) + } + 
if pos == len(filter) { + return pos, NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + } + + return pos + 1, nil +} + +func compileFilter(filter string, pos int) (*ber.Packet, int, error) { + var ( + packet *ber.Packet + err error + ) + + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error compiling filter")) + } + }() + newPos := pos + + currentRune, currentWidth := utf8.DecodeRuneInString(filter[newPos:]) + + switch currentRune { + case utf8.RuneError: + return nil, 0, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + case '(': + packet, newPos, err = compileFilter(filter, pos+currentWidth) + newPos++ + return packet, newPos, err + case '&': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterAnd, nil, FilterMap[FilterAnd]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '|': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterOr, nil, FilterMap[FilterOr]) + newPos, err = compileFilterSet(filter, pos+currentWidth, packet) + return packet, newPos, err + case '!': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterNot, nil, FilterMap[FilterNot]) + var child *ber.Packet + child, newPos, err = compileFilter(filter, pos+currentWidth) + packet.AppendChild(child) + return packet, newPos, err + default: + const ( + stateReadingAttr = 0 + stateReadingExtensibleMatchingRule = 1 + stateReadingCondition = 2 + ) + + state := stateReadingAttr + + attribute := "" + extensibleDNAttributes := false + extensibleMatchingRule := "" + condition := "" + + for newPos < len(filter) { + remainingFilter := filter[newPos:] + currentRune, currentWidth = utf8.DecodeRuneInString(remainingFilter) + if currentRune == ')' { + break + } + if currentRune == utf8.RuneError { + return packet, newPos, NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", newPos)) + } + + switch state { + case stateReadingAttr: + switch { + // Extensible rule, with only DN-matching + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingCondition + newPos += 5 + + // Extensible rule, with DN-matching and a matching OID + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":dn:"): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + extensibleDNAttributes = true + state = stateReadingExtensibleMatchingRule + newPos += 4 + + // Extensible rule, with attr only + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingCondition + newPos += 2 + + // Extensible rule, with no DN attribute matching + case currentRune == ':': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterExtensibleMatch, nil, FilterMap[FilterExtensibleMatch]) + state = stateReadingExtensibleMatchingRule + newPos++ + + // Equality condition + case currentRune == '=': + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterEqualityMatch, nil, FilterMap[FilterEqualityMatch]) + state = stateReadingCondition + newPos++ + + // Greater-than or equal + case currentRune == '>' && 
strings.HasPrefix(remainingFilter, ">="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterGreaterOrEqual, nil, FilterMap[FilterGreaterOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Less-than or equal + case currentRune == '<' && strings.HasPrefix(remainingFilter, "<="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterLessOrEqual, nil, FilterMap[FilterLessOrEqual]) + state = stateReadingCondition + newPos += 2 + + // Approx + case currentRune == '~' && strings.HasPrefix(remainingFilter, "~="): + packet = ber.Encode(ber.ClassContext, ber.TypeConstructed, FilterApproxMatch, nil, FilterMap[FilterApproxMatch]) + state = stateReadingCondition + newPos += 2 + + // Still reading the attribute name + default: + attribute += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingExtensibleMatchingRule: + switch { + + // Matching rule OID is done + case currentRune == ':' && strings.HasPrefix(remainingFilter, ":="): + state = stateReadingCondition + newPos += 2 + + // Still reading the matching rule oid + default: + extensibleMatchingRule += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + + case stateReadingCondition: + // append to the condition + condition += fmt.Sprintf("%c", currentRune) + newPos += currentWidth + } + } + + if newPos == len(filter) { + err = NewError(ErrorFilterCompile, errors.New("ldap: unexpected end of filter")) + return packet, newPos, err + } + if packet == nil { + err = NewError(ErrorFilterCompile, errors.New("ldap: error parsing filter")) + return packet, newPos, err + } + + switch { + case packet.Tag == FilterExtensibleMatch: + // MatchingRuleAssertion ::= SEQUENCE { + // matchingRule [1] MatchingRuleID OPTIONAL, + // type [2] AttributeDescription OPTIONAL, + // matchValue [3] AssertionValue, + // dnAttributes [4] BOOLEAN DEFAULT FALSE + // } + + // Include the matching rule oid, if specified + if len(extensibleMatchingRule) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchingRule, extensibleMatchingRule, MatchingRuleAssertionMap[MatchingRuleAssertionMatchingRule])) + } + + // Include the attribute, if specified + if len(attribute) > 0 { + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionType, attribute, MatchingRuleAssertionMap[MatchingRuleAssertionType])) + } + + // Add the value (only required child) + encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionMatchValue, encodedString, MatchingRuleAssertionMap[MatchingRuleAssertionMatchValue])) + + // Defaults to false, so only include in the sequence if true + if extensibleDNAttributes { + packet.AppendChild(ber.NewBoolean(ber.ClassContext, ber.TypePrimitive, MatchingRuleAssertionDNAttributes, extensibleDNAttributes, MatchingRuleAssertionMap[MatchingRuleAssertionDNAttributes])) + } + + case packet.Tag == FilterEqualityMatch && condition == "*": + packet = ber.NewString(ber.ClassContext, ber.TypePrimitive, FilterPresent, attribute, FilterMap[FilterPresent]) + case packet.Tag == FilterEqualityMatch && strings.Contains(condition, "*"): + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + packet.Tag = FilterSubstrings + packet.Description = FilterMap[uint64(packet.Tag)] + seq := 
ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Substrings") + parts := strings.Split(condition, "*") + for i, part := range parts { + if part == "" { + continue + } + var tag ber.Tag + switch i { + case 0: + tag = FilterSubstringsInitial + case len(parts) - 1: + tag = FilterSubstringsFinal + default: + tag = FilterSubstringsAny + } + encodedString, encodeErr := escapedStringToEncodedBytes(part) + if encodeErr != nil { + return packet, newPos, encodeErr + } + seq.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, tag, encodedString, FilterSubstringsMap[uint64(tag)])) + } + packet.AppendChild(seq) + default: + encodedString, encodeErr := escapedStringToEncodedBytes(condition) + if encodeErr != nil { + return packet, newPos, encodeErr + } + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + packet.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, encodedString, "Condition")) + } + + newPos += currentWidth + return packet, newPos, err + } +} + +// Convert from "ABC\xx\xx\xx" form to literal bytes for transport +func escapedStringToEncodedBytes(escapedString string) (string, error) { + var buffer bytes.Buffer + i := 0 + for i < len(escapedString) { + currentRune, currentWidth := utf8.DecodeRuneInString(escapedString[i:]) + if currentRune == utf8.RuneError { + return "", NewError(ErrorFilterCompile, fmt.Errorf("ldap: error reading rune at position %d", i)) + } + + // Check for escaped hex characters and convert them to their literal value for transport. + if currentRune == '\\' { + // http://tools.ietf.org/search/rfc4515 + // \ (%x5C) is not a valid character unless it is followed by two HEX characters due to not + // being a member of UTF1SUBSET. + if i+2 > len(escapedString) { + return "", NewError(ErrorFilterCompile, errors.New("ldap: missing characters for escape in filter")) + } + escByte, decodeErr := hexpac.DecodeString(escapedString[i+1 : i+3]) + if decodeErr != nil { + return "", NewError(ErrorFilterCompile, errors.New("ldap: invalid characters for escape in filter")) + } + buffer.WriteByte(escByte[0]) + i += 2 // +1 from end of loop, so 3 total for \xx. 
+ } else { + buffer.WriteRune(currentRune) + } + + i += currentWidth + } + return buffer.String(), nil +} diff --git a/vendor/github.com/go-ldap/ldap/ldap.go b/vendor/github.com/go-ldap/ldap/ldap.go new file mode 100644 index 0000000..d766667 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/ldap.go @@ -0,0 +1,338 @@ +package ldap + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + + "gopkg.in/asn1-ber.v1" +) + +// LDAP Application Codes +const ( + ApplicationBindRequest = 0 + ApplicationBindResponse = 1 + ApplicationUnbindRequest = 2 + ApplicationSearchRequest = 3 + ApplicationSearchResultEntry = 4 + ApplicationSearchResultDone = 5 + ApplicationModifyRequest = 6 + ApplicationModifyResponse = 7 + ApplicationAddRequest = 8 + ApplicationAddResponse = 9 + ApplicationDelRequest = 10 + ApplicationDelResponse = 11 + ApplicationModifyDNRequest = 12 + ApplicationModifyDNResponse = 13 + ApplicationCompareRequest = 14 + ApplicationCompareResponse = 15 + ApplicationAbandonRequest = 16 + ApplicationSearchResultReference = 19 + ApplicationExtendedRequest = 23 + ApplicationExtendedResponse = 24 +) + +// ApplicationMap contains human readable descriptions of LDAP Application Codes +var ApplicationMap = map[uint8]string{ + ApplicationBindRequest: "Bind Request", + ApplicationBindResponse: "Bind Response", + ApplicationUnbindRequest: "Unbind Request", + ApplicationSearchRequest: "Search Request", + ApplicationSearchResultEntry: "Search Result Entry", + ApplicationSearchResultDone: "Search Result Done", + ApplicationModifyRequest: "Modify Request", + ApplicationModifyResponse: "Modify Response", + ApplicationAddRequest: "Add Request", + ApplicationAddResponse: "Add Response", + ApplicationDelRequest: "Del Request", + ApplicationDelResponse: "Del Response", + ApplicationModifyDNRequest: "Modify DN Request", + ApplicationModifyDNResponse: "Modify DN Response", + ApplicationCompareRequest: "Compare Request", + ApplicationCompareResponse: "Compare Response", + ApplicationAbandonRequest: "Abandon Request", + ApplicationSearchResultReference: "Search Result Reference", + ApplicationExtendedRequest: "Extended Request", + ApplicationExtendedResponse: "Extended Response", +} + +// Ldap Behera Password Policy Draft 10 (https://tools.ietf.org/html/draft-behera-ldap-password-policy-10) +const ( + BeheraPasswordExpired = 0 + BeheraAccountLocked = 1 + BeheraChangeAfterReset = 2 + BeheraPasswordModNotAllowed = 3 + BeheraMustSupplyOldPassword = 4 + BeheraInsufficientPasswordQuality = 5 + BeheraPasswordTooShort = 6 + BeheraPasswordTooYoung = 7 + BeheraPasswordInHistory = 8 +) + +// BeheraPasswordPolicyErrorMap contains human readable descriptions of Behera Password Policy error codes +var BeheraPasswordPolicyErrorMap = map[int8]string{ + BeheraPasswordExpired: "Password expired", + BeheraAccountLocked: "Account locked", + BeheraChangeAfterReset: "Password must be changed", + BeheraPasswordModNotAllowed: "Policy prevents password modification", + BeheraMustSupplyOldPassword: "Policy requires old password in order to change password", + BeheraInsufficientPasswordQuality: "Password fails quality checks", + BeheraPasswordTooShort: "Password is too short for policy", + BeheraPasswordTooYoung: "Password has been changed too recently", + BeheraPasswordInHistory: "New password is in list of old passwords", +} + +// Adds descriptions to an LDAP Response packet for debugging +func addLDAPDescriptions(packet *ber.Packet) (err error) { + defer func() { + if r := recover(); r != nil { + err = NewError(ErrorDebugging, 
errors.New("ldap: cannot process packet to add descriptions")) + } + }() + packet.Description = "LDAP Response" + packet.Children[0].Description = "Message ID" + + application := uint8(packet.Children[1].Tag) + packet.Children[1].Description = ApplicationMap[application] + + switch application { + case ApplicationBindRequest: + err = addRequestDescriptions(packet) + case ApplicationBindResponse: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationUnbindRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultEntry: + packet.Children[1].Children[0].Description = "Object Name" + packet.Children[1].Children[1].Description = "Attributes" + for _, child := range packet.Children[1].Children[1].Children { + child.Description = "Attribute" + child.Children[0].Description = "Attribute Name" + child.Children[1].Description = "Attribute Values" + for _, grandchild := range child.Children[1].Children { + grandchild.Description = "Attribute Value" + } + } + if len(packet.Children) == 3 { + err = addControlDescriptions(packet.Children[2]) + } + case ApplicationSearchResultDone: + err = addDefaultLDAPResponseDescriptions(packet) + case ApplicationModifyRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyResponse: + case ApplicationAddRequest: + err = addRequestDescriptions(packet) + case ApplicationAddResponse: + case ApplicationDelRequest: + err = addRequestDescriptions(packet) + case ApplicationDelResponse: + case ApplicationModifyDNRequest: + err = addRequestDescriptions(packet) + case ApplicationModifyDNResponse: + case ApplicationCompareRequest: + err = addRequestDescriptions(packet) + case ApplicationCompareResponse: + case ApplicationAbandonRequest: + err = addRequestDescriptions(packet) + case ApplicationSearchResultReference: + case ApplicationExtendedRequest: + err = addRequestDescriptions(packet) + case ApplicationExtendedResponse: + } + + return err +} + +func addControlDescriptions(packet *ber.Packet) error { + packet.Description = "Controls" + for _, child := range packet.Children { + var value *ber.Packet + controlType := "" + child.Description = "Control" + switch len(child.Children) { + case 0: + // at least one child is required for control type + return fmt.Errorf("at least one child is required for control type") + + case 1: + // just type, no criticality or value + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + + case 2: + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + // Children[1] could be criticality or value (both are optional) + // duck-type on whether this is a boolean + if _, ok := child.Children[1].Value.(bool); ok { + child.Children[1].Description = "Criticality" + } else { + child.Children[1].Description = "Control Value" + value = child.Children[1] + } + + case 3: + // criticality and value present + controlType = child.Children[0].Value.(string) + child.Children[0].Description = "Control Type (" + ControlTypeMap[controlType] + ")" + child.Children[1].Description = "Criticality" + child.Children[2].Description = "Control Value" + value = child.Children[2] + + default: + // more than 3 children is invalid + return fmt.Errorf("more than 3 children for control packet found") + } + + if value == nil { + continue + } + switch controlType { + case ControlTypePaging: + 
value.Description += " (Paging)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + valueChildren.Children[1].Value = valueChildren.Children[1].Data.Bytes() + value.AppendChild(valueChildren) + } + value.Children[0].Description = "Real Search Control Value" + value.Children[0].Children[0].Description = "Paging Size" + value.Children[0].Children[1].Description = "Cookie" + + case ControlTypeBeheraPasswordPolicy: + value.Description += " (Password Policy - Behera Draft)" + if value.Value != nil { + valueChildren, err := ber.DecodePacketErr(value.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + value.Data.Truncate(0) + value.Value = nil + value.AppendChild(valueChildren) + } + sequence := value.Children[0] + for _, child := range sequence.Children { + if child.Tag == 0 { + //Warning + warningPacket := child.Children[0] + packet, err := ber.DecodePacketErr(warningPacket.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int64) + if ok { + if warningPacket.Tag == 0 { + //timeBeforeExpiration + value.Description += " (TimeBeforeExpiration)" + warningPacket.Value = val + } else if warningPacket.Tag == 1 { + //graceAuthNsRemaining + value.Description += " (GraceAuthNsRemaining)" + warningPacket.Value = val + } + } + } else if child.Tag == 1 { + // Error + packet, err := ber.DecodePacketErr(child.Data.Bytes()) + if err != nil { + return fmt.Errorf("failed to decode data bytes: %s", err) + } + val, ok := packet.Value.(int8) + if !ok { + val = -1 + } + child.Description = "Error" + child.Value = val + } + } + } + } + return nil +} + +func addRequestDescriptions(packet *ber.Packet) error { + packet.Description = "LDAP Request" + packet.Children[0].Description = "Message ID" + packet.Children[1].Description = ApplicationMap[uint8(packet.Children[1].Tag)] + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +func addDefaultLDAPResponseDescriptions(packet *ber.Packet) error { + err := GetLDAPError(packet) + packet.Children[1].Children[0].Description = "Result Code (" + LDAPResultCodeMap[err.(*Error).ResultCode] + ")" + packet.Children[1].Children[1].Description = "Matched DN (" + err.(*Error).MatchedDN + ")" + packet.Children[1].Children[2].Description = "Error Message" + if len(packet.Children[1].Children) > 3 { + packet.Children[1].Children[3].Description = "Referral" + } + if len(packet.Children) == 3 { + return addControlDescriptions(packet.Children[2]) + } + return nil +} + +// DebugBinaryFile reads and prints packets from the given filename +func DebugBinaryFile(fileName string) error { + file, err := ioutil.ReadFile(fileName) + if err != nil { + return NewError(ErrorDebugging, err) + } + ber.PrintBytes(os.Stdout, file, "") + packet, err := ber.DecodePacketErr(file) + if err != nil { + return fmt.Errorf("failed to decode packet: %s", err) + } + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + + return nil +} + +var hex = "0123456789abcdef" + +func mustEscape(c byte) bool { + return c > 0x7f || c == '(' || c == ')' || c == '\\' || c == '*' || c == 0 +} + +// EscapeFilter escapes from the provided LDAP filter string the special +// characters in the set `()*\` and those out of the range 0 < c < 0x80, +// as defined 
in RFC4515. +func EscapeFilter(filter string) string { + escape := 0 + for i := 0; i < len(filter); i++ { + if mustEscape(filter[i]) { + escape++ + } + } + if escape == 0 { + return filter + } + buf := make([]byte, len(filter)+escape*2) + for i, j := 0, 0; i < len(filter); i++ { + c := filter[i] + if mustEscape(c) { + buf[j+0] = '\\' + buf[j+1] = hex[c>>4] + buf[j+2] = hex[c&0xf] + j += 3 + } else { + buf[j] = c + j++ + } + } + return string(buf) +} diff --git a/vendor/github.com/go-ldap/ldap/moddn.go b/vendor/github.com/go-ldap/ldap/moddn.go new file mode 100644 index 0000000..803279d --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/moddn.go @@ -0,0 +1,104 @@ +// Package ldap - moddn.go contains ModifyDN functionality +// +// https://tools.ietf.org/html/rfc4511 +// ModifyDNRequest ::= [APPLICATION 12] SEQUENCE { +// entry LDAPDN, +// newrdn RelativeLDAPDN, +// deleteoldrdn BOOLEAN, +// newSuperior [0] LDAPDN OPTIONAL } +// +// +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// ModifyDNRequest holds the request to modify a DN +type ModifyDNRequest struct { + DN string + NewRDN string + DeleteOldRDN bool + NewSuperior string +} + +// NewModifyDNRequest creates a new request which can be passed to ModifyDN(). +// +// To move an object in the tree, set the "newSup" to the new parent entry DN. Use an +// empty string for just changing the object's RDN. +// +// For moving the object without renaming, the "rdn" must be the first +// RDN of the given DN. +// +// A call like +// mdnReq := NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "") +// will setup the request to just rename uid=someone,dc=example,dc=org to +// uid=newname,dc=example,dc=org. +func NewModifyDNRequest(dn string, rdn string, delOld bool, newSup string) *ModifyDNRequest { + return &ModifyDNRequest{ + DN: dn, + NewRDN: rdn, + DeleteOldRDN: delOld, + NewSuperior: newSup, + } +} + +func (m ModifyDNRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyDNRequest, nil, "Modify DN Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.NewRDN, "New RDN")) + request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, m.DeleteOldRDN, "Delete old RDN")) + if m.NewSuperior != "" { + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, m.NewSuperior, "New Superior")) + } + return request +} + +// ModifyDN renames the given DN and optionally move to another base (when the "newSup" argument +// to NewModifyDNRequest() is not ""). 
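Editorial usage sketch (not part of the patch): the rename example in the NewModifyDNRequest doc comment above can be rounded out into a runnable program, including the "move to a new parent" case handled by Conn.ModifyDN below. ldap.Dial and Conn.Close are assumed from conn.go outside this hunk, and all DNs are placeholders.

```go
// Sketch of renaming and moving an entry with ModifyDN.
// Assumes ldap.Dial / Conn.Close from conn.go (not in this hunk).
package main

import (
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Rename in place: uid=someone,... becomes uid=newname,...; the old RDN value is removed.
	rename := ldap.NewModifyDNRequest("uid=someone,dc=example,dc=org", "uid=newname", true, "")
	if err := conn.ModifyDN(rename); err != nil {
		log.Fatal(err)
	}

	// Move without renaming: keep the current RDN and pass the new parent as newSuperior.
	move := ldap.NewModifyDNRequest("uid=newname,dc=example,dc=org", "uid=newname", true,
		"ou=people,dc=example,dc=org")
	if err := conn.ModifyDN(move); err != nil {
		log.Fatal(err)
	}
}
```

The third argument (deleteOldRDN) controls whether the previous RDN attribute value is kept on the entry; true is the common choice when the RDN value itself is not changing.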
+func (l *Conn) ModifyDN(m *ModifyDNRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(m.encode()) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationModifyDNResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/modify.go b/vendor/github.com/go-ldap/ldap/modify.go new file mode 100644 index 0000000..d83e622 --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/modify.go @@ -0,0 +1,173 @@ +// File contains Modify functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// ModifyRequest ::= [APPLICATION 6] SEQUENCE { +// object LDAPDN, +// changes SEQUENCE OF change SEQUENCE { +// operation ENUMERATED { +// add (0), +// delete (1), +// replace (2), +// ... }, +// modification PartialAttribute } } +// +// PartialAttribute ::= SEQUENCE { +// type AttributeDescription, +// vals SET OF value AttributeValue } +// +// AttributeDescription ::= LDAPString +// -- Constrained to +// -- [RFC4512] +// +// AttributeValue ::= OCTET STRING +// + +package ldap + +import ( + "errors" + "log" + + "gopkg.in/asn1-ber.v1" +) + +// Change operation choices +const ( + AddAttribute = 0 + DeleteAttribute = 1 + ReplaceAttribute = 2 +) + +// PartialAttribute for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type PartialAttribute struct { + // Type is the type of the partial attribute + Type string + // Vals are the values of the partial attribute + Vals []string +} + +func (p *PartialAttribute) encode() *ber.Packet { + seq := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "PartialAttribute") + seq.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, p.Type, "Type")) + set := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSet, nil, "AttributeValue") + for _, value := range p.Vals { + set.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, value, "Vals")) + } + seq.AppendChild(set) + return seq +} + +// Change for a ModifyRequest as defined in https://tools.ietf.org/html/rfc4511 +type Change struct { + // Operation is the type of change to be made + Operation uint + // Modification is the attribute to be modified + Modification PartialAttribute +} + +func (c *Change) encode() *ber.Packet { + change := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Change") + change.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(c.Operation), "Operation")) + change.AppendChild(c.Modification.encode()) + return change +} + +// ModifyRequest as defined in 
https://tools.ietf.org/html/rfc4511 +type ModifyRequest struct { + // DN is the distinguishedName of the directory entry to modify + DN string + // Changes contain the attributes to modify + Changes []Change + // Controls hold optional controls to send with the request + Controls []Control +} + +// Add appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Add(attrType string, attrVals []string) { + m.appendChange(AddAttribute, attrType, attrVals) +} + +// Delete appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Delete(attrType string, attrVals []string) { + m.appendChange(DeleteAttribute, attrType, attrVals) +} + +// Replace appends the given attribute to the list of changes to be made +func (m *ModifyRequest) Replace(attrType string, attrVals []string) { + m.appendChange(ReplaceAttribute, attrType, attrVals) +} + +func (m *ModifyRequest) appendChange(operation uint, attrType string, attrVals []string) { + m.Changes = append(m.Changes, Change{operation, PartialAttribute{Type: attrType, Vals: attrVals}}) +} + +func (m ModifyRequest) encode() *ber.Packet { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationModifyRequest, nil, "Modify Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, m.DN, "DN")) + changes := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Changes") + for _, change := range m.Changes { + changes.AppendChild(change.encode()) + } + request.AppendChild(changes) + return request +} + +// NewModifyRequest creates a modify request for the given DN +func NewModifyRequest( + dn string, + controls []Control, +) *ModifyRequest { + return &ModifyRequest{ + DN: dn, + Controls: controls, + } +} + +// Modify performs the ModifyRequest +func (l *Conn) Modify(modifyRequest *ModifyRequest) error { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + packet.AppendChild(modifyRequest.encode()) + if len(modifyRequest.Controls) > 0 { + packet.AppendChild(encodeControls(modifyRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return err + } + defer l.finishMessage(msgCtx) + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationModifyResponse { + err := GetLDAPError(packet) + if err != nil { + return err + } + } else { + log.Printf("Unexpected Response: %d", packet.Children[1].Tag) + } + + l.Debug.Printf("%d: returning", msgCtx.id) + return nil +} diff --git a/vendor/github.com/go-ldap/ldap/passwdmodify.go b/vendor/github.com/go-ldap/ldap/passwdmodify.go new file mode 100644 index 0000000..06bc21d --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/passwdmodify.go @@ -0,0 +1,157 @@ +// This file contains the password modify extended operation as specified in rfc 3062 +// +// https://tools.ietf.org/html/rfc3062 +// + +package ldap + +import ( + "errors" + "fmt" 
+ + "gopkg.in/asn1-ber.v1" +) + +const ( + passwordModifyOID = "1.3.6.1.4.1.4203.1.11.1" +) + +// PasswordModifyRequest implements the Password Modify Extended Operation as defined in https://www.ietf.org/rfc/rfc3062.txt +type PasswordModifyRequest struct { + // UserIdentity is an optional string representation of the user associated with the request. + // This string may or may not be an LDAPDN [RFC2253]. + // If no UserIdentity field is present, the request acts up upon the password of the user currently associated with the LDAP session + UserIdentity string + // OldPassword, if present, contains the user's current password + OldPassword string + // NewPassword, if present, contains the desired password for this user + NewPassword string +} + +// PasswordModifyResult holds the server response to a PasswordModifyRequest +type PasswordModifyResult struct { + // GeneratedPassword holds a password generated by the server, if present + GeneratedPassword string + // Referral are the returned referral + Referral string +} + +func (r *PasswordModifyRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationExtendedRequest, nil, "Password Modify Extended Operation") + request.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, passwordModifyOID, "Extended Request Name: Password Modify OID")) + extendedRequestValue := ber.Encode(ber.ClassContext, ber.TypePrimitive, 1, nil, "Extended Request Value: Password Modify Request") + passwordModifyRequestValue := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Password Modify Request") + if r.UserIdentity != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 0, r.UserIdentity, "User Identity")) + } + if r.OldPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 1, r.OldPassword, "Old Password")) + } + if r.NewPassword != "" { + passwordModifyRequestValue.AppendChild(ber.NewString(ber.ClassContext, ber.TypePrimitive, 2, r.NewPassword, "New Password")) + } + + extendedRequestValue.AppendChild(passwordModifyRequestValue) + request.AppendChild(extendedRequestValue) + + return request, nil +} + +// NewPasswordModifyRequest creates a new PasswordModifyRequest +// +// According to the RFC 3602: +// userIdentity is a string representing the user associated with the request. +// This string may or may not be an LDAPDN (RFC 2253). +// If userIdentity is empty then the operation will act on the user associated +// with the session. +// +// oldPassword is the current user's password, it can be empty or it can be +// needed depending on the session user access rights (usually an administrator +// can change a user's password without knowing the current one) and the +// password policy (see pwdSafeModify password policy's attribute) +// +// newPassword is the desired user's password. 
If empty the server can return +// an error or generate a new password that will be available in the +// PasswordModifyResult.GeneratedPassword +// +func NewPasswordModifyRequest(userIdentity string, oldPassword string, newPassword string) *PasswordModifyRequest { + return &PasswordModifyRequest{ + UserIdentity: userIdentity, + OldPassword: oldPassword, + NewPassword: newPassword, + } +} + +// PasswordModify performs the modification request +func (l *Conn) PasswordModify(passwordModifyRequest *PasswordModifyRequest) (*PasswordModifyResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + + encodedPasswordModifyRequest, err := passwordModifyRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedPasswordModifyRequest) + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &PasswordModifyResult{} + + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if packet == nil { + return nil, NewError(ErrorNetwork, errors.New("ldap: could not retrieve message")) + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + if packet.Children[1].Tag == ApplicationExtendedResponse { + err := GetLDAPError(packet) + if err != nil { + if IsErrorWithCode(err, LDAPResultReferral) { + for _, child := range packet.Children[1].Children { + if child.Tag == 3 { + result.Referral = child.Children[0].Value.(string) + } + } + } + return result, err + } + } else { + return nil, NewError(ErrorUnexpectedResponse, fmt.Errorf("unexpected Response: %d", packet.Children[1].Tag)) + } + + extendedResponse := packet.Children[1] + for _, child := range extendedResponse.Children { + if child.Tag == 11 { + passwordModifyResponseValue := ber.DecodePacket(child.Data.Bytes()) + if len(passwordModifyResponseValue.Children) == 1 { + if passwordModifyResponseValue.Children[0].Tag == 0 { + result.GeneratedPassword = ber.DecodeString(passwordModifyResponseValue.Children[0].Data.Bytes()) + } + } + } + } + + return result, nil +} diff --git a/vendor/github.com/go-ldap/ldap/search.go b/vendor/github.com/go-ldap/ldap/search.go new file mode 100644 index 0000000..3aa6dac --- /dev/null +++ b/vendor/github.com/go-ldap/ldap/search.go @@ -0,0 +1,450 @@ +// File contains Search functionality +// +// https://tools.ietf.org/html/rfc4511 +// +// SearchRequest ::= [APPLICATION 3] SEQUENCE { +// baseObject LDAPDN, +// scope ENUMERATED { +// baseObject (0), +// singleLevel (1), +// wholeSubtree (2), +// ... }, +// derefAliases ENUMERATED { +// neverDerefAliases (0), +// derefInSearching (1), +// derefFindingBaseObj (2), +// derefAlways (3) }, +// sizeLimit INTEGER (0 .. maxInt), +// timeLimit INTEGER (0 .. 
maxInt), +// typesOnly BOOLEAN, +// filter Filter, +// attributes AttributeSelection } +// +// AttributeSelection ::= SEQUENCE OF selector LDAPString +// -- The LDAPString is constrained to +// -- in Section 4.5.1.8 +// +// Filter ::= CHOICE { +// and [0] SET SIZE (1..MAX) OF filter Filter, +// or [1] SET SIZE (1..MAX) OF filter Filter, +// not [2] Filter, +// equalityMatch [3] AttributeValueAssertion, +// substrings [4] SubstringFilter, +// greaterOrEqual [5] AttributeValueAssertion, +// lessOrEqual [6] AttributeValueAssertion, +// present [7] AttributeDescription, +// approxMatch [8] AttributeValueAssertion, +// extensibleMatch [9] MatchingRuleAssertion, +// ... } +// +// SubstringFilter ::= SEQUENCE { +// type AttributeDescription, +// substrings SEQUENCE SIZE (1..MAX) OF substring CHOICE { +// initial [0] AssertionValue, -- can occur at most once +// any [1] AssertionValue, +// final [2] AssertionValue } -- can occur at most once +// } +// +// MatchingRuleAssertion ::= SEQUENCE { +// matchingRule [1] MatchingRuleId OPTIONAL, +// type [2] AttributeDescription OPTIONAL, +// matchValue [3] AssertionValue, +// dnAttributes [4] BOOLEAN DEFAULT FALSE } +// +// + +package ldap + +import ( + "errors" + "fmt" + "sort" + "strings" + + "gopkg.in/asn1-ber.v1" +) + +// scope choices +const ( + ScopeBaseObject = 0 + ScopeSingleLevel = 1 + ScopeWholeSubtree = 2 +) + +// ScopeMap contains human readable descriptions of scope choices +var ScopeMap = map[int]string{ + ScopeBaseObject: "Base Object", + ScopeSingleLevel: "Single Level", + ScopeWholeSubtree: "Whole Subtree", +} + +// derefAliases +const ( + NeverDerefAliases = 0 + DerefInSearching = 1 + DerefFindingBaseObj = 2 + DerefAlways = 3 +) + +// DerefMap contains human readable descriptions of derefAliases choices +var DerefMap = map[int]string{ + NeverDerefAliases: "NeverDerefAliases", + DerefInSearching: "DerefInSearching", + DerefFindingBaseObj: "DerefFindingBaseObj", + DerefAlways: "DerefAlways", +} + +// NewEntry returns an Entry object with the specified distinguished name and attribute key-value pairs. 
+// The map of attributes is accessed in alphabetical order of the keys in order to ensure that, for the +// same input map of attributes, the output entry will contain the same order of attributes +func NewEntry(dn string, attributes map[string][]string) *Entry { + var attributeNames []string + for attributeName := range attributes { + attributeNames = append(attributeNames, attributeName) + } + sort.Strings(attributeNames) + + var encodedAttributes []*EntryAttribute + for _, attributeName := range attributeNames { + encodedAttributes = append(encodedAttributes, NewEntryAttribute(attributeName, attributes[attributeName])) + } + return &Entry{ + DN: dn, + Attributes: encodedAttributes, + } +} + +// Entry represents a single search result entry +type Entry struct { + // DN is the distinguished name of the entry + DN string + // Attributes are the returned attributes for the entry + Attributes []*EntryAttribute +} + +// GetAttributeValues returns the values for the named attribute, or an empty list +func (e *Entry) GetAttributeValues(attribute string) []string { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.Values + } + } + return []string{} +} + +// GetRawAttributeValues returns the byte values for the named attribute, or an empty list +func (e *Entry) GetRawAttributeValues(attribute string) [][]byte { + for _, attr := range e.Attributes { + if attr.Name == attribute { + return attr.ByteValues + } + } + return [][]byte{} +} + +// GetAttributeValue returns the first value for the named attribute, or "" +func (e *Entry) GetAttributeValue(attribute string) string { + values := e.GetAttributeValues(attribute) + if len(values) == 0 { + return "" + } + return values[0] +} + +// GetRawAttributeValue returns the first value for the named attribute, or an empty slice +func (e *Entry) GetRawAttributeValue(attribute string) []byte { + values := e.GetRawAttributeValues(attribute) + if len(values) == 0 { + return []byte{} + } + return values[0] +} + +// Print outputs a human-readable description +func (e *Entry) Print() { + fmt.Printf("DN: %s\n", e.DN) + for _, attr := range e.Attributes { + attr.Print() + } +} + +// PrettyPrint outputs a human-readable description indenting +func (e *Entry) PrettyPrint(indent int) { + fmt.Printf("%sDN: %s\n", strings.Repeat(" ", indent), e.DN) + for _, attr := range e.Attributes { + attr.PrettyPrint(indent + 2) + } +} + +// NewEntryAttribute returns a new EntryAttribute with the desired key-value pair +func NewEntryAttribute(name string, values []string) *EntryAttribute { + var bytes [][]byte + for _, value := range values { + bytes = append(bytes, []byte(value)) + } + return &EntryAttribute{ + Name: name, + Values: values, + ByteValues: bytes, + } +} + +// EntryAttribute holds a single attribute +type EntryAttribute struct { + // Name is the name of the attribute + Name string + // Values contain the string values of the attribute + Values []string + // ByteValues contain the raw values of the attribute + ByteValues [][]byte +} + +// Print outputs a human-readable description +func (e *EntryAttribute) Print() { + fmt.Printf("%s: %s\n", e.Name, e.Values) +} + +// PrettyPrint outputs a human-readable description with indenting +func (e *EntryAttribute) PrettyPrint(indent int) { + fmt.Printf("%s%s: %s\n", strings.Repeat(" ", indent), e.Name, e.Values) +} + +// SearchResult holds the server's response to a search request +type SearchResult struct { + // Entries are the returned entries + Entries []*Entry + // Referrals are the returned 
referrals + Referrals []string + // Controls are the returned controls + Controls []Control +} + +// Print outputs a human-readable description +func (s *SearchResult) Print() { + for _, entry := range s.Entries { + entry.Print() + } +} + +// PrettyPrint outputs a human-readable description with indenting +func (s *SearchResult) PrettyPrint(indent int) { + for _, entry := range s.Entries { + entry.PrettyPrint(indent) + } +} + +// SearchRequest represents a search request to send to the server +type SearchRequest struct { + BaseDN string + Scope int + DerefAliases int + SizeLimit int + TimeLimit int + TypesOnly bool + Filter string + Attributes []string + Controls []Control +} + +func (s *SearchRequest) encode() (*ber.Packet, error) { + request := ber.Encode(ber.ClassApplication, ber.TypeConstructed, ApplicationSearchRequest, nil, "Search Request") + request.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, s.BaseDN, "Base DN")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.Scope), "Scope")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagEnumerated, uint64(s.DerefAliases), "Deref Aliases")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.SizeLimit), "Size Limit")) + request.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, uint64(s.TimeLimit), "Time Limit")) + request.AppendChild(ber.NewBoolean(ber.ClassUniversal, ber.TypePrimitive, ber.TagBoolean, s.TypesOnly, "Types Only")) + // compile and encode filter + filterPacket, err := CompileFilter(s.Filter) + if err != nil { + return nil, err + } + request.AppendChild(filterPacket) + // encode attributes + attributesPacket := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "Attributes") + for _, attribute := range s.Attributes { + attributesPacket.AppendChild(ber.NewString(ber.ClassUniversal, ber.TypePrimitive, ber.TagOctetString, attribute, "Attribute")) + } + request.AppendChild(attributesPacket) + return request, nil +} + +// NewSearchRequest creates a new search request +func NewSearchRequest( + BaseDN string, + Scope, DerefAliases, SizeLimit, TimeLimit int, + TypesOnly bool, + Filter string, + Attributes []string, + Controls []Control, +) *SearchRequest { + return &SearchRequest{ + BaseDN: BaseDN, + Scope: Scope, + DerefAliases: DerefAliases, + SizeLimit: SizeLimit, + TimeLimit: TimeLimit, + TypesOnly: TypesOnly, + Filter: Filter, + Attributes: Attributes, + Controls: Controls, + } +} + +// SearchWithPaging accepts a search request and desired page size in order to execute LDAP queries to fulfill the +// search request. All paged LDAP query responses will be buffered and the final result will be returned atomically. +// The following four cases are possible given the arguments: +// - given SearchRequest missing a control of type ControlTypePaging: we will add one with the desired paging size +// - given SearchRequest contains a control of type ControlTypePaging that isn't actually a ControlPaging: fail without issuing any queries +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize equal to the size requested: no change to the search request +// - given SearchRequest contains a control of type ControlTypePaging with pagingSize not equal to the size requested: fail without issuing any queries +// A requested pagingSize of 0 is interpreted as no limit by LDAP servers. 
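Editorial usage sketch (not part of the patch): a paged subtree search ties together EscapeFilter from ldap.go above with NewSearchRequest and the SearchWithPaging helper defined below. ldap.Dial and Conn.Close are again assumed from conn.go outside this hunk, and the host, base DN, filter, and attribute names are placeholders.

```go
// Sketch of a paged subtree search with the vendored go-ldap helpers.
// Assumes ldap.Dial / Conn.Close from conn.go (not in this hunk).
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap"
)

func main() {
	conn, err := ldap.Dial("tcp", "ldap.example.com:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// EscapeFilter guards against filter injection when user input lands in the filter string.
	filter := fmt.Sprintf("(cn=%s*)", ldap.EscapeFilter("smith"))

	req := ldap.NewSearchRequest(
		"dc=example,dc=org",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		filter,
		[]string{"cn", "mail"},
		nil,
	)

	// SearchWithPaging adds (or validates) a paging control and buffers all pages.
	result, err := conn.SearchWithPaging(req, 100)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range result.Entries {
		fmt.Printf("%s: %s\n", entry.DN, entry.GetAttributeValue("mail"))
	}
}
```

Per the implementation below, the helper keeps issuing searches until the server returns an empty paging cookie, and if the loop exits early it sends a final request with a paging size of 0 to abandon the outstanding paged result.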
+func (l *Conn) SearchWithPaging(searchRequest *SearchRequest, pagingSize uint32) (*SearchResult, error) { + var pagingControl *ControlPaging + + control := FindControl(searchRequest.Controls, ControlTypePaging) + if control == nil { + pagingControl = NewControlPaging(pagingSize) + searchRequest.Controls = append(searchRequest.Controls, pagingControl) + } else { + castControl, ok := control.(*ControlPaging) + if !ok { + return nil, fmt.Errorf("expected paging control to be of type *ControlPaging, got %v", control) + } + if castControl.PagingSize != pagingSize { + return nil, fmt.Errorf("paging size given in search request (%d) conflicts with size given in search call (%d)", castControl.PagingSize, pagingSize) + } + pagingControl = castControl + } + + searchResult := new(SearchResult) + for { + result, err := l.Search(searchRequest) + l.Debug.Printf("Looking for Paging Control...") + if err != nil { + return searchResult, err + } + if result == nil { + return searchResult, NewError(ErrorNetwork, errors.New("ldap: packet not received")) + } + + for _, entry := range result.Entries { + searchResult.Entries = append(searchResult.Entries, entry) + } + for _, referral := range result.Referrals { + searchResult.Referrals = append(searchResult.Referrals, referral) + } + for _, control := range result.Controls { + searchResult.Controls = append(searchResult.Controls, control) + } + + l.Debug.Printf("Looking for Paging Control...") + pagingResult := FindControl(result.Controls, ControlTypePaging) + if pagingResult == nil { + pagingControl = nil + l.Debug.Printf("Could not find paging control. Breaking...") + break + } + + cookie := pagingResult.(*ControlPaging).Cookie + if len(cookie) == 0 { + pagingControl = nil + l.Debug.Printf("Could not find cookie. Breaking...") + break + } + pagingControl.SetCookie(cookie) + } + + if pagingControl != nil { + l.Debug.Printf("Abandoning Paging...") + pagingControl.PagingSize = 0 + l.Search(searchRequest) + } + + return searchResult, nil +} + +// Search performs the given search request +func (l *Conn) Search(searchRequest *SearchRequest) (*SearchResult, error) { + packet := ber.Encode(ber.ClassUniversal, ber.TypeConstructed, ber.TagSequence, nil, "LDAP Request") + packet.AppendChild(ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, l.nextMessageID(), "MessageID")) + // encode search request + encodedSearchRequest, err := searchRequest.encode() + if err != nil { + return nil, err + } + packet.AppendChild(encodedSearchRequest) + // encode search controls + if len(searchRequest.Controls) > 0 { + packet.AppendChild(encodeControls(searchRequest.Controls)) + } + + l.Debug.PrintPacket(packet) + + msgCtx, err := l.sendMessage(packet) + if err != nil { + return nil, err + } + defer l.finishMessage(msgCtx) + + result := &SearchResult{ + Entries: make([]*Entry, 0), + Referrals: make([]string, 0), + Controls: make([]Control, 0)} + + foundSearchResultDone := false + for !foundSearchResultDone { + l.Debug.Printf("%d: waiting for response", msgCtx.id) + packetResponse, ok := <-msgCtx.responses + if !ok { + return nil, NewError(ErrorNetwork, errors.New("ldap: response channel closed")) + } + packet, err = packetResponse.ReadPacket() + l.Debug.Printf("%d: got response %p", msgCtx.id, packet) + if err != nil { + return nil, err + } + + if l.Debug { + if err := addLDAPDescriptions(packet); err != nil { + return nil, err + } + ber.PrintPacket(packet) + } + + switch packet.Children[1].Tag { + case 4: + entry := new(Entry) + entry.DN = 
packet.Children[1].Children[0].Value.(string) + for _, child := range packet.Children[1].Children[1].Children { + attr := new(EntryAttribute) + attr.Name = child.Children[0].Value.(string) + for _, value := range child.Children[1].Children { + attr.Values = append(attr.Values, value.Value.(string)) + attr.ByteValues = append(attr.ByteValues, value.ByteValue) + } + entry.Attributes = append(entry.Attributes, attr) + } + result.Entries = append(result.Entries, entry) + case 5: + err := GetLDAPError(packet) + if err != nil { + return nil, err + } + if len(packet.Children) == 3 { + for _, child := range packet.Children[2].Children { + decodedChild, err := DecodeControl(child) + if err != nil { + return nil, fmt.Errorf("failed to decode child control: %s", err) + } + result.Controls = append(result.Controls, decodedChild) + } + } + foundSearchResultDone = true + case 19: + result.Referrals = append(result.Referrals, packet.Children[1].Children[0].Value.(string)) + } + } + l.Debug.Printf("%d: returning", msgCtx.id) + return result, nil +} diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 0000000..c356960 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000..5d37e29 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 0000000..69b0b40 --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,275 @@ +# A minimal logging API for Go + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. 
This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." 
Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +## Implementations (non-exhaustive) + +There are implementations for the following logging libraries: + +- **a function**: [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) + +## FAQ + +### Conceptual + +#### Why structured logging? + +- **Structured logs are more easily queryable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc. + +- **Structured logging makes it easier to have cross-referenceable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc., instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-link + objects.) Structured logs allow you to preserve that structure when + outputting. + +#### Why V-levels? + +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. + +#### Why not named levels, like Info/Warning/Error? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas). + +#### Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc. + +- They don't store structured data well, since contents are flattened into + a string. + +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? 
+ +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T")` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T"))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider. + +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). 
+ +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ascii characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these implementations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 0000000..9d92a38 --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return Logger{ + level: 0, + sink: discardLogSink{}, + } +} + +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} + +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { + return false +} + +func (l discardLogSink) Info(int, string, ...interface{}) { +} + +func (l discardLogSink) Error(error, string, ...interface{}) { +} + +func (l discardLogSink) WithValues(...interface{}) LogSink { + return l +} + +func (l discardLogSink) WithName(string) LogSink { + return l +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000..bb36d5e --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,411 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site. This will not call String or +// Error methods on values. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. Alternatively, funcr can also be embedded inside +// a custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods. +package funcr + +import ( + "bytes" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, opts)) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), opts Options) logr.LogSink { + l := &fnlogger{ + Formatter: NewFormatter(opts), + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // Verbosity tells funcr which V logs to be write. Higher values enable + // more logs. + Verbosity int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +const timestampFmt = "2006-01-02 15:04:05.000000" + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +func flatten(kvList ...interface{}) string { + if len(kvList)%2 != 0 { + kvList = append(kvList, "") + } + // Empirically bytes.Buffer is faster than strings.Builder for this. 
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	for i := 0; i < len(kvList); i += 2 {
+		k, ok := kvList[i].(string)
+		if !ok {
+			k = fmt.Sprintf("<non-string-key-%d>", i/2)
+		}
+		v := kvList[i+1]
+
+		if i > 0 {
+			buf.WriteRune(' ')
+		}
+		buf.WriteRune('"')
+		buf.WriteString(k)
+		buf.WriteRune('"')
+		buf.WriteRune('=')
+		buf.WriteString(pretty(v))
+	}
+	return buf.String()
+}
+
+func pretty(value interface{}) string {
+	return prettyWithFlags(value, 0)
+}
+
+const (
+	flagRawString = 0x1
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func prettyWithFlags(value interface{}, flags uint32) string {
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		if flags&flagRawString > 0 {
+			return v
+		}
+		// This is empirically faster than strings.Builder.
+		return `"` + v + `"`
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		if flags&flagRawString > 0 {
+			return v.String()
+		}
+		// This is empirically faster than strings.Builder.
+		return `"` + v.String() + `"`
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Struct:
+		buf.WriteRune('{')
+		for i := 0; i < t.NumField(); i++ {
+			f := t.Field(i)
+			if f.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if i > 0 {
+				buf.WriteRune(',')
+			}
+			buf.WriteRune('"')
+			name := f.Name
+			if tag, found := f.Tag.Lookup("json"); found {
+				if comma := strings.Index(tag, ","); comma != -1 {
+					name = tag[:comma]
+				} else {
+					name = tag
+				}
+			}
+			buf.WriteString(name)
+			buf.WriteRune('"')
+			buf.WriteRune(':')
+			buf.WriteString(pretty(v.Field(i).Interface()))
+		}
+		buf.WriteRune('}')
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		buf.WriteRune('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteRune(',')
+			}
+			e := v.Index(i)
+			buf.WriteString(pretty(e.Interface()))
+		}
+		buf.WriteRune(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteRune('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteRune(',')
+			}
+			// JSON only does string keys.
+ buf.WriteRune('"') + buf.WriteString(prettyWithFlags(it.Key().Interface(), flagRawString)) + buf.WriteRune('"') + buf.WriteRune(':') + buf.WriteString(pretty(it.Value().Interface())) + i++ + } + buf.WriteRune('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + return pretty(v.Elem().Interface()) + } + return fmt.Sprintf(`""`, t.Kind().String()) +} + +type callerID struct { + File string `json:"file"` + Line int `json:"line"` +} + +// NewFormatter constructs a Formatter. +func NewFormatter(opts Options) Formatter { + f := Formatter{ + prefix: "", + values: nil, + depth: 0, + logCaller: opts.LogCaller, + logTimestamp: opts.LogTimestamp, + verbosity: opts.Verbosity, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + prefix string + values []interface{} + depth int + logCaller MessageClass + logTimestamp bool + verbosity int +} + +func (f Formatter) caller() callerID { + // +1 for this frame, +1 for Info/Error. + _, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return callerID{"", 0} + } + return callerID{filepath.Base(file), line} +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged. +func (f Formatter) Enabled(level int) bool { + return level <= f.verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo flattens an Info log message into strings. +// The prefix will be empty when no names were set. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + if f.logTimestamp { + args = append(args, "ts", time.Now().Format(timestampFmt)) + } + if f.logCaller == All || f.logCaller == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + args = append(args, f.values...) + args = append(args, kvList...) + return f.prefix, flatten(args...) +} + +// FormatError flattens an Error log message into strings. +// The prefix will be empty when no names were set. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + if f.logTimestamp { + args = append(args, "ts", time.Now().Format(timestampFmt)) + } + if f.logCaller == All || f.logCaller == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + args = append(args, f.values...) + args = append(args, kvList...) + return f.prefix, flatten(args...) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. 
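
Putting the pieces of this file together (New, Options, and the Formatter glue), a funcr-backed logger is typically wired up as in the sketch below. The output function and option values are arbitrary choices for illustration; only APIs defined in this file and in the logr package are used.
```
// Minimal sketch of wiring funcr into application code.
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr.New wraps an arbitrary "write" function as a logr.Logger.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{
		LogTimestamp: true,
		Verbosity:    1,
	})

	logger = logger.WithName("demo").WithValues("component", "example")
	logger.Info("started", "port", 8080)
	logger.V(1).Info("detail", "attempt", 1) // emitted: 1 <= Verbosity
	logger.V(2).Info("suppressed")           // dropped: 2 > Verbosity
}
```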
+func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. +func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-logr/logr/go.mod b/vendor/github.com/go-logr/logr/go.mod new file mode 100644 index 0000000..7baec9b --- /dev/null +++ b/vendor/github.com/go-logr/logr/go.mod @@ -0,0 +1,3 @@ +module github.com/go-logr/logr + +go 1.16 diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 0000000..4e84ab7 --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,479 @@ +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging + +// Package logr defines a general-purpose logging API and abstract interfaces +// to back that API. Packages in the Go ecosystem can depend on this package, +// while callers can implement logging with whatever backend is appropriate. +// +// # Usage +// +// Logging is done using a Logger instance. Logger is a concrete type with +// methods, which defers the actual logging to a LogSink interface. The main +// methods of Logger are Info() and Error(). Arguments to Info() and Error() +// are key/value pairs rather than printf-style formatted strings, emphasizing +// "structured logging". +// +// With Go's standard log package, we might write: +// log.Printf("setting target value %s", targetValue) +// +// With logr's structured logging, we'd write: +// logger.Info("setting target", "value", targetValue) +// +// Errors are much the same. Instead of: +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// We'd write: +// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// Info() and Error() are very similar, but they are separate methods so that +// LogSink implementations can choose to do things like attach additional +// information (such as stack traces) on calls to Error(). +// +// # Verbosity +// +// Often we want to log information only when the application in "verbose +// mode". To write log lines that are more verbose, Logger has a V() method. +// The higher the V-level of a log line, the less critical it is considered. +// Log-lines with V-levels that are not enabled (as per the LogSink) will not +// be written. Level V(0) is the default, and logger.V(0).Info() has the same +// meaning as logger.Info(). Negative V-levels have the same meaning as V(0). 
+// +// Where we might have written: +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } +// +// We can write: +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names +// +// Logger instances can have name strings so that all messages logged through +// that instance have additional context. For example, you might want to add +// a subsystem name: +// +// logger.WithName("compactor").Info("started", "time", time.Now()) +// +// The WithName() method returns a new Logger, which can be passed to +// constructors or other functions for further use. Repeated use of WithName() +// will accumulate name "segments". These name segments will be joined in some +// way by the LogSink implementation. It is strongly recommended that name +// segments contain simple identifiers (letters, digits, and hyphen), and do +// not contain characters that could muddle the log output or confuse the +// joining operation (e.g. whitespace, commas, periods, slashes, brackets, +// quotes, etc). +// +// # Saved Values +// +// Logger instances can store any number of key/value pairs, which will be +// logged alongside all messages logged through that instance. For example, +// you might want to create a Logger instance per managed object: +// +// With the standard log package, we might write: +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr we'd write: +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... +// obj.logger.Info("setting foo", "value", targetValue) +// +// # Best Practices +// +// Logger has very few hard rules, with the goal that LogSink implementations +// might have a lot of freedom to differentiate. There are, however, some +// things to consider. +// +// The log message consists of a constant message attached to the log line. +// This should generally be a simple description of what's occurring, and should +// never be a format string. Variable information can then be attached using +// named values. +// +// Keys are arbitrary strings, but should generally be constant values. Values +// may be any Go value, but how the value is formatted is determined by the +// LogSink implementation. +// +// # Key Naming Conventions +// +// Keys are not strictly required to conform to any specification or regex, but +// it is recommended that they: +// * be human-readable and meaningful (not auto-generated or simple ordinals) +// * be constant (not dependent on input data) +// * contain only printable characters +// * not contain whitespace or punctuation +// * use lower case for simple keys and lowerCamelCase for more complex ones +// +// These guidelines help ensure that log data is processed properly regardless +// of the log implementation. For example, log implementations will try to +// output JSON data or will store data for later database (e.g. SQL) queries. +// +// While users are generally free to use key names of their choice, it's +// generally best to avoid using the following keys, as they're frequently used +// by implementations: +// +// * "caller": the calling information (file/line) of a particular log line. +// * "error": the underlying error value in the `Error` method. +// * "level": the log level. +// * "logger": the name of the associated logger. +// * "msg": the log message. 
+// * "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message). +// * "ts": the timestamp for a log line. +// +// Implementations are encouraged to make use of these keys to represent the +// above concepts, when necessary (for example, in a pure-JSON output form, it +// would be necessary to represent at least message and timestamp as ordinary +// named values). +// +// # Break Glass +// +// Implementations may choose to give callers access to the underlying +// logging implementation. The recommended pattern for this is: +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. +// type Underlier interface { +// GetUnderlying() +// } +// +// Logger grants access to the sink to enable type assertions like this: +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink()(impl.Underlier) { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } +// +// Custom `With*` functions can be implemented by copying the complete +// Logger struct and replacing the sink in the copy: +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } +// +// Don't use New to construct a new Logger with a LogSink retrieved from an +// existing Logger. Source code attribution might not work correctly and +// unexported fields in Logger get lost. +// +// Beware that the same LogSink instance may be shared by different logger +// instances. Calling functions that modify the LogSink will affect all of +// those. +package logr + +import ( + "context" +) + +// New returns a new Logger instance. This is primarily used by libraries +// implementing LogSink, rather than end users. +func New(sink LogSink) Logger { + logger := Logger{} + logger.setSink(sink) + sink.Init(runtimeInfo) + return logger +} + +// setSink stores the sink and updates any related fields. It mutates the +// logger and thus is only safe to use for loggers that are not currently being +// used concurrently. +func (l *Logger) setSink(sink LogSink) { + l.sink = sink +} + +// GetSink returns the stored sink. +func (l Logger) GetSink() LogSink { + return l.sink +} + +// WithSink returns a copy of the logger with the new sink. +func (l Logger) WithSink(sink LogSink) Logger { + l.setSink(sink) + return l +} + +// Logger is an interface to an abstract logging implementation. This is a +// concrete type for performance reasons, but all the real work is passed on to +// a LogSink. Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. 
For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + return l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.Enabled() { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. +func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. +func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + l.setSink(l.sink.WithValues(keysAndValues...)) + return l +} + +// WithName returns a new Logger instance with the specified name element added +// to the Logger's name. Successive calls with WithName append additional +// suffixes to the Logger's name. It's strongly recommended that name segments +// contain only letters, digits, and hyphens (see the package documentation for +// more information). +func (l Logger) WithName(name string) Logger { + l.setSink(l.sink.WithName(name)) + return l +} + +// WithCallDepth returns a Logger instance that offsets the call stack by the +// specified number of frames when logging call site information, if possible. +// This is useful for users who have helper functions between the "real" call +// site and the actual calls to Logger methods. If depth is 0 the attribution +// should be to the direct caller of this function. If depth is 1 the +// attribution should skip 1 call frame, and so on. Successive calls to this +// are additive. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// it will be called and the result returned. If the implementation does not +// support CallDepthLogSink, the original Logger will be returned. +// +// To skip one level, WithCallStackHelper() should be used instead of +// WithCallDepth(1) because it works with implementions that support the +// CallDepthLogSink and/or CallStackHelperLogSink interfaces. 
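
To make the depth accounting concrete: a wrapper around Logger.Error needs to ask for one extra frame to be skipped, otherwise sinks that record the call site attribute every message to the wrapper itself. A minimal sketch, with a helper name invented for illustration:
```
package helpers

import "github.com/go-logr/logr"

// logIfErr is a hypothetical helper. The WithCallDepth(1) call shifts
// call-site attribution from this function to whoever called it, for
// sinks that implement CallDepthLogSink.
func logIfErr(log logr.Logger, err error, msg string, kv ...interface{}) {
	if err != nil {
		log.WithCallDepth(1).Error(err, msg, kv...)
	}
}
```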
+func (l Logger) WithCallDepth(depth int) Logger { + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(depth)) + } + return l +} + +// WithCallStackHelper returns a new Logger instance that skips the direct +// caller when logging call site information, if possible. This is useful for +// users who have helper functions between the "real" call site and the actual +// calls to Logger methods and want to support loggers which depend on marking +// each individual helper function, like loggers based on testing.T. +// +// In addition to using that new logger instance, callers also must call the +// returned function. +// +// If the underlying log implementation supports a WithCallDepth(int) method, +// WithCallDepth(1) will be called to produce a new logger. If it supports a +// WithCallStackHelper() method, that will be also called. If the +// implementation does not support either of these, the original Logger will be +// returned. +func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. +type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. 
+ // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a Logger that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. +type CallStackHelperLogSink interface { + // GetCallStackHelper returns a function that must be called + // to mark the direct caller as helper function when logging + // call site information. + GetCallStackHelper() func() +} diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/go-logr/stdr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
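Before the vendored stdr sources that follow, a brief orientation: stdr adapts Go's standard log package to the logr Logger/LogSink interfaces vendored above. The snippet below is a minimal, hypothetical sketch of how the two packages are typically wired together; the logger name, key/value pairs, verbosity level, and the error are illustrative and not taken from this repository.

```
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Allow V(1) messages through; higher V-levels remain disabled.
	stdr.SetVerbosity(1)

	// Wrap a standard *log.Logger in a logr.Logger via the stdr adapter.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
	logger = logger.WithName("example").WithValues("component", "demo")

	logger.Info("starting", "pid", os.Getpid())
	logger.V(1).Info("verbose detail", "workers", 4)
	logger.Error(errors.New("boom"), "operation failed", "attempt", 1)
}
```

Note that Info lines are emitted only when the sink's Enabled check passes for the accumulated V-level, while Error lines are always forwarded to the sink, matching the Logger.Error behavior documented above.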
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000..3faf46d
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,4 @@
+# Minimal Go logging using logr and Go's standard library
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://godoc.org/std/log).
diff --git a/vendor/github.com/go-logr/stdr/go.mod b/vendor/github.com/go-logr/stdr/go.mod
new file mode 100644
index 0000000..e5bbef7
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-logr/stdr
+
+go 1.16
+
+require github.com/go-logr/logr v1.1.0
diff --git a/vendor/github.com/go-logr/stdr/go.sum b/vendor/github.com/go-logr/stdr/go.sum
new file mode 100644
index 0000000..722ae57
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/go.sum
@@ -0,0 +1,2 @@
+github.com/go-logr/logr v1.1.0 h1:nAbevmWlS2Ic4m4+/An5NXkaGqlqpbBgdcuThZxnZyI=
+github.com/go-logr/logr v1.1.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000..93a8aab
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+ std = log.New(os.Stderr, "", log.LstdFlags) + } + + if opts.Depth < 0 { + opts.Depth = 0 + } + + fopts := funcr.Options{ + LogCaller: funcr.MessageClass(opts.LogCaller), + } + + sl := &logger{ + Formatter: funcr.NewFormatter(fopts), + std: std, + } + + // For skipping our own logger.Info/Error. + sl.Formatter.AddCallDepth(1 + opts.Depth) + + return logr.New(sl) +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // Depth biases the assumed number of call frames to the "true" caller. + // This is useful when the calling code calls a function which then calls + // stdr (e.g. a logging shim to another API). Values less than zero will + // be treated as zero. + Depth int + + // LogCaller tells stdr to add a "caller" key to some or all log lines. + // Go's log package has options to log this natively, too. + LogCaller MessageClass + + // TODO: add an option to log the date/time +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// StdLogger is the subset of the Go stdlib log.Logger API that is needed for +// this adapter. +type StdLogger interface { + // Output is the same as log.Output and log.Logger.Output. + Output(calldepth int, logline string) error +} + +type logger struct { + funcr.Formatter + std StdLogger +} + +var _ logr.LogSink = &logger{} +var _ logr.CallDepthLogSink = &logger{} + +func (l logger) Enabled(level int) bool { + return globalVerbosity >= level +} + +func (l logger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + if prefix != "" { + args = prefix + ": " + args + } + _ = l.std.Output(l.Formatter.GetDepth()+1, args) +} + +func (l logger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l logger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l logger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +// Underlier exposes access to the underlying logging implementation. Since +// callers only have a logr.Logger, they have to know which implementation is +// in use, so this interface is less of an abstraction and more of way to test +// type conversion. +type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. 
+func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/godbus/dbus/.travis.yml b/vendor/github.com/godbus/dbus/.travis.yml new file mode 100644 index 0000000..2e1bbb7 --- /dev/null +++ b/vendor/github.com/godbus/dbus/.travis.yml @@ -0,0 +1,40 @@ +dist: precise +language: go +go_import_path: github.com/godbus/dbus +sudo: true + +go: + - 1.6.3 + - 1.7.3 + - tip + +env: + global: + matrix: + - TARGET=amd64 + - TARGET=arm64 + - TARGET=arm + - TARGET=386 + - TARGET=ppc64le + +matrix: + fast_finish: true + allow_failures: + - go: tip + exclude: + - go: tip + env: TARGET=arm + - go: tip + env: TARGET=arm64 + - go: tip + env: TARGET=386 + - go: tip + env: TARGET=ppc64le + +addons: + apt: + packages: + - dbus + - dbus-x11 + +before_install: diff --git a/vendor/github.com/godbus/dbus/CONTRIBUTING.md b/vendor/github.com/godbus/dbus/CONTRIBUTING.md new file mode 100644 index 0000000..c88f9b2 --- /dev/null +++ b/vendor/github.com/godbus/dbus/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# How to Contribute + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.markdown) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +