copy langext & timeext from bm
This commit is contained in:
commit
568d7bd5e3
34
.gitignore
vendored
Normal file
34
.gitignore
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
|
||||||
|
##########################################################################
|
||||||
|
|
||||||
|
.idea/**/workspace.xml
|
||||||
|
.idea/**/tasks.xml
|
||||||
|
.idea/**/usage.statistics.xml
|
||||||
|
.idea/**/dictionaries
|
||||||
|
.idea/**/shelf
|
||||||
|
.idea/**/aws.xml
|
||||||
|
.idea/**/contentModel.xml
|
||||||
|
.idea/**/dataSources/
|
||||||
|
.idea/**/dataSources.ids
|
||||||
|
.idea/**/dataSources.local.xml
|
||||||
|
.idea/**/sqlDataSources.xml
|
||||||
|
.idea/**/dynamic.xml
|
||||||
|
.idea/**/uiDesigner.xml
|
||||||
|
.idea/**/dbnavigator.xml
|
||||||
|
.idea/**/gradle.xml
|
||||||
|
.idea/**/libraries
|
||||||
|
.idea/**/mongoSettings.xml
|
||||||
|
.idea/replstate.xml
|
||||||
|
.idea/sonarlint/
|
||||||
|
.idea/httpRequests
|
||||||
|
.idea/caches/build_file_checksums.ser
|
||||||
|
.idea/**/sonarlint/
|
||||||
|
.idea/**/sonarIssues.xml
|
||||||
|
.idea/**/markdown-navigator.xml
|
||||||
|
.idea/**/markdown-navigator-enh.xml
|
||||||
|
.idea/**/markdown-navigator/
|
||||||
|
.idea/$CACHE_FILE$
|
||||||
|
.idea/codestream.xml
|
||||||
|
.idea/**/azureSettings.xml
|
||||||
|
|
||||||
|
##########################################################################
|
8
.idea/.gitignore
generated
vendored
Normal file
8
.idea/.gitignore
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# Default ignored files
|
||||||
|
/shelf/
|
||||||
|
/workspace.xml
|
||||||
|
# Editor-based HTTP Client requests
|
||||||
|
/httpRequests/
|
||||||
|
# Datasource local storage ignored files
|
||||||
|
/dataSources/
|
||||||
|
/dataSources.local.xml
|
9
.idea/goext.iml
generated
Normal file
9
.idea/goext.iml
generated
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<module type="WEB_MODULE" version="4">
|
||||||
|
<component name="Go" enabled="true" />
|
||||||
|
<component name="NewModuleRootManager">
|
||||||
|
<content url="file://$MODULE_DIR$" />
|
||||||
|
<orderEntry type="inheritedJdk" />
|
||||||
|
<orderEntry type="sourceFolder" forTests="false" />
|
||||||
|
</component>
|
||||||
|
</module>
|
11
.idea/inspectionProfiles/Project_Default.xml
generated
Normal file
11
.idea/inspectionProfiles/Project_Default.xml
generated
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
<component name="InspectionProjectProfileManager">
|
||||||
|
<profile version="1.0">
|
||||||
|
<option name="myName" value="Project Default" />
|
||||||
|
<inspection_tool class="LanguageDetectionInspection" enabled="false" level="WARNING" enabled_by_default="false" />
|
||||||
|
<inspection_tool class="SpellCheckingInspection" enabled="false" level="TYPO" enabled_by_default="false">
|
||||||
|
<option name="processCode" value="true" />
|
||||||
|
<option name="processLiterals" value="true" />
|
||||||
|
<option name="processComments" value="true" />
|
||||||
|
</inspection_tool>
|
||||||
|
</profile>
|
||||||
|
</component>
|
8
.idea/modules.xml
generated
Normal file
8
.idea/modules.xml
generated
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project version="4">
|
||||||
|
<component name="ProjectModuleManager">
|
||||||
|
<modules>
|
||||||
|
<module fileurl="file://$PROJECT_DIR$/.idea/goext.iml" filepath="$PROJECT_DIR$/.idea/goext.iml" />
|
||||||
|
</modules>
|
||||||
|
</component>
|
||||||
|
</project>
|
6
.idea/vcs.xml
generated
Normal file
6
.idea/vcs.xml
generated
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project version="4">
|
||||||
|
<component name="VcsDirectoryMappings">
|
||||||
|
<mapping directory="$PROJECT_DIR$" vcs="Git" />
|
||||||
|
</component>
|
||||||
|
</project>
|
202
LICENSE
Normal file
202
LICENSE
Normal file
@ -0,0 +1,202 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
7
README.md
Normal file
7
README.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
BFB goext library
|
||||||
|
=================
|
||||||
|
|
||||||
|
A collection of general & useful library methods
|
||||||
|
|
||||||
|
Every subfolder is a seperate dependency and can be imported individually
|
||||||
|
|
0
error/bfberror/TODO
Normal file
0
error/bfberror/TODO
Normal file
3
error/go.mod
Normal file
3
error/go.mod
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
module blackforestbytes.com/goext/error
|
||||||
|
|
||||||
|
go 1.19
|
69
gin/ginext/bindings.go
Normal file
69
gin/ginext/bindings.go
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
package ginext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bringman.de/common/shared/bmerror"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"github.com/go-playground/validator/v10"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ShouldBind(g *gin.Context, uri interface{}, query interface{}, body interface{}) error {
|
||||||
|
if uri != nil {
|
||||||
|
if err := g.ShouldBindUri(uri); err != nil {
|
||||||
|
if vErrs, ok := err.(validator.ValidationErrors); ok {
|
||||||
|
return bmerror.Wrap(vErrs, "Could not validate request parameter (uri)").
|
||||||
|
Errs("inner", convertValidationErrors(vErrs)).
|
||||||
|
WithType(bmerror.ErrQueryValidation).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
} else {
|
||||||
|
return bmerror.Wrap(err, "Could not parse request parameter (uri)").
|
||||||
|
WithType(bmerror.ErrQueryParse).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query != nil {
|
||||||
|
if err := g.ShouldBindQuery(query); err != nil {
|
||||||
|
if vErrs, ok := err.(validator.ValidationErrors); ok {
|
||||||
|
return bmerror.Wrap(vErrs, "Could not validate request parameter (query)").
|
||||||
|
Errs("inner", convertValidationErrors(vErrs)).
|
||||||
|
WithType(bmerror.ErrQueryValidation).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
} else {
|
||||||
|
return bmerror.Wrap(err, "Could not parse request parameter (query)").
|
||||||
|
WithType(bmerror.ErrQueryParse).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if body != nil {
|
||||||
|
if err := g.ShouldBindJSON(body); err != nil {
|
||||||
|
if vErrs, ok := err.(validator.ValidationErrors); ok {
|
||||||
|
return bmerror.Wrap(vErrs, "Could not validate request parameter (body:json)").
|
||||||
|
Errs("inner", convertValidationErrors(vErrs)).
|
||||||
|
WithType(bmerror.ErrQueryValidation).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
} else {
|
||||||
|
return bmerror.Wrap(err, "Could not parse request parameter (body:json)").
|
||||||
|
WithType(bmerror.ErrQueryParse).
|
||||||
|
WithStatuscode(http.StatusBadRequest).
|
||||||
|
Build()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertValidationErrors(e validator.ValidationErrors) []error {
|
||||||
|
r := make([]error, 0, len(e))
|
||||||
|
for _, v := range e {
|
||||||
|
r = append(r, v)
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
42
gin/ginext/gin.go
Normal file
42
gin/ginext/gin.go
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
package ginext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bringman.de/common/shared/bmerror"
|
||||||
|
"context"
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewEngine() *gin.Engine {
|
||||||
|
engine := gin.New()
|
||||||
|
|
||||||
|
engine.RedirectFixedPath = false
|
||||||
|
engine.RedirectTrailingSlash = false
|
||||||
|
|
||||||
|
engine.Use(gin.CustomRecovery(func(c *gin.Context, err interface{}) {
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
bmerror.
|
||||||
|
New(bmerror.ErrGinPanic, "gin request caused panic").
|
||||||
|
Interface("panic-object", err).
|
||||||
|
Stack().
|
||||||
|
GinReq(ctx, c, c.Request).
|
||||||
|
WithStatuscode(http.StatusInternalServerError).
|
||||||
|
Output(ctx, c)
|
||||||
|
}))
|
||||||
|
|
||||||
|
return engine
|
||||||
|
}
|
||||||
|
|
||||||
|
func NoRouteHandler() func(c *gin.Context) {
|
||||||
|
return func(g *gin.Context) {
|
||||||
|
bmerror.New(bmerror.ErrRouteNotFound, "Route not found").
|
||||||
|
Str("FullPath", g.FullPath()).
|
||||||
|
Str("Method", g.Request.Method).
|
||||||
|
Str("URL", g.Request.URL.String()).
|
||||||
|
Str("RequestURI", g.Request.RequestURI).
|
||||||
|
Str("Proto", g.Request.Proto).
|
||||||
|
Any("Header", g.Request.Header).
|
||||||
|
Output(context.Background(), g)
|
||||||
|
}
|
||||||
|
}
|
3
gin/go.mod
Normal file
3
gin/go.mod
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
module blackforestbytes.com/goext/gin
|
||||||
|
|
||||||
|
go 1.19
|
22
lang/cryptext/hash.go
Normal file
22
lang/cryptext/hash.go
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package cryptext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
func StrSha256(v string) string {
|
||||||
|
h := sha256.New()
|
||||||
|
h.Write([]byte(v))
|
||||||
|
bs := h.Sum(nil)
|
||||||
|
sh := fmt.Sprintf("%x", bs)
|
||||||
|
return sh
|
||||||
|
}
|
||||||
|
|
||||||
|
func BytesSha256(v []byte) string {
|
||||||
|
h := sha256.New()
|
||||||
|
h.Write(v)
|
||||||
|
bs := h.Sum(nil)
|
||||||
|
sh := fmt.Sprintf("%x", bs)
|
||||||
|
return sh
|
||||||
|
}
|
25
lang/cryptext/hash_test.go
Normal file
25
lang/cryptext/hash_test.go
Normal file
@ -0,0 +1,25 @@
|
|||||||
|
package cryptext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStrSha256(t *testing.T) {
|
||||||
|
assertEqual(t, StrSha256(""), "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
|
||||||
|
assertEqual(t, StrSha256("0"), "5feceb66ffc86f38d952786c6d696c79c2dbc239dd4e91b46729d73a27fb57e9")
|
||||||
|
assertEqual(t, StrSha256("80085"), "b3786e141d65638ad8a98173e26b5f6a53c927737b23ff31fb1843937250f44b")
|
||||||
|
assertEqual(t, StrSha256("Hello World"), "a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBytesSha256(t *testing.T) {
|
||||||
|
assertEqual(t, BytesSha256([]byte{}), "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
|
||||||
|
assertEqual(t, BytesSha256([]byte{0}), "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d")
|
||||||
|
assertEqual(t, BytesSha256([]byte{128}), "76be8b528d0075f7aae98d6fa57a6d3c83ae480a8469e668d7b0af968995ac71")
|
||||||
|
assertEqual(t, BytesSha256([]byte{0, 1, 2, 4, 8, 16, 32, 64, 128, 255}), "55016a318ba538e00123c736b2a8b6db368d00e7e25727547655b653e5853603")
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertEqual(t *testing.T, actual string, expected string) {
|
||||||
|
if actual != expected {
|
||||||
|
t.Errorf("values differ: Actual: '%v', Expected: '%v'", actual, expected)
|
||||||
|
}
|
||||||
|
}
|
56
lang/dataext/bufferedReadCloser.go
Normal file
56
lang/dataext/bufferedReadCloser.go
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package dataext
|
||||||
|
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
type BufferedReadCloser interface {
|
||||||
|
io.ReadCloser
|
||||||
|
BufferedAll() ([]byte, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
type bufferedReadCloser struct {
|
||||||
|
buffer []byte
|
||||||
|
inner io.ReadCloser
|
||||||
|
finished bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bufferedReadCloser) Read(p []byte) (int, error) {
|
||||||
|
|
||||||
|
n, err := b.inner.Read(p)
|
||||||
|
if n > 0 {
|
||||||
|
b.buffer = append(b.buffer, p[0:n]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
b.finished = true
|
||||||
|
}
|
||||||
|
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewBufferedReadCloser(sub io.ReadCloser) BufferedReadCloser {
|
||||||
|
return &bufferedReadCloser{
|
||||||
|
buffer: make([]byte, 0, 1024),
|
||||||
|
inner: sub,
|
||||||
|
finished: false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bufferedReadCloser) Close() error {
|
||||||
|
err := b.inner.Close()
|
||||||
|
if err != nil {
|
||||||
|
b.finished = true
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *bufferedReadCloser) BufferedAll() ([]byte, error) {
|
||||||
|
arr := make([]byte, 1024)
|
||||||
|
for !b.finished {
|
||||||
|
_, err := b.Read(arr)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.buffer, nil
|
||||||
|
}
|
151
lang/dataext/lruMap.go
Normal file
151
lang/dataext/lruMap.go
Normal file
@ -0,0 +1,151 @@
|
|||||||
|
package dataext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
//
|
||||||
|
// This is an LRU (Least-Recently-Used) cache based on a double linked list
|
||||||
|
// All the work we do below is to have a cache where we can easily remove the least-used element
|
||||||
|
// (to ensure that the cache is never bigger than maxsize items)
|
||||||
|
//
|
||||||
|
// The cache algorithm the following properties:
|
||||||
|
// - Memory footprint is O(n), with neglible overhead
|
||||||
|
// - The cache is multi-threading safe (sync.Mutex)
|
||||||
|
// - Inserts are O(1)
|
||||||
|
// - Gets are O(1)
|
||||||
|
// - Re-Shuffles are O(1) (= an element is removed on Insert because teh cache was full)
|
||||||
|
//
|
||||||
|
// There are also a bunch of unit tests to ensure that the cache is always in a consistent state
|
||||||
|
//
|
||||||
|
|
||||||
|
type LRUData interface{}
|
||||||
|
|
||||||
|
type LRUMap struct {
|
||||||
|
maxsize int
|
||||||
|
lock sync.Mutex
|
||||||
|
|
||||||
|
cache map[string]*cacheNode
|
||||||
|
|
||||||
|
lfuHead *cacheNode
|
||||||
|
lfuTail *cacheNode
|
||||||
|
}
|
||||||
|
|
||||||
|
type cacheNode struct {
|
||||||
|
key string
|
||||||
|
data LRUData
|
||||||
|
parent *cacheNode
|
||||||
|
child *cacheNode
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewLRUMap(size int) *LRUMap {
|
||||||
|
if size <= 2 && size != 0 {
|
||||||
|
panic("Size must be > 2 (or 0)")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &LRUMap{
|
||||||
|
maxsize: size,
|
||||||
|
lock: sync.Mutex{},
|
||||||
|
cache: make(map[string]*cacheNode, size+1),
|
||||||
|
lfuHead: nil,
|
||||||
|
lfuTail: nil,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *LRUMap) Put(key string, value LRUData) {
|
||||||
|
if c.maxsize == 0 {
|
||||||
|
return // cache disabled
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
node, exists := c.cache[key]
|
||||||
|
|
||||||
|
if exists {
|
||||||
|
// key already in data: only update LFU and value
|
||||||
|
c.moveNodeToTop(node)
|
||||||
|
node.data = value
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// key does not exist: insert into map and add to top of LFU
|
||||||
|
node = &cacheNode{
|
||||||
|
key: key,
|
||||||
|
data: value,
|
||||||
|
parent: nil,
|
||||||
|
child: c.lfuHead,
|
||||||
|
}
|
||||||
|
if c.lfuHead == nil && c.lfuTail == nil { // special case - previously the cache was empty (head == tail == nil)
|
||||||
|
c.lfuHead = node
|
||||||
|
c.lfuTail = node
|
||||||
|
} else {
|
||||||
|
c.lfuHead = node
|
||||||
|
node.child.parent = node
|
||||||
|
}
|
||||||
|
c.cache[key] = node
|
||||||
|
|
||||||
|
if len(c.cache) > c.maxsize { // maxsize is always > 2
|
||||||
|
tail := c.lfuTail
|
||||||
|
delete(c.cache, tail.key)
|
||||||
|
c.lfuTail = tail.parent
|
||||||
|
c.lfuTail.child = nil
|
||||||
|
tail.parent = nil
|
||||||
|
tail.child = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *LRUMap) TryGet(key string) (LRUData, bool) {
|
||||||
|
if c.maxsize == 0 {
|
||||||
|
return nil, false // cache disabled
|
||||||
|
}
|
||||||
|
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
val, ok := c.cache[key]
|
||||||
|
if !ok {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
c.moveNodeToTop(val)
|
||||||
|
return val.data, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *LRUMap) moveNodeToTop(node *cacheNode) {
|
||||||
|
// (only called in critical section !)
|
||||||
|
|
||||||
|
if c.lfuHead == node { // fast case
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 1 unlink
|
||||||
|
|
||||||
|
parent := node.parent
|
||||||
|
child := node.child
|
||||||
|
if parent != nil {
|
||||||
|
parent.child = child
|
||||||
|
}
|
||||||
|
if child != nil {
|
||||||
|
child.parent = parent
|
||||||
|
}
|
||||||
|
if node == c.lfuHead {
|
||||||
|
c.lfuHead = node.child
|
||||||
|
}
|
||||||
|
if node == c.lfuTail {
|
||||||
|
c.lfuTail = node.parent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Step 2 re-insert at top
|
||||||
|
node.parent = nil
|
||||||
|
node.child = c.lfuHead
|
||||||
|
c.lfuHead = node
|
||||||
|
if node.child != nil {
|
||||||
|
node.child.parent = node
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *LRUMap) Size() int {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
return len(c.cache)
|
||||||
|
}
|
269
lang/dataext/lruMap_test.go
Normal file
269
lang/dataext/lruMap_test.go
Normal file
@ -0,0 +1,269 @@
|
|||||||
|
package dataext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"go.mongodb.org/mongo-driver/bson/primitive"
|
||||||
|
"math/rand"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rand.Seed(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResultCache1(t *testing.T) {
|
||||||
|
cache := NewLRUMap(8)
|
||||||
|
verifyLRUList(cache, t)
|
||||||
|
|
||||||
|
key := randomKey()
|
||||||
|
val := randomVal()
|
||||||
|
|
||||||
|
if cache.Size() != 0 {
|
||||||
|
t.Errorf("cache size expected == 0, actual == %v", cache.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := cache.TryGet(key); ok {
|
||||||
|
t.Errorf("empty cache TryGet returned value")
|
||||||
|
}
|
||||||
|
verifyLRUList(cache, t)
|
||||||
|
|
||||||
|
cache.Put(key, val)
|
||||||
|
verifyLRUList(cache, t)
|
||||||
|
|
||||||
|
if cache.Size() != 1 {
|
||||||
|
t.Errorf("cache size expected == 1, actual == %v", cache.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheval, ok := cache.TryGet(key)
|
||||||
|
verifyLRUList(cache, t)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("cache TryGet returned no value")
|
||||||
|
}
|
||||||
|
if !eq(cacheval, val) {
|
||||||
|
t.Errorf("cache TryGet returned different value (%+v <> %+v)", cacheval, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := cache.TryGet(randomKey()); ok {
|
||||||
|
t.Errorf("cache TryGet returned a value for non-existant key")
|
||||||
|
}
|
||||||
|
verifyLRUList(cache, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestResultCache2 fills the map (capacity 8), refreshes entries via
// TryGet, and verifies the eviction order: the least-recently-used key is
// dropped when the cache overflows, while recently touched keys survive.
func TestResultCache2(t *testing.T) {
	cache := NewLRUMap(8)
	verifyLRUList(cache, t)

	// insert four named entries
	key1 := "key1"
	val1 := randomVal()
	cache.Put(key1, val1)
	verifyLRUList(cache, t)

	key2 := "key2"
	val2 := randomVal()
	cache.Put(key2, val2)
	verifyLRUList(cache, t)

	key3 := "key3"
	val3 := randomVal()
	cache.Put(key3, val3)
	verifyLRUList(cache, t)

	key4 := "key4"
	val4 := randomVal()
	cache.Put(key4, val4)
	verifyLRUList(cache, t)

	// touch key1..key4 in order; each TryGet moves the key to most-recently-used
	if _, ok := cache.TryGet(key1); !ok {
		t.Errorf("cache TryGet returned no value")
	}
	verifyLRUList(cache, t)
	if _, ok := cache.TryGet(key2); !ok {
		t.Errorf("cache TryGet returned no value")
	}
	verifyLRUList(cache, t)
	if _, ok := cache.TryGet(key3); !ok {
		t.Errorf("cache TryGet returned no value")
	}
	verifyLRUList(cache, t)
	if _, ok := cache.TryGet(key4); !ok {
		t.Errorf("cache TryGet returned no value")
	}
	verifyLRUList(cache, t)
	if _, ok := cache.TryGet(randomKey()); ok {
		t.Errorf("cache TryGet returned a value for non-existant key")
	}
	verifyLRUList(cache, t)
	if cache.Size() != 4 {
		t.Errorf("cache size expected == 4, actual == %v", cache.Size())
	}
	verifyLRUList(cache, t)

	cache.Put(key4, val4) // same key again
	verifyLRUList(cache, t)

	// re-putting an existing key must not grow the cache
	if cache.Size() != 4 {
		t.Errorf("cache size expected == 4, actual == %v", cache.Size())
	}

	// fill the remaining four slots
	cache.Put(randomKey(), randomVal())
	verifyLRUList(cache, t)
	cache.Put(randomKey(), randomVal())
	verifyLRUList(cache, t)
	cache.Put(randomKey(), randomVal())
	verifyLRUList(cache, t)
	cache.Put(randomKey(), randomVal())
	verifyLRUList(cache, t)

	if cache.Size() != 8 {
		t.Errorf("cache size expected == 8, actual == %v", cache.Size())
	}

	cache.Put(randomKey(), randomVal()) // drops key1
	verifyLRUList(cache, t)

	if cache.Size() != 8 {
		t.Errorf("cache size expected == 8, actual == %v", cache.Size())
	}

	if _, ok := cache.TryGet(key1); ok {
		t.Errorf("[key1] should be dropped from cache")
	}
	verifyLRUList(cache, t)
	if _, ok := cache.TryGet(key2); !ok { // moves key2 to most-recently used
		t.Errorf("[key2] should still be in cache")
	}
	verifyLRUList(cache, t)

	cache.Put(randomKey(), randomVal()) // drops key3
	verifyLRUList(cache, t)

	if cache.Size() != 8 {
		t.Errorf("cache size expected == 8, actual == %v", cache.Size())
	}

	if _, ok := cache.TryGet(key3); ok {
		t.Errorf("[key3] should be dropped from cache")
	}
	if _, ok := cache.TryGet(key2); !ok {
		t.Errorf("[key2] should still be in cache")
	}
}
|
||||||
|
|
||||||
|
// TestResultCache3 checks that Put with an already-present key overwrites
// the stored value instead of keeping the old one.
func TestResultCache3(t *testing.T) {
	cache := NewLRUMap(8)
	verifyLRUList(cache, t)

	key1 := "key1"
	val1 := randomVal()
	val2 := randomVal()

	cache.Put(key1, val1)
	verifyLRUList(cache, t)

	if val, ok := cache.TryGet(key1); !ok || !eq(val, val1) {
		t.Errorf("Value in cache should be [val1]")
	}

	// second Put with the same key replaces the value
	cache.Put(key1, val2)
	verifyLRUList(cache, t)

	if val, ok := cache.TryGet(key1); !ok || !eq(val, val2) {
		t.Errorf("Value in cache should be [val2]")
	}
}
|
||||||
|
|
||||||
|
// does a basic consistency check over the internal cache representation:
// walks the doubly-linked LRU list from lfuHead to the tail and verifies
// that head/tail pointers, parent<->child back-links, the node count, and
// the configured maximum size are all consistent. Errors are reported via
// t; structural errors abort the walk early to avoid chasing broken links.
func verifyLRUList(cache *LRUMap, t *testing.T) {
	size := 0

	tailFound := false
	headFound := false
	curr := cache.lfuHead
	for curr != nil {
		size++

		// a node without a parent must be the designated head
		if curr.parent == nil {
			headFound = true
			if curr != cache.lfuHead {
				t.Errorf("head != lfuHead")
				return
			}
		}

		// a node without a child must be the designated tail
		if curr.child == nil {
			tailFound = true
			if curr != cache.lfuTail {
				t.Errorf("tail != lfuTail")
				return
			}
		}

		// forward link must be mirrored by the child's back link
		if curr.child != nil {
			if curr.child.parent != curr {
				t.Errorf("error in child <-> parent link")
				return
			}
		}

		// back link must be mirrored by the parent's forward link
		if curr.parent != nil {
			if curr.parent.child != curr {
				t.Errorf("error in parent <-> child link")
				return
			}
		}

		curr = curr.child
	}

	// head/tail pointers must exist exactly when the cache is non-empty
	if cache.Size() > 0 && cache.lfuHead == nil {
		t.Errorf("no head in cache")
	}
	if cache.Size() > 0 && cache.lfuTail == nil {
		t.Errorf("no tail in cache")
	}

	if cache.Size() == 0 && cache.lfuHead != nil {
		t.Errorf("dangling head in cache")
	}
	if cache.Size() == 0 && cache.lfuTail != nil {
		t.Errorf("dangling tail in cache")
	}

	if cache.Size() > 0 && !headFound {
		t.Errorf("head not found")
	}

	if cache.Size() > 0 && !tailFound {
		t.Errorf("tail not found")
	}

	// list length must agree with the map size
	if size != cache.Size() {
		t.Errorf("error size mismatch (%v <> %v)", size, cache.Size())
	}

	if cache.Size() > cache.maxsize {
		t.Errorf("too many items: %v", cache.Size())
	}
}
|
||||||
|
|
||||||
|
func randomKey() string {
|
||||||
|
return strconv.FormatInt(rand.Int63(), 16)
|
||||||
|
}
|
||||||
|
|
||||||
|
func randomVal() LRUData {
|
||||||
|
v := primitive.NewObjectID()
|
||||||
|
return &v
|
||||||
|
}
|
||||||
|
|
||||||
|
func eq(a LRUData, b LRUData) bool {
|
||||||
|
v1, ok1 := a.(*primitive.ObjectID)
|
||||||
|
v2, ok2 := b.(*primitive.ObjectID)
|
||||||
|
if ok1 && ok2 {
|
||||||
|
if v1 == nil || v2 == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return v1.Hex() == v2.Hex()
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
65
lang/dataext/syncSet.go
Normal file
65
lang/dataext/syncSet.go
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
package dataext
|
||||||
|
|
||||||
|
import "sync"
|
||||||
|
|
||||||
|
// SyncStringSet is a mutex-guarded set of strings.
// The zero value is ready to use; the backing map is created lazily on
// first access. It must not be copied after first use (it contains a
// sync.Mutex), so pass it around as *SyncStringSet.
type SyncStringSet struct {
	data map[string]bool // set members; a present key is always mapped to true
	lock sync.Mutex      // guards data
}

// ensureInit lazily allocates the backing map.
// Callers must hold s.lock. (Extracted from the four methods below, which
// each repeated this nil-check inline.)
func (s *SyncStringSet) ensureInit() {
	if s.data == nil {
		s.data = make(map[string]bool)
	}
}

// Add inserts value into the set.
// It returns true when the value was newly added and false when it was
// already present.
func (s *SyncStringSet) Add(value string) bool {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.ensureInit()

	_, ok := s.data[value]
	s.data[value] = true

	return !ok
}

// AddAll inserts all values into the set (duplicates are ignored).
func (s *SyncStringSet) AddAll(values []string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.ensureInit()

	for _, value := range values {
		s.data[value] = true
	}
}

// Contains reports whether value is a member of the set.
func (s *SyncStringSet) Contains(value string) bool {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.ensureInit()

	_, ok := s.data[value]

	return ok
}

// Get returns the members of the set as a slice.
// The order is unspecified (Go map iteration order is randomized).
func (s *SyncStringSet) Get() []string {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.ensureInit()

	r := make([]string, 0, len(s.data))

	for k := range s.data {
		r = append(r, k)
	}

	return r
}
|
3
lang/go.mod
Normal file
3
lang/go.mod
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
module blackforestbytes.com/goext/lang
|
||||||
|
|
||||||
|
go 1.19
|
134
lang/langext/array.go
Normal file
134
lang/langext/array.go
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ForceArray converts a nil slice into an empty (non-nil) slice and
// returns any other slice unchanged. Useful e.g. for JSON encoding, where
// nil encodes as null but an empty slice encodes as [].
func ForceArray[T any](v []T) []T {
	if v != nil {
		return v
	}
	return make([]T, 0)
}
|
||||||
|
|
||||||
|
// ReverseArray reverses v in place.
func ReverseArray[T any](v []T) {
	i, j := 0, len(v)-1
	for i < j {
		v[i], v[j] = v[j], v[i]
		i++
		j--
	}
}
|
||||||
|
|
||||||
|
// InArray reports whether needle occurs anywhere in haystack.
func InArray[T comparable](needle T, haystack []T) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
// ArrUnique returns the distinct elements of array.
// The order of the result is unspecified (it follows map iteration order).
func ArrUnique[T comparable](array []T) []T {
	seen := make(map[T]bool, len(array))
	for _, v := range array {
		seen[v] = true
	}

	out := make([]T, 0, len(seen))
	for v := range seen {
		out = append(out, v)
	}
	return out
}
|
||||||
|
|
||||||
|
// ArrEqualsExact reports whether arr1 and arr2 have the same length and
// equal elements at every index.
func ArrEqualsExact[T comparable](arr1 []T, arr2 []T) bool {
	if len(arr1) != len(arr2) {
		return false
	}

	for i, v := range arr1 {
		if v != arr2[i] {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// ArrAll reports whether fn returns true for every index of the slice or
// array arr (length obtained via reflection). An empty arr yields true.
func ArrAll(arr interface{}, fn func(int) bool) bool {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		if !fn(idx) {
			return false
		}
	}
	return true
}

// ArrAllErr is ArrAll with an error-returning predicate; the first error
// aborts the scan and is returned.
func ArrAllErr(arr interface{}, fn func(int) (bool, error)) (bool, error) {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		ok, err := fn(idx)
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}
	return true, nil
}

// ArrNone reports whether fn returns false for every index of arr.
// An empty arr yields true.
func ArrNone(arr interface{}, fn func(int) bool) bool {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		if fn(idx) {
			return false
		}
	}
	return true
}

// ArrNoneErr is ArrNone with an error-returning predicate; the first error
// aborts the scan and is returned.
func ArrNoneErr(arr interface{}, fn func(int) (bool, error)) (bool, error) {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		ok, err := fn(idx)
		if err != nil {
			return false, err
		}
		if ok {
			return false, nil
		}
	}
	return true, nil
}

// ArrAny reports whether fn returns true for at least one index of arr.
func ArrAny(arr interface{}, fn func(int) bool) bool {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		if fn(idx) {
			return true
		}
	}
	return false
}

// ArrAnyErr is ArrAny with an error-returning predicate; the first error
// aborts the scan and is returned.
func ArrAnyErr(arr interface{}, fn func(int) (bool, error)) (bool, error) {
	length := reflect.ValueOf(arr).Len()
	for idx := 0; idx < length; idx++ {
		ok, err := fn(idx)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}
|
||||||
|
|
||||||
|
// AddToSet returns set with add appended, unless add is already present,
// in which case set is returned unchanged.
func AddToSet[T comparable](set []T, add T) []T {
	for i := range set {
		if set[i] == add {
			return set
		}
	}
	return append(set, add)
}
|
16
lang/langext/bytes.go
Normal file
16
lang/langext/bytes.go
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// FormatBytesToSI formats a byte count using decimal (SI) prefixes,
// e.g. 999 -> "999 B", 1500 -> "1.5 kB".
func FormatBytesToSI(b uint64) string {
	const unit = uint64(1000)

	if b < unit {
		return fmt.Sprintf("%d B", b)
	}

	div := unit
	exp := 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}

	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}
|
62
lang/langext/coalesce.go
Normal file
62
lang/langext/coalesce.go
Normal file
@ -0,0 +1,62 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Coalesce returns *v when v is non-nil, and def otherwise.
func Coalesce[T any](v *T, def T) T {
	if v != nil {
		return *v
	}
	return def
}
|
||||||
|
|
||||||
|
func CoalesceString(s *string, def string) string {
|
||||||
|
if s == nil {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return *s
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CoalesceInt(i *int, def int) int {
|
||||||
|
if i == nil {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return *i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CoalesceInt32(i *int32, def int32) int32 {
|
||||||
|
if i == nil {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return *i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CoalesceBool(b *bool, def bool) bool {
|
||||||
|
if b == nil {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return *b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CoalesceTime(t *time.Time, def time.Time) time.Time {
|
||||||
|
if t == nil {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return *t
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CoalesceStringer(s fmt.Stringer, def string) string {
|
||||||
|
if IsNil(s) {
|
||||||
|
return def
|
||||||
|
} else {
|
||||||
|
return s.String()
|
||||||
|
}
|
||||||
|
}
|
32
lang/langext/compare.go
Normal file
32
lang/langext/compare.go
Normal file
@ -0,0 +1,32 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
// CompareIntArr compares arr1 and arr2 element-wise and returns true when
// arr1 orders strictly before arr2 in this scheme; equal arrays yield
// false.
//
// NOTE(review): when the shared prefix is equal, a LONGER arr1 returns
// true and a longer arr2 returns false — the opposite of the usual
// lexicographic "shorter orders first" rule. Confirm this is intended
// before reusing elsewhere.
func CompareIntArr(arr1 []int, arr2 []int) bool {
	n1, n2 := len(arr1), len(arr2)

	for i := 0; i < n1 || i < n2; i++ {
		switch {
		case i < n1 && i < n2:
			// both in range: the first differing element decides
			if arr1[i] != arr2[i] {
				return arr1[i] < arr2[i]
			}
		case i < n1:
			// arr2 exhausted first
			return true
		default:
			// arr1 exhausted first
			return false
		}
	}

	// identical content and length
	return false
}
|
36
lang/langext/pointer.go
Normal file
36
lang/langext/pointer.go
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Ptr returns a pointer to a copy of v.
func Ptr[T any](v T) *T {
	p := v
	return &p
}
|
||||||
|
|
||||||
|
// PtrInt32 returns a pointer to a copy of v.
func PtrInt32(v int32) *int32 {
	p := v
	return &p
}

// PtrInt64 returns a pointer to a copy of v.
func PtrInt64(v int64) *int64 {
	p := v
	return &p
}

// PtrFloat32 returns a pointer to a copy of v.
func PtrFloat32(v float32) *float32 {
	p := v
	return &p
}

// PtrFloat64 returns a pointer to a copy of v.
func PtrFloat64(v float64) *float64 {
	p := v
	return &p
}
|
||||||
|
|
||||||
|
// IsNil reports whether i is nil: either an untyped nil interface, or an
// interface holding a nil pointer, map, channel, or slice value.
//
// Fix: reflect.Array was previously listed among the nil-able kinds, but
// array values can never be nil and reflect.Value.IsNil panics for them —
// passing any array value (e.g. [3]int{}) paniced. Arrays now simply
// report false.
//
// NOTE(review): func kinds are not covered, so a typed-nil func boxed in
// the interface still reports false — confirm whether callers rely on
// that before extending the switch.
func IsNil(i interface{}) bool {
	if i == nil {
		return true
	}
	switch reflect.TypeOf(i).Kind() {
	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice:
		return reflect.ValueOf(i).IsNil()
	}
	return false
}
|
63
lang/langext/string.go
Normal file
63
lang/langext/string.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
package langext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StrLimit shortens val to at most maxlen bytes. When val is truncated,
// suffix is appended in place of the removed tail so the result is still
// no longer than maxlen.
//
// Fix: previously maxlen < len(suffix) made `val[0:maxlen-len(suffix)]`
// panic with a negative slice index; the cut point is now clamped and the
// result trimmed so the maxlen guarantee always holds.
//
// Note: indexing is byte-based; truncation may split a multi-byte UTF-8
// rune.
func StrLimit(val string, maxlen int, suffix string) string {
	if maxlen < 0 {
		maxlen = 0
	}
	if len(val) <= maxlen {
		return val
	}

	cut := maxlen - len(suffix)
	if cut < 0 {
		cut = 0
	}

	out := val[:cut] + suffix
	if len(out) > maxlen {
		// suffix alone exceeds maxlen: trim it to keep the guarantee
		out = out[:maxlen]
	}
	return out
}
|
||||||
|
|
||||||
|
// StrSplit splits val at sep; when allowEmpty is false, empty segments are
// dropped. The result is nil when no segment survives.
func StrSplit(val string, sep string, allowEmpty bool) []string {
	var out []string
	for _, part := range strings.Split(val, sep) {
		if part == "" && !allowEmpty {
			continue
		}
		out = append(out, part)
	}
	return out
}
|
||||||
|
|
||||||
|
// StrPadRight appends repetitions of pad to str until the result is
// padlen bytes long (an empty pad defaults to a single space). Strings
// already padlen or longer are returned unchanged; a multi-byte pad is
// cut so the result is exactly padlen bytes.
func StrPadRight(str string, pad string, padlen int) string {
	if len(pad) == 0 {
		pad = " "
	}

	missing := padlen - len(str)
	if missing <= 0 {
		return str
	}

	return str + strings.Repeat(pad, missing)[:missing]
}
|
||||||
|
|
||||||
|
// StrPadLeft prepends repetitions of pad to str until the result is
// padlen bytes long (an empty pad defaults to a single space). Strings
// already padlen or longer are returned unchanged; a multi-byte pad is
// cut so the result is exactly padlen bytes.
func StrPadLeft(str string, pad string, padlen int) string {
	if len(pad) == 0 {
		pad = " "
	}

	missing := padlen - len(str)
	if missing <= 0 {
		return str
	}

	return strings.Repeat(pad, missing)[:missing] + str
}
|
||||||
|
|
||||||
|
// DeRefStringer renders v via String() and returns a pointer to the
// result; a nil v yields nil.
//
// NOTE(review): the nil check is on the interface value only — an
// interface holding a typed-nil pointer passes it and String() is invoked
// on the nil receiver.
func DeRefStringer(v fmt.Stringer) *string {
	if v == nil {
		return nil
	}
	s := v.String()
	return &s
}
|
||||||
|
|
||||||
|
func ConvertStringerArray[T fmt.Stringer](inarr []T) []string {
|
||||||
|
result := make([]string, 0, len(inarr))
|
||||||
|
for _, v := range inarr {
|
||||||
|
result = append(result, v.String())
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
41
lang/mathext/clamp.go
Normal file
41
lang/mathext/clamp.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package mathext
|
||||||
|
|
||||||
|
// ClampInt limits v to the closed interval [lo, hi].
func ClampInt(v int, lo int, hi int) int {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	default:
		return v
	}
}

// ClampInt32 limits v to the closed interval [lo, hi].
func ClampInt32(v int32, lo int32, hi int32) int32 {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	default:
		return v
	}
}

// ClampFloat32 limits v to the closed interval [lo, hi].
func ClampFloat32(v float32, lo float32, hi float32) float32 {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	default:
		return v
	}
}

// ClampFloat64 limits v to the closed interval [lo, hi].
func ClampFloat64(v float64, lo float64, hi float64) float64 {
	switch {
	case v < lo:
		return lo
	case v > hi:
		return hi
	default:
		return v
	}
}
|
7
lang/mathext/float.go
Normal file
7
lang/mathext/float.go
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
package mathext
|
||||||
|
|
||||||
|
import "math"
|
||||||
|
|
||||||
|
// Float64EpsilonEq reports whether v1 and v2 differ by at most eps.
func Float64EpsilonEq(v1 float64, v2 float64, eps float64) bool {
	diff := v1 - v2
	if diff < 0 {
		diff = -diff
	}
	return diff <= eps
}
|
13
lang/mathext/math.go
Normal file
13
lang/mathext/math.go
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
package mathext
|
||||||
|
|
||||||
|
// AvgFloat64 returns the arithmetic mean of arr.
// An empty slice yields NaN (0/0).
func AvgFloat64(arr []float64) float64 {
	return SumFloat64(arr) / float64(len(arr))
}

// SumFloat64 returns the sum of all elements of arr (0 for an empty slice).
func SumFloat64(arr []float64) float64 {
	var total float64
	for i := range arr {
		total += arr[i]
	}
	return total
}
|
41
lang/mathext/statistics.go
Normal file
41
lang/mathext/statistics.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package mathext
|
||||||
|
|
||||||
|
// Sum returns the sum of all elements of v (0 for an empty slice).
func Sum(v []float64) float64 {
	var total float64
	for i := range v {
		total += v[i]
	}
	return total
}

// Mean returns the arithmetic mean of v; an empty slice yields NaN (0/0).
func Mean(v []float64) float64 {
	return Sum(v) / float64(len(v))
}
|
||||||
|
|
||||||
|
// Median returns the middle element of v (odd length), or the average of
// the two middle elements (even length).
//
// NOTE(review): nothing here sorts v — the input appears to be assumed
// sorted, and an empty slice panics with an index error; confirm callers
// uphold both.
func Median(v []float64) float64 {
	mid := len(v) / 2
	if len(v)%2 == 1 {
		return v[mid]
	}
	return (v[mid-1] + v[mid]) / 2
}
|
||||||
|
|
||||||
|
// Min returns the smallest element of v; it panics on an empty slice.
func Min(v []float64) float64 {
	best := v[0]
	for _, x := range v[1:] {
		if x < best {
			best = x
		}
	}
	return best
}

// Max returns the largest element of v; it panics on an empty slice.
func Max(v []float64) float64 {
	best := v[0]
	for _, x := range v[1:] {
		if x > best {
			best = x
		}
	}
	return best
}
|
55
lang/timeext/duration.go
Normal file
55
lang/timeext/duration.go
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
package timeext
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
func FromSeconds(v int) time.Duration {
|
||||||
|
return time.Duration(int64(v) * int64(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromSecondsInt32(v int32) time.Duration {
|
||||||
|
return time.Duration(int64(v) * int64(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromSecondsInt64(v int64) time.Duration {
|
||||||
|
return time.Duration(v * int64(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromSecondsFloat32(v float32) time.Duration {
|
||||||
|
return time.Duration(int64(v * float32(time.Second)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromSecondsFloat64(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Second)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromSecondsFloat(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Second)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromMinutes(v int) time.Duration {
|
||||||
|
return time.Duration(int64(v) * int64(time.Minute))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromMinutesFloat(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Minute)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromMinutesFloat64(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Minute)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromHoursFloat64(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Hour)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromDays(v int) time.Duration {
|
||||||
|
return time.Duration(int64(v) * int64(24) * int64(time.Hour))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromMilliseconds(v int) time.Duration {
|
||||||
|
return time.Duration(int64(v) * int64(time.Millisecond))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FromMillisecondsFloat(v float64) time.Duration {
|
||||||
|
return time.Duration(int64(v * float64(time.Millisecond)))
|
||||||
|
}
|
77
lang/timeext/range.go
Normal file
77
lang/timeext/range.go
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
package timeext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type OpenTimeRange struct {
|
||||||
|
From *time.Time
|
||||||
|
To *time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r OpenTimeRange) String() string {
|
||||||
|
if r.From == nil && r.To == nil {
|
||||||
|
return "[]"
|
||||||
|
} else if r.From != nil && r.To != nil {
|
||||||
|
return fmt.Sprintf("[%v - %v]", r.From, r.To)
|
||||||
|
} else if r.From != nil {
|
||||||
|
return fmt.Sprintf("[%v - *]", r.From)
|
||||||
|
} else if r.To != nil {
|
||||||
|
return fmt.Sprintf("[* - %v]", r.To)
|
||||||
|
} else {
|
||||||
|
return "[??]"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r OpenTimeRange) Contains(v time.Time) bool {
|
||||||
|
if r.From != nil && v.Before(*r.From) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if r.To != nil && !v.Before(*r.To) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewOpenTimeRange(from *time.Time, to *time.Time) *OpenTimeRange {
|
||||||
|
if from == nil && to == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &OpenTimeRange{
|
||||||
|
From: from,
|
||||||
|
To: to,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r OpenTimeRange) ToMongoPipeline(key string) []interface{} {
|
||||||
|
type bsonM map[string]interface{}
|
||||||
|
type bsonE struct {
|
||||||
|
Key string
|
||||||
|
Value interface{}
|
||||||
|
}
|
||||||
|
type bsonD []bsonE
|
||||||
|
|
||||||
|
pipeline := make([]interface{}, 0)
|
||||||
|
|
||||||
|
if r.From != nil {
|
||||||
|
pipeline = append(pipeline, bsonD{{Key: "$match", Value: bsonM{key: bsonM{"$ne": nil, "$gt": r.From}}}})
|
||||||
|
}
|
||||||
|
if r.To != nil {
|
||||||
|
pipeline = append(pipeline, bsonD{{Key: "$match", Value: bsonM{key: bsonM{"$ne": nil, "$lt": r.To}}}})
|
||||||
|
}
|
||||||
|
|
||||||
|
return pipeline
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *OpenTimeRange) AppendToMongoPipeline(pipeline []interface{}, key string) []interface{} {
|
||||||
|
if r == nil {
|
||||||
|
return pipeline
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range r.ToMongoPipeline(key) {
|
||||||
|
pipeline = append(pipeline, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pipeline
|
||||||
|
}
|
127
lang/timeext/time.go
Normal file
127
lang/timeext/time.go
Normal file
@ -0,0 +1,127 @@
|
|||||||
|
package timeext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TimezoneBerlin is the IANA location "Europe/Berlin"; all the date
// helpers in this package normalize timestamps into this timezone.
var TimezoneBerlin *time.Location

// init loads the timezone once at startup. A missing tzdata database is
// unrecoverable for this package, hence the panic.
func init() {
	var err error
	TimezoneBerlin, err = time.LoadLocation("Europe/Berlin")
	if err != nil {
		panic(fmt.Sprintf("Could not load Timezone: %v", err))
	}
}
|
||||||
|
|
||||||
|
// TimeToDatePart returns a timestamp at the start of the day which contains t (= 00:00:00)
|
||||||
|
func TimeToDatePart(t time.Time) time.Time {
|
||||||
|
t = t.In(TimezoneBerlin)
|
||||||
|
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeToWeekStart returns a timestamp at the start of the week which contains t (= Monday 00:00:00)
|
||||||
|
func TimeToWeekStart(t time.Time) time.Time {
|
||||||
|
t = TimeToDatePart(t)
|
||||||
|
|
||||||
|
delta := time.Duration(((int64(t.Weekday()) + 6) % 7) * 24 * int64(time.Hour))
|
||||||
|
t = t.Add(-1 * delta)
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeToMonthStart returns a timestamp at the start of the month which contains t (= yyyy-MM-00 00:00:00)
|
||||||
|
func TimeToMonthStart(t time.Time) time.Time {
|
||||||
|
t = t.In(TimezoneBerlin)
|
||||||
|
return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeToMonthEnd returns a timestamp at the end of the month which contains t (= yyyy-MM-31 23:59:59.999999999)
|
||||||
|
func TimeToMonthEnd(t time.Time) time.Time {
|
||||||
|
return TimeToMonthStart(t).AddDate(0, 1, 0).Add(-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeToYearStart returns a timestamp at the start of the year which contains t (= yyyy-01-01 00:00:00)
|
||||||
|
func TimeToYearStart(t time.Time) time.Time {
|
||||||
|
t = t.In(TimezoneBerlin)
|
||||||
|
return time.Date(t.Year(), 1, 1, 0, 0, 0, 0, t.Location())
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimeToYearEnd returns a timestamp at the end of the month which contains t (= yyyy-12-31 23:59:59.999999999)
|
||||||
|
func TimeToYearEnd(t time.Time) time.Time {
|
||||||
|
return TimeToYearStart(t).AddDate(1, 0, 0).Add(-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsSameDayIncludingDateBoundaries returns true if t1 and t2 are part of the same day (TZ/Berlin), the boundaries of the day are
// inclusive, this means 2021-09-15T00:00:00 is still part of the day 2021-09-14
func IsSameDayIncludingDateBoundaries(t1 time.Time, t2 time.Time) bool {
	dp1 := TimeToDatePart(t1)
	dp2 := TimeToDatePart(t2)

	// same calendar day
	if dp1.Equal(dp2) {
		return true
	}

	// t2 is exactly midnight of the day directly after t1's day:
	// the boundary instant still counts as belonging to the earlier day
	if dp1.AddDate(0, 0, 1).Equal(dp2) && dp2.Equal(t2) {
		return true
	}

	return false
}
|
||||||
|
|
||||||
|
// IsDatePartEqual returns true if a and b have the same date part (`yyyy`, `MM` and `dd`)
|
||||||
|
func IsDatePartEqual(a time.Time, b time.Time) bool {
|
||||||
|
yy1, mm1, dd1 := a.In(TimezoneBerlin).Date()
|
||||||
|
yy2, mm2, dd2 := b.In(TimezoneBerlin).Date()
|
||||||
|
|
||||||
|
return yy1 == yy2 && mm1 == mm2 && dd1 == dd2
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithTimePart returns a timestamp with the date-part (`yyyy`, `MM`, `dd`) from base
|
||||||
|
// and the time (`HH`, `mm`, `ss`) from the parameter
|
||||||
|
func WithTimePart(base time.Time, hour, minute, second int) time.Time {
|
||||||
|
datepart := TimeToDatePart(base)
|
||||||
|
|
||||||
|
delta := time.Duration(hour*int(time.Hour) + minute*int(time.Minute) + second*int(time.Second))
|
||||||
|
|
||||||
|
return datepart.Add(delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CombineDateAndTime returns a timestamp with the date-part (`yyyy`, `MM`, `dd`) from the d parameter
|
||||||
|
// and the time (`HH`, `mm`, `ss`) from the t parameter
|
||||||
|
func CombineDateAndTime(d time.Time, t time.Time) time.Time {
|
||||||
|
datepart := TimeToDatePart(d)
|
||||||
|
|
||||||
|
delta := time.Duration(t.Hour()*int(time.Hour) + t.Minute()*int(time.Minute) + t.Second()*int(time.Second) + t.Nanosecond()*int(time.Nanosecond))
|
||||||
|
|
||||||
|
return datepart.Add(delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsSunday returns true if t is a sunday (in TZ/Berlin)
|
||||||
|
func IsSunday(t time.Time) bool {
|
||||||
|
if t.In(TimezoneBerlin).Weekday() == time.Sunday {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func DurationFromTime(hours int, minutes int, seconds int) time.Duration {
|
||||||
|
return time.Duration(hours*int(time.Hour) + minutes*int(time.Minute) + seconds*int(time.Second))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Min(a time.Time, b time.Time) time.Time {
|
||||||
|
if a.UnixNano() < b.UnixNano() {
|
||||||
|
return a
|
||||||
|
} else {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Max(a time.Time, b time.Time) time.Time {
|
||||||
|
if a.UnixNano() > b.UnixNano() {
|
||||||
|
return a
|
||||||
|
} else {
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
}
|
23
lang/timeext/translation.go
Normal file
23
lang/timeext/translation.go
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package timeext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var longDayNames = []string{
|
||||||
|
"Sonntag",
|
||||||
|
"Montag",
|
||||||
|
"Dienstag",
|
||||||
|
"Mittwoch", // meine Kerle
|
||||||
|
"Donnerstag",
|
||||||
|
"Freitag",
|
||||||
|
"Samstag",
|
||||||
|
}
|
||||||
|
|
||||||
|
func WeekdayNameGerman(d time.Weekday) string {
|
||||||
|
if time.Sunday <= d && d <= time.Saturday {
|
||||||
|
return longDayNames[d]
|
||||||
|
}
|
||||||
|
return "%!Weekday(" + strconv.Itoa(int(d)) + ")"
|
||||||
|
}
|
72
lang/timeext/weekcount.go
Normal file
72
lang/timeext/weekcount.go
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
package timeext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isoWeekCountAggregate caches, per year, the total week count accumulated
// from the anchor year 1900 up to and including that year (negative for
// years before 1900). Filled by init and by GetAggregateIsoWeekCount.
var isoWeekCountAggregate map[int]int

// init pre-computes the aggregate week counts from 1900 through the
// current year so later lookups are O(1) map hits.
func init() {
	isoWeekCountAggregate = make(map[int]int)
	for y := 1900; y <= time.Now().Year(); y++ {
		GetAggregateIsoWeekCount(y)
	}
}
|
||||||
|
|
||||||
|
func GetAggregateIsoWeekCount(year int) int {
|
||||||
|
if v, ok := isoWeekCountAggregate[year]; ok {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
if year == 1900 {
|
||||||
|
isoWeekCountAggregate[year] = 0
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if year < 1900 {
|
||||||
|
s := 0
|
||||||
|
for yy := year; yy < 1900; yy++ {
|
||||||
|
s += GetIsoWeekCount(yy)
|
||||||
|
}
|
||||||
|
w := -s
|
||||||
|
isoWeekCountAggregate[year] = w
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
w := GetIsoWeekCount(year)
|
||||||
|
|
||||||
|
w += GetAggregateIsoWeekCount(year - 1)
|
||||||
|
|
||||||
|
isoWeekCountAggregate[year] = w
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetIsoWeekCount(year int) int {
|
||||||
|
_, w1 := time.Date(year+0, 12, 27, 0, 0, 0, 0, TimezoneBerlin).ISOWeek()
|
||||||
|
_, w2 := time.Date(year+0, 12, 31, 0, 0, 0, 0, TimezoneBerlin).ISOWeek()
|
||||||
|
_, w3 := time.Date(year+1, 1, 4, 0, 0, 0, 0, TimezoneBerlin).ISOWeek()
|
||||||
|
|
||||||
|
w1 -= 1
|
||||||
|
w2 -= 1
|
||||||
|
w3 -= 1
|
||||||
|
|
||||||
|
w := w1
|
||||||
|
if w2 > w {
|
||||||
|
w = w2
|
||||||
|
}
|
||||||
|
if w3 > w {
|
||||||
|
w = w3
|
||||||
|
}
|
||||||
|
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetGlobalWeeknumber(t time.Time) int {
|
||||||
|
y, w := t.ISOWeek()
|
||||||
|
w -= 1
|
||||||
|
if y <= 1900 {
|
||||||
|
w -= 1
|
||||||
|
}
|
||||||
|
return GetAggregateIsoWeekCount(y-1) + w
|
||||||
|
}
|
89
lang/totpext/totp.go
Normal file
89
lang/totpext/totp.go
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
package totpext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/base32"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// https://datatracker.ietf.org/doc/html/rfc6238
|
||||||
|
// https://datatracker.ietf.org/doc/html/rfc4226
|
||||||
|
// https://datatracker.ietf.org/doc/html/rfc2104
|
||||||
|
|
||||||
|
// https://en.wikipedia.org/wiki/Universal_2nd_Factor
|
||||||
|
// https://en.wikipedia.org/wiki/HMAC-based_one-time_password
|
||||||
|
// https://en.wikipedia.org/wiki/HMAC
|
||||||
|
|
||||||
|
func TOTP(key []byte) string {
|
||||||
|
t := time.Now().Unix() / 30
|
||||||
|
return generateTOTP(sha1.New, key, t, 6)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Validate(key []byte, totp string) bool {
|
||||||
|
t := time.Now().Unix() / 30
|
||||||
|
|
||||||
|
if generateTOTP(sha1.New, key, t, 6) == totp {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if generateTOTP(sha1.New, key, t-1, 6) == totp {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if generateTOTP(sha1.New, key, t+1, 6) == totp {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateSecret returns a fresh 20-byte (160-bit) random TOTP secret,
// read from crypto/rand. 160 bits matches the SHA-1 block used by TOTP.
func GenerateSecret() ([]byte, error) {
	buf := make([]byte, 20)
	if _, err := rand.Read(buf); err != nil {
		return nil, err
	}
	return buf, nil
}
|
||||||
|
|
||||||
|
func generateTOTP(algo func() hash.Hash, secret []byte, time int64, returnDigits int) string {
|
||||||
|
msg := make([]byte, 8)
|
||||||
|
binary.BigEndian.PutUint64(msg, uint64(time))
|
||||||
|
|
||||||
|
mac := hmac.New(algo, secret)
|
||||||
|
mac.Write(msg)
|
||||||
|
hmacResult := mac.Sum(nil)
|
||||||
|
|
||||||
|
offsetBits := hmacResult[len(hmacResult)-1] & 0x0F
|
||||||
|
|
||||||
|
p := hmacResult[offsetBits : offsetBits+4]
|
||||||
|
|
||||||
|
truncated := binary.BigEndian.Uint32(p) & 0x7FFFFFFF // Last 31 bits
|
||||||
|
|
||||||
|
val := strconv.Itoa(int(truncated))
|
||||||
|
for len(val) < returnDigits {
|
||||||
|
val = "0" + val
|
||||||
|
}
|
||||||
|
val = val[len(val)-returnDigits:]
|
||||||
|
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateOTPAuth builds an otpauth:// provisioning URI (as consumed by
// authenticator apps) for a TOTP credential with the library defaults:
// SHA1, 30-second period, 6 digits.
//
// ccn is used as the label prefix, accountmail as the label's account
// part, and issuer is repeated in the query string.
//
// Fix: ccn and issuer are now percent-escaped like accountmail already
// was — previously an issuer containing a space or '&' produced an
// invalid/ambiguous URI.
func GenerateOTPAuth(ccn string, key []byte, accountmail string, issuer string) string {

	return fmt.Sprintf("otpauth://totp/%v:%v?secret=%v&issuer=%v&algorithm=%v&period=%v&digits=%v",
		url.QueryEscape(ccn),
		url.QueryEscape(accountmail),
		base32.StdEncoding.EncodeToString(key),
		url.QueryEscape(issuer),
		"SHA1",
		"30",
		"6")
}
|
182
lang/zipext/zip.go
Normal file
182
lang/zipext/zip.go
Normal file
@ -0,0 +1,182 @@
|
|||||||
|
package zipext
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"archive/zip"
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"compress/flate"
|
||||||
|
"compress/gzip"
|
||||||
|
"errors"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sentinel errors returned by MemoryZip operations.
var (
	errAlreadyClosed = errors.New("already closed")
	errZipNotEnabled = errors.New("zip not enabled")
	errTgzNotEnabled = errors.New("tgz not enabled")
)
|
||||||
|
|
||||||
|
// MemoryZip incrementally builds a zip archive and/or a tar archive
// entirely in memory. Which formats are produced is fixed at
// construction time (see NewMemoryZip).
type MemoryZip struct {
	zipEnabled bool          // zip output requested at construction
	zipbuffer  *bytes.Buffer // backing storage for the zip archive (nil when disabled)
	zipwriter  *zip.Writer   // writes entries into zipbuffer (nil when disabled)

	tarEnabled bool          // tar(.gz) output requested at construction
	tarbuffer  *bytes.Buffer // intended backing storage for the tar archive (nil when disabled)
	tarwriter  *tar.Writer   // writes tar entries (nil when disabled)

	open bool // set false by Close; AddFile refuses new entries afterwards
}
|
||||||
|
|
||||||
|
func NewMemoryZip(enableGZip, enableTarGZ bool) *MemoryZip {
|
||||||
|
|
||||||
|
var bz *bytes.Buffer = nil
|
||||||
|
var z *zip.Writer = nil
|
||||||
|
|
||||||
|
var bt *bytes.Buffer = nil
|
||||||
|
var t *tar.Writer = nil
|
||||||
|
|
||||||
|
if enableGZip {
|
||||||
|
bz = new(bytes.Buffer)
|
||||||
|
z = zip.NewWriter(bz)
|
||||||
|
}
|
||||||
|
|
||||||
|
if enableTarGZ {
|
||||||
|
bt = new(bytes.Buffer)
|
||||||
|
t = tar.NewWriter(bt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &MemoryZip{
|
||||||
|
|
||||||
|
zipEnabled: enableGZip,
|
||||||
|
zipbuffer: bz,
|
||||||
|
zipwriter: z,
|
||||||
|
|
||||||
|
tarEnabled: enableTarGZ,
|
||||||
|
tarbuffer: bz,
|
||||||
|
tarwriter: t,
|
||||||
|
|
||||||
|
open: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (z *MemoryZip) AddFile(path string, data []byte) error {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if !z.open {
|
||||||
|
return errAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
if z.zipEnabled {
|
||||||
|
zipheader, err := z.zipwriter.CreateHeader(&zip.FileHeader{
|
||||||
|
Name: path,
|
||||||
|
Method: zip.Deflate,
|
||||||
|
Modified: time.Now(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = zipheader.Write(data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if z.tarEnabled {
|
||||||
|
tarheader := &tar.Header{
|
||||||
|
Name: path,
|
||||||
|
ModTime: time.Now(),
|
||||||
|
Typeflag: tar.TypeReg,
|
||||||
|
Size: int64(len(data)),
|
||||||
|
}
|
||||||
|
|
||||||
|
err = z.tarwriter.WriteHeader(tarheader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = z.tarwriter.Write(data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (z *MemoryZip) GetZip() ([]byte, error) {
|
||||||
|
if !z.zipEnabled {
|
||||||
|
return nil, errZipNotEnabled
|
||||||
|
}
|
||||||
|
|
||||||
|
if z.open {
|
||||||
|
err := z.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return z.zipbuffer.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (z *MemoryZip) GetTarGz() ([]byte, error) {
|
||||||
|
if !z.tarEnabled {
|
||||||
|
return nil, errTgzNotEnabled
|
||||||
|
}
|
||||||
|
|
||||||
|
if z.open {
|
||||||
|
err := z.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b := new(bytes.Buffer)
|
||||||
|
|
||||||
|
gf, err := gzip.NewWriterLevel(b, flate.BestCompression)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fw := bufio.NewWriter(gf)
|
||||||
|
_, err = fw.Write(z.tarbuffer.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = fw.Flush()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = gf.Close()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (z *MemoryZip) Close() error {
|
||||||
|
if !z.open {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
z.open = false
|
||||||
|
|
||||||
|
if z.zipEnabled {
|
||||||
|
err := z.zipwriter.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if z.tarEnabled {
|
||||||
|
err := z.tarwriter.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
6
mongo/go.mod
Normal file
6
mongo/go.mod
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
module blackforestbytes.com/goext/mongo
|
||||||
|
|
||||||
|
require (
|
||||||
|
go.mongodb.org/mongo-driver v1.5.3
|
||||||
|
)
|
||||||
|
go 1.19
|
0
mongo/mongoext/TODO
Normal file
0
mongo/mongoext/TODO
Normal file
Loading…
Reference in New Issue
Block a user