mirror of
https://github.com/skidoodle/ipinfo.git
synced 2026-04-28 01:27:34 +02:00
add domain whois/dns support, refactor codebase
This commit is contained in:
@@ -8,3 +8,4 @@
|
|||||||
.env
|
.env
|
||||||
*.mmdb
|
*.mmdb
|
||||||
.geoipupdate.lock
|
.geoipupdate.lock
|
||||||
|
.idea/
|
||||||
@@ -1,16 +1,20 @@
|
|||||||
module skidoodle/ipinfo
|
module ipinfo
|
||||||
|
|
||||||
go 1.24.0
|
go 1.25.1
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/joho/godotenv v1.5.1
|
github.com/joho/godotenv v1.5.1
|
||||||
|
github.com/likexian/whois v1.15.6
|
||||||
|
github.com/likexian/whois-parser v1.24.20
|
||||||
github.com/oschwald/maxminddb-golang v1.13.1
|
github.com/oschwald/maxminddb-golang v1.13.1
|
||||||
github.com/pkg/errors v0.9.1
|
golang.org/x/net v0.44.0
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
|
github.com/likexian/gokit v0.25.15 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/stretchr/testify v1.10.0 // indirect
|
github.com/stretchr/testify v1.10.0 // indirect
|
||||||
golang.org/x/sys v0.35.0 // indirect
|
golang.org/x/sys v0.36.0 // indirect
|
||||||
|
golang.org/x/text v0.29.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -2,17 +2,23 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||||
|
github.com/likexian/gokit v0.25.15 h1:QjospM1eXhdMMHwZRpMKKAHY/Wig9wgcREmLtf9NslY=
|
||||||
|
github.com/likexian/gokit v0.25.15/go.mod h1:S2QisdsxLEHWeD/XI0QMVeggp+jbxYqUxMvSBil7MRg=
|
||||||
|
github.com/likexian/whois v1.15.6 h1:hizngFHJTNQDlhwhU+FEGyPGxy8bRnf25gHDNrSB4Ag=
|
||||||
|
github.com/likexian/whois v1.15.6/go.mod h1:vx3kt3sZ4mx4XFgpaNp3GXQCZQIzAoyrUAkRtJwoM2I=
|
||||||
|
github.com/likexian/whois-parser v1.24.20 h1:oxEkRi0GxgqWQRLDMJpXU1EhgWmLmkqEFZ2ChXTeQLE=
|
||||||
|
github.com/likexian/whois-parser v1.24.20/go.mod h1:rAtaofg2luol09H+ogDzGIfcG8ig1NtM5R16uQADDz4=
|
||||||
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
|
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
|
||||||
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
|
github.com/oschwald/maxminddb-golang v1.13.1/go.mod h1:K4pgV9N/GcK694KSTmVSDTODk4IsCNThNdTmnaBZ/F8=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
||||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
|
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
||||||
|
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log/slog"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
@@ -10,12 +10,18 @@ import (
|
|||||||
func main() {
|
func main() {
|
||||||
resp, err := http.Get("http://localhost:3000/health")
|
resp, err := http.Get("http://localhost:3000/health")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Error performing health check: %v", err)
|
slog.Error("error performing healthcheck", "err", err)
|
||||||
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
|
defer func() {
|
||||||
|
if cerr := resp.Body.Close(); err != nil {
|
||||||
|
slog.Warn("failed to close response body", "err", cerr)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
if resp.StatusCode != http.StatusOK {
|
||||||
log.Printf("Health check failed: Status code %d", resp.StatusCode)
|
slog.Error("healthcheck failed", "status", resp.StatusCode)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,48 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cachedItem represents a generic item in the cache.
|
||||||
|
type cachedItem struct {
|
||||||
|
data any
|
||||||
|
time time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cache provides a thread-safe, generic caching mechanism with a TTL.
|
||||||
|
type Cache struct {
|
||||||
|
store sync.Map
|
||||||
|
ttl time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCache creates a new generic cache with the specified TTL.
|
||||||
|
func NewCache(ttl time.Duration) *Cache {
|
||||||
|
return &Cache{
|
||||||
|
ttl: ttl,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set adds a new entry to the cache.
|
||||||
|
func (c *Cache) Set(key any, data any) {
|
||||||
|
c.store.Store(key, cachedItem{
|
||||||
|
data: data,
|
||||||
|
time: time.Now(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves an entry from the cache.
|
||||||
|
func (c *Cache) Get(key any) (any, bool) {
|
||||||
|
if item, ok := c.store.Load(key); ok {
|
||||||
|
cached := item.(cachedItem)
|
||||||
|
if time.Since(cached.time) < c.ttl {
|
||||||
|
return cached.data, true
|
||||||
|
}
|
||||||
|
c.store.Delete(key)
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Global cache with a 10-minute TTL.
|
||||||
|
var cache = NewCache(10 * time.Minute)
|
||||||
@@ -1,271 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
db "skidoodle/ipinfo/internal/db"
|
|
||||||
iputils "skidoodle/ipinfo/utils/iputils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DataStruct represents the structure of the IP data returned by the API.
|
|
||||||
type DataStruct struct {
|
|
||||||
IP *string `json:"ip"`
|
|
||||||
Hostname *string `json:"hostname"`
|
|
||||||
Org *string `json:"org"`
|
|
||||||
City *string `json:"city"`
|
|
||||||
Region *string `json:"region"`
|
|
||||||
Country *string `json:"country"`
|
|
||||||
Timezone *string `json:"timezone"`
|
|
||||||
Loc *string `json:"loc"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ASNDataResponse represents the structure of the ASN data returned by the API.
|
|
||||||
type ASNDataResponse struct {
|
|
||||||
Details Details `json:"details"`
|
|
||||||
Prefixes PrefixInfo `json:"prefixes"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Details represents the structure of the ASN details returned by the API.
|
|
||||||
type Details struct {
|
|
||||||
ASN uint `json:"asn"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrefixInfo represents the structure of the ASN prefix information returned by the API.
|
|
||||||
type PrefixInfo struct {
|
|
||||||
IPv4 []string `json:"ipv4"`
|
|
||||||
IPv6 []string `json:"ipv6"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Global caches with 10 minute TTL
|
|
||||||
var ipCache = NewIPCache(10 * time.Minute)
|
|
||||||
var asnCache = NewASNCache(10 * time.Minute)
|
|
||||||
|
|
||||||
// cachedIPData represents a cached IP lookup result.
|
|
||||||
type cachedIPData struct {
|
|
||||||
data *DataStruct
|
|
||||||
time time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachedASNData represents a cached ASN lookup result.
|
|
||||||
type cachedASNData struct {
|
|
||||||
data *ASNDataResponse
|
|
||||||
time time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPCache provides thread-safe caching of IP lookup results
|
|
||||||
type IPCache struct {
|
|
||||||
cache sync.Map
|
|
||||||
ttl time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPCache creates a new IP cache with the specified TTL
|
|
||||||
func NewIPCache(ttl time.Duration) *IPCache {
|
|
||||||
return &IPCache{
|
|
||||||
ttl: ttl,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set adds a new entry to the IP cache
|
|
||||||
func (c *IPCache) Set(ipStr string, data *DataStruct) {
|
|
||||||
c.cache.Store(ipStr, cachedIPData{
|
|
||||||
data: data,
|
|
||||||
time: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get retrieves an entry from the IP cache
|
|
||||||
func (c *IPCache) Get(ipStr string) (*DataStruct, bool) {
|
|
||||||
if cachedData, ok := c.cache.Load(ipStr); ok {
|
|
||||||
cached := cachedData.(cachedIPData)
|
|
||||||
if time.Since(cached.time) < c.ttl {
|
|
||||||
return cached.data, true
|
|
||||||
}
|
|
||||||
c.cache.Delete(ipStr)
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ASNCache provides thread-safe caching of ASN lookup results
|
|
||||||
type ASNCache struct {
|
|
||||||
cache sync.Map
|
|
||||||
ttl time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewASNCache creates a new ASN cache with the specified TTL
|
|
||||||
func NewASNCache(ttl time.Duration) *ASNCache {
|
|
||||||
return &ASNCache{
|
|
||||||
ttl: ttl,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set adds a new entry to the ASN cache
|
|
||||||
func (c *ASNCache) Set(asn uint, data *ASNDataResponse) {
|
|
||||||
c.cache.Store(asn, cachedASNData{
|
|
||||||
data: data,
|
|
||||||
time: time.Now(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get retrieves an entry from the ASN cache
|
|
||||||
func (c *ASNCache) Get(asn uint) (*ASNDataResponse, bool) {
|
|
||||||
if cachedData, ok := c.cache.Load(asn); ok {
|
|
||||||
cached := cachedData.(cachedASNData)
|
|
||||||
if time.Since(cached.time) < c.ttl {
|
|
||||||
return cached.data, true
|
|
||||||
}
|
|
||||||
c.cache.Delete(asn)
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupIPData looks up IP data in the databases with caching
|
|
||||||
func LookupIPData(geoIP *db.GeoIPManager, ip net.IP) *DataStruct {
|
|
||||||
if data, found := ipCache.Get(ip.String()); found {
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
var cityRecord struct {
|
|
||||||
City struct {
|
|
||||||
Names map[string]string `maxminddb:"names"`
|
|
||||||
} `maxminddb:"city"`
|
|
||||||
Subdivisions []struct {
|
|
||||||
Names map[string]string `maxminddb:"names"`
|
|
||||||
} `maxminddb:"subdivisions"`
|
|
||||||
Country struct {
|
|
||||||
IsoCode string `maxminddb:"iso_code"`
|
|
||||||
Names map[string]string `maxminddb:"names"`
|
|
||||||
} `maxminddb:"country"`
|
|
||||||
Location struct {
|
|
||||||
Latitude float64 `maxminddb:"latitude"`
|
|
||||||
Longitude float64 `maxminddb:"longitude"`
|
|
||||||
Timezone string `maxminddb:"time_zone"`
|
|
||||||
} `maxminddb:"location"`
|
|
||||||
}
|
|
||||||
|
|
||||||
cityDB := geoIP.GetCityDB()
|
|
||||||
err := cityDB.Lookup(ip, &cityRecord)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Error looking up city data: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var asnRecord db.ASNRecord
|
|
||||||
asnDB := geoIP.GetASNDB()
|
|
||||||
err = asnDB.Lookup(ip, &asnRecord)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Error looking up ASN data: %v", err)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
hostname, err := net.LookupAddr(ip.String())
|
|
||||||
if err != nil || len(hostname) == 0 {
|
|
||||||
hostname = []string{""}
|
|
||||||
}
|
|
||||||
|
|
||||||
var sd *string
|
|
||||||
if len(cityRecord.Subdivisions) > 0 {
|
|
||||||
sd = ToPtr(cityRecord.Subdivisions[0].Names["en"])
|
|
||||||
}
|
|
||||||
|
|
||||||
data := &DataStruct{
|
|
||||||
IP: ToPtr(ip.String()),
|
|
||||||
Hostname: ToPtr(strings.TrimSuffix(hostname[0], ".")),
|
|
||||||
Org: ToPtr(fmt.Sprintf("AS%d %s", asnRecord.AutonomousSystemNumber, asnRecord.AutonomousSystemOrganization)),
|
|
||||||
City: ToPtr(cityRecord.City.Names["en"]),
|
|
||||||
Region: sd,
|
|
||||||
Country: ToPtr(cityRecord.Country.IsoCode),
|
|
||||||
Timezone: ToPtr(cityRecord.Location.Timezone),
|
|
||||||
Loc: ToPtr(fmt.Sprintf("%.4f,%.4f", cityRecord.Location.Latitude, cityRecord.Location.Longitude)),
|
|
||||||
}
|
|
||||||
|
|
||||||
ipCache.Set(ip.String(), data)
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupASNData looks up ASN data in the databases with caching
|
|
||||||
func LookupASNData(geoIP *db.GeoIPManager, targetASN uint) (*ASNDataResponse, error) {
|
|
||||||
if data, found := asnCache.Get(targetASN); found {
|
|
||||||
return data, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
prefixes := geoIP.GetASNPrefixes(targetASN)
|
|
||||||
if len(prefixes) == 0 {
|
|
||||||
return nil, fmt.Errorf("no prefixes found for ASN %d in the database", targetASN)
|
|
||||||
}
|
|
||||||
|
|
||||||
var orgName string
|
|
||||||
var ipv4Prefixes, ipv6Prefixes []string
|
|
||||||
|
|
||||||
var record db.ASNRecord
|
|
||||||
if err := geoIP.GetASNDB().Lookup(prefixes[0].IP, &record); err == nil {
|
|
||||||
orgName = record.AutonomousSystemOrganization
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, prefix := range prefixes {
|
|
||||||
prefixStr := prefix.String()
|
|
||||||
if strings.Contains(prefixStr, ":") {
|
|
||||||
ipv6Prefixes = append(ipv6Prefixes, prefixStr)
|
|
||||||
} else {
|
|
||||||
ipv4Prefixes = append(ipv4Prefixes, prefixStr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Strings(ipv4Prefixes)
|
|
||||||
sort.Strings(ipv6Prefixes)
|
|
||||||
|
|
||||||
response := &ASNDataResponse{
|
|
||||||
Details: Details{
|
|
||||||
ASN: targetASN,
|
|
||||||
Name: orgName,
|
|
||||||
},
|
|
||||||
Prefixes: PrefixInfo{
|
|
||||||
IPv4: ipv4Prefixes,
|
|
||||||
IPv6: ipv6Prefixes,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
asnCache.Set(targetASN, response)
|
|
||||||
|
|
||||||
return response, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToPtr converts string to pointer
|
|
||||||
func ToPtr(s string) *string {
|
|
||||||
if s == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &s
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsBogon checks if the IP is a bogon IP
|
|
||||||
func IsBogon(ip net.IP) bool {
|
|
||||||
for _, net := range iputils.BogonNets {
|
|
||||||
if net.Contains(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRealIP extracts the client's real IP address from request headers
|
|
||||||
func GetRealIP(r *http.Request) string {
|
|
||||||
for _, header := range []string{"CF-Connecting-IP", "X-Real-IP", "X-Forwarded-For"} {
|
|
||||||
if ip := r.Header.Get(header); ip != "" {
|
|
||||||
return strings.TrimSpace(strings.Split(ip, ",")[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
|
||||||
if err != nil {
|
|
||||||
return r.RemoteAddr
|
|
||||||
}
|
|
||||||
return host
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,221 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"ipinfo/internal/db"
|
||||||
|
|
||||||
|
"github.com/likexian/whois-parser"
|
||||||
|
"golang.org/x/net/publicsuffix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LookupIPData looks up IP data in the databases with caching.
|
||||||
|
func LookupIPData(geoIP *db.GeoIPManager, ip net.IP) *DataStruct {
|
||||||
|
ipStr := ip.String()
|
||||||
|
if data, found := cache.Get(ipStr); found {
|
||||||
|
return data.(*DataStruct)
|
||||||
|
}
|
||||||
|
|
||||||
|
var cityRecord struct {
|
||||||
|
City struct {
|
||||||
|
Names map[string]string `maxminddb:"names"`
|
||||||
|
} `maxminddb:"city"`
|
||||||
|
Subdivisions []struct {
|
||||||
|
Names map[string]string `maxminddb:"names"`
|
||||||
|
} `maxminddb:"subdivisions"`
|
||||||
|
Country struct {
|
||||||
|
IsoCode string `maxminddb:"iso_code"`
|
||||||
|
} `maxminddb:"country"`
|
||||||
|
Location struct {
|
||||||
|
Latitude float64 `maxminddb:"latitude"`
|
||||||
|
Longitude float64 `maxminddb:"longitude"`
|
||||||
|
Timezone string `maxminddb:"time_zone"`
|
||||||
|
} `maxminddb:"location"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := geoIP.GetCityDB().Lookup(ip, &cityRecord); err != nil {
|
||||||
|
slog.Error("failed to look up city data", "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var asnRecord db.ASNRecord
|
||||||
|
if err := geoIP.GetASNDB().Lookup(ip, &asnRecord); err != nil {
|
||||||
|
slog.Error("failed to look up asn data", "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
hostname, _ := net.LookupAddr(ipStr)
|
||||||
|
hostnameStr := ""
|
||||||
|
if len(hostname) > 0 {
|
||||||
|
hostnameStr = strings.TrimSuffix(hostname[0], ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
var region *string
|
||||||
|
if len(cityRecord.Subdivisions) > 0 {
|
||||||
|
region = ToPtr(cityRecord.Subdivisions[0].Names["en"])
|
||||||
|
}
|
||||||
|
|
||||||
|
data := &DataStruct{
|
||||||
|
IP: ToPtr(ipStr),
|
||||||
|
Hostname: ToPtr(hostnameStr),
|
||||||
|
Org: ToPtr(fmt.Sprintf("AS%d %s", asnRecord.AutonomousSystemNumber, asnRecord.AutonomousSystemOrganization)),
|
||||||
|
City: ToPtr(cityRecord.City.Names["en"]),
|
||||||
|
Region: region,
|
||||||
|
Country: ToPtr(cityRecord.Country.IsoCode),
|
||||||
|
Timezone: ToPtr(cityRecord.Location.Timezone),
|
||||||
|
Loc: ToPtr(fmt.Sprintf("%.4f,%.4f", cityRecord.Location.Latitude, cityRecord.Location.Longitude)),
|
||||||
|
}
|
||||||
|
|
||||||
|
cache.Set(ipStr, data)
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupASNData looks up ASN data in the databases with caching.
|
||||||
|
func LookupASNData(geoIP *db.GeoIPManager, targetASN uint) (*ASNDataResponse, error) {
|
||||||
|
if data, found := cache.Get(targetASN); found {
|
||||||
|
return data.(*ASNDataResponse), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
prefixes := geoIP.GetASNPrefixes(targetASN)
|
||||||
|
if len(prefixes) == 0 {
|
||||||
|
return nil, fmt.Errorf("no prefixes found for as%d in the database", targetASN)
|
||||||
|
}
|
||||||
|
|
||||||
|
var orgName string
|
||||||
|
var record db.ASNRecord
|
||||||
|
if err := geoIP.GetASNDB().Lookup(prefixes[0].IP, &record); err == nil {
|
||||||
|
orgName = record.AutonomousSystemOrganization
|
||||||
|
}
|
||||||
|
|
||||||
|
var ipv4Prefixes, ipv6Prefixes []string
|
||||||
|
for _, prefix := range prefixes {
|
||||||
|
prefixStr := prefix.String()
|
||||||
|
if strings.Contains(prefixStr, ":") {
|
||||||
|
ipv6Prefixes = append(ipv6Prefixes, prefixStr)
|
||||||
|
} else {
|
||||||
|
ipv4Prefixes = append(ipv4Prefixes, prefixStr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(ipv4Prefixes)
|
||||||
|
sort.Strings(ipv6Prefixes)
|
||||||
|
|
||||||
|
response := &ASNDataResponse{
|
||||||
|
Details: ASNDetails{
|
||||||
|
ASN: targetASN,
|
||||||
|
Name: orgName,
|
||||||
|
},
|
||||||
|
Prefixes: ASNPrefixInfo{
|
||||||
|
IPv4: ipv4Prefixes,
|
||||||
|
IPv6: ipv6Prefixes,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cache.Set(targetASN, response)
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupDomainData looks up domain data with caching.
|
||||||
|
func LookupDomainData(domain string) (*DomainDataResponse, error) {
|
||||||
|
if data, found := cache.Get(domain); found {
|
||||||
|
return data.(*DomainDataResponse), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
eTLD, err := publicsuffix.EffectiveTLDPlusOne(domain)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid domain: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
whoisRaw, err := performWhoisWithFallback(eTLD)
|
||||||
|
var whoisResult interface{}
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("whois lookup failed after fallback", "domain", eTLD, "err", err)
|
||||||
|
whoisResult = fmt.Sprintf("whois lookup failed: %v", err)
|
||||||
|
} else {
|
||||||
|
parsed, parseErr := whoisparser.Parse(whoisRaw)
|
||||||
|
if parseErr != nil {
|
||||||
|
slog.Warn("failed to parse whois data, returning raw text", "domain", eTLD, "err", parseErr)
|
||||||
|
whoisResult = whoisRaw
|
||||||
|
} else {
|
||||||
|
whoisResult = formatWhois(parsed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dnsData := DNSData{}
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
var mu sync.Mutex
|
||||||
|
|
||||||
|
lookupTasks := []func(){
|
||||||
|
func() { // A and AAAA records
|
||||||
|
ips, err := net.LookupIP(domain)
|
||||||
|
if err == nil {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
for _, ip := range ips {
|
||||||
|
if ip.To4() != nil {
|
||||||
|
dnsData.A = append(dnsData.A, ip.String())
|
||||||
|
} else {
|
||||||
|
dnsData.AAAA = append(dnsData.AAAA, ip.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func() { // CNAME record
|
||||||
|
cname, err := net.LookupCNAME(domain)
|
||||||
|
if err == nil && cname != domain+"." && cname != "" {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
dnsData.CNAME = strings.TrimSuffix(cname, ".")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func() { // MX records
|
||||||
|
mxs, err := net.LookupMX(domain)
|
||||||
|
if err == nil {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
for _, mx := range mxs {
|
||||||
|
dnsData.MX = append(dnsData.MX, fmt.Sprintf("%d %s", mx.Pref, strings.TrimSuffix(mx.Host, ".")))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func() { // TXT records
|
||||||
|
txts, err := net.LookupTXT(domain)
|
||||||
|
if err == nil {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
dnsData.TXT = append(dnsData.TXT, txts...)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
func() { // NS records
|
||||||
|
nss, err := net.LookupNS(eTLD)
|
||||||
|
if err == nil {
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
for _, ns := range nss {
|
||||||
|
dnsData.NS = append(dnsData.NS, strings.TrimSuffix(ns.Host, "."))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(len(lookupTasks))
|
||||||
|
for _, task := range lookupTasks {
|
||||||
|
go func(t func()) {
|
||||||
|
defer wg.Done()
|
||||||
|
t()
|
||||||
|
}(task)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
response := &DomainDataResponse{
|
||||||
|
Whois: whoisResult,
|
||||||
|
DNS: dnsData,
|
||||||
|
}
|
||||||
|
|
||||||
|
cache.Set(domain, response)
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
@@ -0,0 +1,93 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
// DataStruct represents the structure of the IP data returned by the API.
|
||||||
|
type DataStruct struct {
|
||||||
|
IP *string `json:"ip"`
|
||||||
|
Hostname *string `json:"hostname"`
|
||||||
|
Org *string `json:"org"`
|
||||||
|
City *string `json:"city"`
|
||||||
|
Region *string `json:"region"`
|
||||||
|
Country *string `json:"country"`
|
||||||
|
Timezone *string `json:"timezone"`
|
||||||
|
Loc *string `json:"loc"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ASNDataResponse represents the structure of the ASN data returned by the API.
|
||||||
|
type ASNDataResponse struct {
|
||||||
|
Details ASNDetails `json:"details"`
|
||||||
|
Prefixes ASNPrefixInfo `json:"prefixes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ASNDetails represents the structure of the ASN details returned by the API.
|
||||||
|
type ASNDetails struct {
|
||||||
|
ASN uint `json:"asn"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ASNPrefixInfo represents the structure of the ASN prefix information returned by the API.
|
||||||
|
type ASNPrefixInfo struct {
|
||||||
|
IPv4 []string `json:"ipv4"`
|
||||||
|
IPv6 []string `json:"ipv6"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DomainDataResponse represents the structure of the domain data returned by the API.
|
||||||
|
type DomainDataResponse struct {
|
||||||
|
Whois interface{} `json:"whois"`
|
||||||
|
DNS DNSData `json:"dns"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// DNSData represents the structure of the DNS records.
|
||||||
|
type DNSData struct {
|
||||||
|
A []string `json:"A,omitempty"`
|
||||||
|
AAAA []string `json:"AAAA,omitempty"`
|
||||||
|
CNAME string `json:"CNAME,omitempty"`
|
||||||
|
MX []string `json:"MX,omitempty"`
|
||||||
|
TXT []string `json:"TXT,omitempty"`
|
||||||
|
NS []string `json:"NS,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhoisInfo is a sanitized version of the parsed whois data for the API response.
|
||||||
|
type WhoisInfo struct {
|
||||||
|
Domain *WhoisDomain `json:"domain,omitempty"`
|
||||||
|
Registrar *WhoisRegistrar `json:"registrar,omitempty"`
|
||||||
|
Registrant *WhoisContact `json:"registrant,omitempty"`
|
||||||
|
Admin *WhoisContact `json:"admin,omitempty"`
|
||||||
|
Tech *WhoisContact `json:"tech,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhoisDomain omits unnecessary fields from the original parsed domain struct.
|
||||||
|
type WhoisDomain struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
Domain string `json:"domain,omitempty"`
|
||||||
|
WhoisServer string `json:"whois_server,omitempty"`
|
||||||
|
Status []string `json:"status,omitempty"`
|
||||||
|
NameServers []string `json:"name_servers,omitempty"`
|
||||||
|
DNSSEC bool `json:"dnssec"`
|
||||||
|
CreatedDate string `json:"created_date,omitempty"`
|
||||||
|
UpdatedDate string `json:"updated_date,omitempty"`
|
||||||
|
ExpirationDate string `json:"expiration_date,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhoisRegistrar contains registrar information.
|
||||||
|
type WhoisRegistrar struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
Email string `json:"email,omitempty"`
|
||||||
|
Phone string `json:"phone,omitempty"`
|
||||||
|
ReferralURL string `json:"referral_url,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// WhoisContact contains contact information for registrant, admin, or tech.
|
||||||
|
type WhoisContact struct {
|
||||||
|
ID string `json:"id,omitempty"`
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
Organization string `json:"organization,omitempty"`
|
||||||
|
Street string `json:"street,omitempty"`
|
||||||
|
City string `json:"city,omitempty"`
|
||||||
|
Province string `json:"province,omitempty"`
|
||||||
|
PostalCode string `json:"postal_code,omitempty"`
|
||||||
|
Country string `json:"country,omitempty"`
|
||||||
|
Phone string `json:"phone,omitempty"`
|
||||||
|
Fax string `json:"fax,omitempty"`
|
||||||
|
Email string `json:"email,omitempty"`
|
||||||
|
}
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"ipinfo/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ToPtr converts a string to a pointer, returning nil for empty strings.
|
||||||
|
func ToPtr(s string) *string {
|
||||||
|
if s == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &s
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsBogon checks if the IP is a bogon IP.
|
||||||
|
func IsBogon(ip net.IP) bool {
|
||||||
|
for _, network := range utils.BogonNets {
|
||||||
|
if network.Contains(ip) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
@@ -0,0 +1,187 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/likexian/whois"
|
||||||
|
"github.com/likexian/whois-parser"
|
||||||
|
)
|
||||||
|
|
||||||
|
// performWhoisWithFallback attempts a WHOIS query and falls back to IPv4 if it suspects an IPv6 issue.
|
||||||
|
func performWhoisWithFallback(domain string) (string, error) {
|
||||||
|
result, err := whois.Whois(domain)
|
||||||
|
if err == nil {
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.Contains(err.Error(), "dial tcp [") && strings.Contains(err.Error(), "]:43") {
|
||||||
|
slog.Warn("whois failed with potential ipv6 issue, falling back to ipv4", "domain", domain, "err", err)
|
||||||
|
|
||||||
|
serverHost, serverErr := getWhoisServerForDomain(domain)
|
||||||
|
if serverErr != nil {
|
||||||
|
slog.Error("could not find whois server during fallback", "domain", domain, "err", serverErr)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
ips, resolveErr := net.LookupIP(serverHost)
|
||||||
|
if resolveErr != nil {
|
||||||
|
slog.Error("could not resolve whois server hostname during fallback", "server", serverHost, "err", resolveErr)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ip := range ips {
|
||||||
|
if ip.To4() != nil {
|
||||||
|
ipv4Server := ip.String()
|
||||||
|
slog.Info("retrying whois query with explicit ipv4 address", "domain", domain, "server", ipv4Server)
|
||||||
|
return queryWhoisServer(domain, ipv4Server)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slog.Warn("no ipv4 address found for whois server during fallback", "server", serverHost)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getWhoisServerForDomain finds the authoritative WHOIS server for a domain
// by asking IANA's root WHOIS service about the domain's TLD.
//
// It returns the hostname from the "whois:" line of IANA's response, or an
// error if the domain is malformed, the connection fails, or no server is
// listed for the TLD.
func getWhoisServerForDomain(domain string) (string, error) {
	parts := strings.Split(domain, ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("invalid domain: %s", domain)
	}
	tld := parts[len(parts)-1]

	// Apply the same 10s connect timeout and I/O deadline discipline as
	// queryWhoisServer; previously this dial had no timeout at all, so a
	// stalled IANA connection could hang the request indefinitely.
	conn, err := net.DialTimeout("tcp", "whois.iana.org:43", 10*time.Second)
	if err != nil {
		return "", fmt.Errorf("could not connect to iana whois server: %w", err)
	}
	defer func() {
		if err := conn.Close(); err != nil {
			slog.Warn("error closing connection to iana whois server", "err", err)
		}
	}()
	_ = conn.SetDeadline(time.Now().Add(10 * time.Second))

	// WHOIS protocol: send the query followed by CRLF.
	if _, err = conn.Write([]byte(tld + "\r\n")); err != nil {
		return "", fmt.Errorf("could not send query to iana whois server: %w", err)
	}

	// Scan the response for a line of the form "whois: <hostname>".
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(strings.ToLower(line), "whois:") {
			if serverParts := strings.Fields(line); len(serverParts) > 1 {
				return serverParts[1], nil
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return "", fmt.Errorf("error reading from iana whois server: %w", err)
	}
	return "", fmt.Errorf("could not find whois server for TLD: %s", tld)
}
|
||||||
|
|
||||||
|
// queryWhoisServer manually performs a WHOIS query to a specific server IP.
func queryWhoisServer(domain, serverIP string) (string, error) {
	const timeout = 10 * time.Second

	addr := net.JoinHostPort(serverIP, "43")
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return "", fmt.Errorf("could not connect to %s: %w", serverIP, err)
	}
	defer func() {
		if cerr := conn.Close(); cerr != nil {
			slog.Warn("error closing connection to whois server", "server", serverIP, "err", cerr)
		}
	}()

	// Bound the whole exchange, then send the query per the WHOIS protocol.
	_ = conn.SetDeadline(time.Now().Add(timeout))
	if _, err = conn.Write([]byte(domain + "\r\n")); err != nil {
		return "", fmt.Errorf("could not send query to %s: %w", serverIP, err)
	}

	// The server writes its full response and closes the connection.
	body, err := io.ReadAll(conn)
	if err != nil {
		return "", fmt.Errorf("could not read response from %s: %w", serverIP, err)
	}
	return string(body), nil
}
|
||||||
|
|
||||||
|
// formatWhois converts a parsed whois object to the simplified WhoisInfo struct.
|
||||||
|
func formatWhois(parsed whoisparser.WhoisInfo) WhoisInfo {
|
||||||
|
info := WhoisInfo{}
|
||||||
|
if parsed.Domain != nil {
|
||||||
|
info.Domain = &WhoisDomain{
|
||||||
|
ID: parsed.Domain.ID,
|
||||||
|
Domain: parsed.Domain.Domain,
|
||||||
|
WhoisServer: parsed.Domain.WhoisServer,
|
||||||
|
Status: parsed.Domain.Status,
|
||||||
|
NameServers: parsed.Domain.NameServers,
|
||||||
|
DNSSEC: parsed.Domain.DNSSec,
|
||||||
|
CreatedDate: parsed.Domain.CreatedDate,
|
||||||
|
UpdatedDate: parsed.Domain.UpdatedDate,
|
||||||
|
ExpirationDate: parsed.Domain.ExpirationDate,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if parsed.Registrar != nil {
|
||||||
|
info.Registrar = &WhoisRegistrar{
|
||||||
|
ID: parsed.Registrar.ID,
|
||||||
|
Name: parsed.Registrar.Name,
|
||||||
|
Email: parsed.Registrar.Email,
|
||||||
|
Phone: parsed.Registrar.Phone,
|
||||||
|
ReferralURL: parsed.Registrar.ReferralURL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if parsed.Registrant != nil {
|
||||||
|
info.Registrant = &WhoisContact{
|
||||||
|
ID: parsed.Registrant.ID,
|
||||||
|
Name: parsed.Registrant.Name,
|
||||||
|
Organization: parsed.Registrant.Organization,
|
||||||
|
Street: parsed.Registrant.Street,
|
||||||
|
City: parsed.Registrant.City,
|
||||||
|
Province: parsed.Registrant.Province,
|
||||||
|
PostalCode: parsed.Registrant.PostalCode,
|
||||||
|
Country: parsed.Registrant.Country,
|
||||||
|
Phone: parsed.Registrant.Phone,
|
||||||
|
Fax: parsed.Registrant.Fax,
|
||||||
|
Email: parsed.Registrant.Email,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if parsed.Administrative != nil {
|
||||||
|
info.Admin = &WhoisContact{
|
||||||
|
ID: parsed.Administrative.ID,
|
||||||
|
Name: parsed.Administrative.Name,
|
||||||
|
Organization: parsed.Administrative.Organization,
|
||||||
|
Street: parsed.Administrative.Street,
|
||||||
|
City: parsed.Administrative.City,
|
||||||
|
Province: parsed.Administrative.Province,
|
||||||
|
PostalCode: parsed.Administrative.PostalCode,
|
||||||
|
Country: parsed.Administrative.Country,
|
||||||
|
Phone: parsed.Administrative.Phone,
|
||||||
|
Fax: parsed.Administrative.Fax,
|
||||||
|
Email: parsed.Administrative.Email,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if parsed.Technical != nil {
|
||||||
|
info.Tech = &WhoisContact{
|
||||||
|
ID: parsed.Technical.ID,
|
||||||
|
Name: parsed.Technical.Name,
|
||||||
|
Organization: parsed.Technical.Organization,
|
||||||
|
Street: parsed.Technical.Street,
|
||||||
|
City: parsed.Technical.City,
|
||||||
|
Province: parsed.Technical.Province,
|
||||||
|
PostalCode: parsed.Technical.PostalCode,
|
||||||
|
Country: parsed.Technical.Country,
|
||||||
|
Phone: parsed.Technical.Phone,
|
||||||
|
Fax: parsed.Technical.Fax,
|
||||||
|
Email: parsed.Technical.Email,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return info
|
||||||
|
}
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/oschwald/maxminddb-golang"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetCityDB retrieves the city database reader.
|
||||||
|
func (g *GeoIPManager) GetCityDB() *maxminddb.Reader {
|
||||||
|
g.mu.RLock()
|
||||||
|
defer g.mu.RUnlock()
|
||||||
|
return g.cityDB
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetASNDB retrieves the ASN database reader.
|
||||||
|
func (g *GeoIPManager) GetASNDB() *maxminddb.Reader {
|
||||||
|
g.mu.RLock()
|
||||||
|
defer g.mu.RUnlock()
|
||||||
|
return g.asnDB
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetASNPrefixes retrieves the list of IP prefixes for a given ASN.
|
||||||
|
func (g *GeoIPManager) GetASNPrefixes(asn uint) []*net.IPNet {
|
||||||
|
g.mu.RLock()
|
||||||
|
defer g.mu.RUnlock()
|
||||||
|
return g.asnPrefixMap[asn]
|
||||||
|
}
|
||||||
@@ -1,320 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
|
||||||
"crypto/md5"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"skidoodle/ipinfo/internal/logger"
|
|
||||||
|
|
||||||
"github.com/oschwald/maxminddb-golang"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Constants for database names and paths
|
|
||||||
const (
|
|
||||||
CityDBName = "GeoLite2-City"
|
|
||||||
ASNDBName = "GeoLite2-ASN"
|
|
||||||
DBExtension = ".mmdb"
|
|
||||||
CityDBPath = CityDBName + DBExtension
|
|
||||||
ASNDBPath = ASNDBName + DBExtension
|
|
||||||
)
|
|
||||||
|
|
||||||
// Error messages
|
|
||||||
var (
|
|
||||||
ErrDatabaseNotFound = errors.New("database file not found")
|
|
||||||
ErrDatabaseOpen = errors.New("failed to open database")
|
|
||||||
ErrDownloadFailed = errors.New("failed to download database")
|
|
||||||
)
|
|
||||||
|
|
||||||
// ASNRecord represents a record in the ASN database
|
|
||||||
type ASNRecord struct {
|
|
||||||
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
|
|
||||||
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GeoIPManager manages the GeoIP databases
|
|
||||||
type GeoIPManager struct {
|
|
||||||
cityDB *maxminddb.Reader
|
|
||||||
asnDB *maxminddb.Reader
|
|
||||||
asnPrefixMap map[uint][]*net.IPNet
|
|
||||||
httpClient *http.Client
|
|
||||||
mu sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGeoIPManager creates a new GeoIPManager
|
|
||||||
func NewGeoIPManager() (*GeoIPManager, error) {
|
|
||||||
manager := &GeoIPManager{
|
|
||||||
httpClient: &http.Client{Timeout: 2 * time.Minute},
|
|
||||||
}
|
|
||||||
if err := manager.Initialize(); err != nil {
|
|
||||||
return nil, fmt.Errorf("initializing GeoIP manager: %w", err)
|
|
||||||
}
|
|
||||||
return manager, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize initializes the GeoIPManager by opening the database files
|
|
||||||
func (g *GeoIPManager) Initialize() error {
|
|
||||||
g.mu.Lock()
|
|
||||||
defer g.mu.Unlock()
|
|
||||||
|
|
||||||
if err := g.openDB(CityDBPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := g.openDB(ASNDBPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
g.buildASNPrefixMap()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openDB opens a MaxMind DB file
|
|
||||||
func (g *GeoIPManager) openDB(path string) error {
|
|
||||||
db, err := maxminddb.Open(path)
|
|
||||||
if err == nil {
|
|
||||||
if path == CityDBPath {
|
|
||||||
g.cityDB = db
|
|
||||||
} else {
|
|
||||||
g.asnDB = db
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return errors.Wrapf(ErrDatabaseOpen, "failed to open %s: %v", path, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Log.Info("Database not found, attempting initial download", "path", path)
|
|
||||||
if err := g.DownloadDatabases(context.Background()); err != nil {
|
|
||||||
return errors.Wrap(ErrDownloadFailed, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
db, err = maxminddb.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(ErrDatabaseOpen, "failed to open %s after download: %v", path, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if path == CityDBPath {
|
|
||||||
g.cityDB = db
|
|
||||||
} else {
|
|
||||||
g.asnDB = db
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// buildASNPrefixMap builds a map of ASN prefixes for fast lookups
|
|
||||||
func (g *GeoIPManager) buildASNPrefixMap() {
|
|
||||||
logger.Log.Info("Building ASN prefix map for fast lookups...")
|
|
||||||
startTime := time.Now()
|
|
||||||
g.asnPrefixMap = make(map[uint][]*net.IPNet)
|
|
||||||
if g.asnDB == nil {
|
|
||||||
logger.Log.Warn("ASN database is not available, skipping prefix map build")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
networks := g.asnDB.Networks()
|
|
||||||
for networks.Next() {
|
|
||||||
var record ASNRecord
|
|
||||||
subnet, err := networks.Network(&record)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if record.AutonomousSystemNumber > 0 {
|
|
||||||
g.asnPrefixMap[record.AutonomousSystemNumber] = append(g.asnPrefixMap[record.AutonomousSystemNumber], subnet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
logger.Log.Info("Finished building ASN prefix map", "duration", time.Since(startTime))
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadDatabases downloads the GeoIP databases
|
|
||||||
func (g *GeoIPManager) DownloadDatabases(ctx context.Context) error {
|
|
||||||
accountID := os.Getenv("GEOIPUPDATE_ACCOUNT_ID")
|
|
||||||
licenseKey := os.Getenv("GEOIPUPDATE_LICENSE_KEY")
|
|
||||||
if accountID == "" || licenseKey == "" {
|
|
||||||
return errors.New("GEOIPUPDATE_ACCOUNT_ID and GEOIPUPDATE_LICENSE_KEY must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
editionIDs := os.Getenv("GEOIPUPDATE_EDITION_IDS")
|
|
||||||
if editionIDs == "" {
|
|
||||||
editionIDs = "GeoLite2-City GeoLite2-ASN"
|
|
||||||
}
|
|
||||||
|
|
||||||
var firstError error
|
|
||||||
for _, editionID := range strings.Fields(editionIDs) {
|
|
||||||
if err := g.downloadEdition(ctx, accountID, licenseKey, editionID); err != nil {
|
|
||||||
logger.Log.Error("Failed to download edition", "edition", editionID, "error", err)
|
|
||||||
if firstError == nil {
|
|
||||||
firstError = err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return firstError
|
|
||||||
}
|
|
||||||
|
|
||||||
// downloadEdition downloads a specific GeoIP database edition
|
|
||||||
func (g *GeoIPManager) downloadEdition(ctx context.Context, accountID, licenseKey, editionID string) error {
|
|
||||||
dbPath := editionID + DBExtension
|
|
||||||
logger.Log.Info("Checking for updates", "database", dbPath)
|
|
||||||
|
|
||||||
hash, err := fileMD5(dbPath)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return errors.Wrapf(err, "could not calculate MD5 for %s", dbPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
downloadURL := fmt.Sprintf("https://updates.maxmind.com/geoip/databases/%s/update?db_md5=%s", editionID, hash)
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", downloadURL, nil)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not create request")
|
|
||||||
}
|
|
||||||
req.SetBasicAuth(accountID, licenseKey)
|
|
||||||
|
|
||||||
resp, err := g.httpClient.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "http request failed")
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
if resp.StatusCode == http.StatusNotModified {
|
|
||||||
logger.Log.Info("Database is already up to date", "database", dbPath)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
body, _ := io.ReadAll(resp.Body)
|
|
||||||
return fmt.Errorf("received non-200 status code: %d - %s", resp.StatusCode, string(body))
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Log.Info("Downloading and decompressing new version", "database", dbPath)
|
|
||||||
|
|
||||||
gzr, err := gzip.NewReader(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not create gzip reader")
|
|
||||||
}
|
|
||||||
defer gzr.Close()
|
|
||||||
|
|
||||||
tmpPath := dbPath + ".tmp"
|
|
||||||
outFile, err := os.Create(tmpPath)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "could not create temporary file")
|
|
||||||
}
|
|
||||||
defer outFile.Close()
|
|
||||||
|
|
||||||
if _, err := io.Copy(outFile, gzr); err != nil {
|
|
||||||
os.Remove(tmpPath)
|
|
||||||
return errors.Wrap(err, "could not decompress and write db file")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Rename(tmpPath, dbPath); err != nil {
|
|
||||||
return errors.Wrap(err, "could not replace database file")
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Log.Info("Successfully downloaded and updated", "database", dbPath)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateDatabases updates the GeoIP databases
|
|
||||||
func (g *GeoIPManager) UpdateDatabases() error {
|
|
||||||
if err := g.DownloadDatabases(context.Background()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
g.mu.Lock()
|
|
||||||
defer g.mu.Unlock()
|
|
||||||
|
|
||||||
if g.cityDB != nil {
|
|
||||||
g.cityDB.Close()
|
|
||||||
}
|
|
||||||
if g.asnDB != nil {
|
|
||||||
g.asnDB.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
var openErr error
|
|
||||||
g.cityDB, openErr = maxminddb.Open(CityDBPath)
|
|
||||||
if openErr != nil {
|
|
||||||
return errors.Wrap(openErr, "reopening city database")
|
|
||||||
}
|
|
||||||
|
|
||||||
g.asnDB, openErr = maxminddb.Open(ASNDBPath)
|
|
||||||
if openErr != nil {
|
|
||||||
return errors.Wrap(openErr, "reopening ASN database")
|
|
||||||
}
|
|
||||||
|
|
||||||
g.buildASNPrefixMap()
|
|
||||||
logger.Log.Info("Successfully reloaded GeoIP databases")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartUpdater starts a background updater for the GeoIP databases
|
|
||||||
func (g *GeoIPManager) StartUpdater(ctx context.Context, updateInterval time.Duration) {
|
|
||||||
logger.Log.Info("Starting MaxMind GeoIP database updater", "interval", updateInterval.String())
|
|
||||||
ticker := time.NewTicker(updateInterval)
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
logger.Log.Info("Performing scheduled GeoIP database update")
|
|
||||||
if err := g.UpdateDatabases(); err != nil {
|
|
||||||
logger.Log.Error("Failed to update databases", "error", err)
|
|
||||||
}
|
|
||||||
case <-ctx.Done():
|
|
||||||
ticker.Stop()
|
|
||||||
logger.Log.Info("GeoIP database updater stopped")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the GeoIP database readers
|
|
||||||
func (g *GeoIPManager) Close() {
|
|
||||||
g.mu.Lock()
|
|
||||||
defer g.mu.Unlock()
|
|
||||||
if g.cityDB != nil {
|
|
||||||
g.cityDB.Close()
|
|
||||||
}
|
|
||||||
if g.asnDB != nil {
|
|
||||||
g.asnDB.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetCityDB retrieves the city database reader
|
|
||||||
func (g *GeoIPManager) GetCityDB() *maxminddb.Reader {
|
|
||||||
g.mu.RLock()
|
|
||||||
defer g.mu.RUnlock()
|
|
||||||
return g.cityDB
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetASNDB retrieves the ASN database reader
|
|
||||||
func (g *GeoIPManager) GetASNDB() *maxminddb.Reader {
|
|
||||||
g.mu.RLock()
|
|
||||||
defer g.mu.RUnlock()
|
|
||||||
return g.asnDB
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetASNPrefixes retrieves the list of IP prefixes for a given ASN
|
|
||||||
func (g *GeoIPManager) GetASNPrefixes(asn uint) []*net.IPNet {
|
|
||||||
g.mu.RLock()
|
|
||||||
defer g.mu.RUnlock()
|
|
||||||
return g.asnPrefixMap[asn]
|
|
||||||
}
|
|
||||||
|
|
||||||
// fileMD5 calculates the MD5 hash of a file
|
|
||||||
func fileMD5(path string) (string, error) {
|
|
||||||
file, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
hash := md5.New()
|
|
||||||
if _, err := io.Copy(hash, file); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%x", hash.Sum(nil)), nil
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,124 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/oschwald/maxminddb-golang"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GeoIPManager manages the GeoIP databases: it opens, serves, and hot-reloads
// the MaxMind City and ASN readers plus a derived ASN->prefix index.
type GeoIPManager struct {
	cityDB       *maxminddb.Reader     // GeoLite2-City reader; nil until openDB succeeds
	asnDB        *maxminddb.Reader     // GeoLite2-ASN reader; nil until openDB succeeds
	asnPrefixMap map[uint][]*net.IPNet // ASN number -> announced prefixes, rebuilt by buildASNPrefixMap
	httpClient   *http.Client          // client used for MaxMind update downloads
	mu           sync.RWMutex          // guards all fields above; do not copy this struct
}
|
||||||
|
|
||||||
|
// NewGeoIPManager creates a new GeoIPManager
|
||||||
|
func NewGeoIPManager() (*GeoIPManager, error) {
|
||||||
|
manager := &GeoIPManager{
|
||||||
|
httpClient: &http.Client{Timeout: 2 * time.Minute},
|
||||||
|
}
|
||||||
|
if err := manager.Initialize(); err != nil {
|
||||||
|
return nil, fmt.Errorf("initializing geoip manager: %w", err)
|
||||||
|
}
|
||||||
|
return manager, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize initializes the GeoIPManager by opening the database files
|
||||||
|
func (g *GeoIPManager) Initialize() error {
|
||||||
|
g.mu.Lock()
|
||||||
|
defer g.mu.Unlock()
|
||||||
|
|
||||||
|
if err := g.openDB(CityDBPath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := g.openDB(ASNDBPath); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
g.buildASNPrefixMap()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the GeoIP database readers
|
||||||
|
func (g *GeoIPManager) Close() {
|
||||||
|
g.mu.Lock()
|
||||||
|
defer g.mu.Unlock()
|
||||||
|
if g.cityDB != nil {
|
||||||
|
if err := g.cityDB.Close(); err != nil {
|
||||||
|
slog.Warn("failed to close citydb", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if g.asnDB != nil {
|
||||||
|
if err := g.asnDB.Close(); err != nil {
|
||||||
|
slog.Warn("failed to close asndb", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// openDB opens a MaxMind DB file, downloading it if it doesn't exist.
|
||||||
|
func (g *GeoIPManager) openDB(path string) error {
|
||||||
|
db, err := maxminddb.Open(path)
|
||||||
|
if err == nil {
|
||||||
|
if path == CityDBPath {
|
||||||
|
g.cityDB = db
|
||||||
|
} else {
|
||||||
|
g.asnDB = db
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("%w: failed to open %s: %v", ErrDatabaseOpen, path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Warn("database not found, attempting initial download", "path", path)
|
||||||
|
if err := g.DownloadDatabases(context.Background()); err != nil {
|
||||||
|
return fmt.Errorf("%w: %v", ErrDownloadFailed, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
db, err = maxminddb.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%w: failed to open %s after download: %v", ErrDatabaseOpen, path, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if path == CityDBPath {
|
||||||
|
g.cityDB = db
|
||||||
|
} else {
|
||||||
|
g.asnDB = db
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildASNPrefixMap builds a map of ASN prefixes for fast lookups.
|
||||||
|
func (g *GeoIPManager) buildASNPrefixMap() {
|
||||||
|
slog.Info("building asn prefix map for fast lookups")
|
||||||
|
startTime := time.Now()
|
||||||
|
g.asnPrefixMap = make(map[uint][]*net.IPNet)
|
||||||
|
if g.asnDB == nil {
|
||||||
|
slog.Warn("asn database is not available, skipping prefix map build")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
networks := g.asnDB.Networks()
|
||||||
|
for networks.Next() {
|
||||||
|
var record ASNRecord
|
||||||
|
subnet, err := networks.Network(&record)
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("skipping asn network due to error", "err", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if record.AutonomousSystemNumber > 0 {
|
||||||
|
g.asnPrefixMap[record.AutonomousSystemNumber] = append(g.asnPrefixMap[record.AutonomousSystemNumber], subnet)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slog.Info("finished building asn prefix map", "duration", time.Since(startTime))
|
||||||
|
}
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
// Constants for database names and paths
const (
	CityDBName  = "GeoLite2-City"
	ASNDBName   = "GeoLite2-ASN"
	DBExtension = ".mmdb"
	// Full on-disk filenames, e.g. "GeoLite2-City.mmdb", resolved relative
	// to the working directory.
	CityDBPath = CityDBName + DBExtension
	ASNDBPath  = ASNDBName + DBExtension
)

// Error messages
//
// Sentinel errors wrapped (via %w) by openDB so callers can classify
// failures with errors.Is.
var (
	ErrDatabaseOpen   = errors.New("failed to open database")
	ErrDownloadFailed = errors.New("failed to download database")
)

// ASNRecord represents a record in the ASN database; field tags map onto the
// MaxMind GeoLite2-ASN schema.
type ASNRecord struct {
	AutonomousSystemNumber       uint   `maxminddb:"autonomous_system_number"`
	AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
}
|
||||||
@@ -0,0 +1,165 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/oschwald/maxminddb-golang"
|
||||||
|
)
|
||||||
|
|
||||||
|
// StartUpdater starts a background updater for the GeoIP databases.
|
||||||
|
func (g *GeoIPManager) StartUpdater(ctx context.Context, updateInterval time.Duration) {
|
||||||
|
slog.Info("starting database updater", "interval", updateInterval.String())
|
||||||
|
ticker := time.NewTicker(updateInterval)
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
slog.Info("performing scheduled database update")
|
||||||
|
if err := g.UpdateDatabases(); err != nil {
|
||||||
|
slog.Error("failed to update databases", "err", err)
|
||||||
|
}
|
||||||
|
case <-ctx.Done():
|
||||||
|
ticker.Stop()
|
||||||
|
slog.Info("database updater stopped")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateDatabases downloads new databases and reloads them into the manager.
|
||||||
|
func (g *GeoIPManager) UpdateDatabases() error {
|
||||||
|
if err := g.DownloadDatabases(context.Background()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
g.mu.Lock()
|
||||||
|
defer g.mu.Unlock()
|
||||||
|
|
||||||
|
if g.cityDB != nil {
|
||||||
|
_ = g.cityDB.Close()
|
||||||
|
}
|
||||||
|
if g.asnDB != nil {
|
||||||
|
_ = g.asnDB.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
var openErr error
|
||||||
|
g.cityDB, openErr = maxminddb.Open(CityDBPath)
|
||||||
|
if openErr != nil {
|
||||||
|
return fmt.Errorf("reopening city database: %w", openErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
g.asnDB, openErr = maxminddb.Open(ASNDBPath)
|
||||||
|
if openErr != nil {
|
||||||
|
return fmt.Errorf("reopening asn database: %w", openErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
g.buildASNPrefixMap()
|
||||||
|
slog.Info("successfully reloaded databases")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadDatabases downloads all configured GeoIP database editions.
|
||||||
|
func (g *GeoIPManager) DownloadDatabases(ctx context.Context) error {
|
||||||
|
accountID := os.Getenv("GEOIPUPDATE_ACCOUNT_ID")
|
||||||
|
licenseKey := os.Getenv("GEOIPUPDATE_LICENSE_KEY")
|
||||||
|
if accountID == "" || licenseKey == "" {
|
||||||
|
return fmt.Errorf("GEOIPUPDATE_ACCOUNT_ID and GEOIPUPDATE_LICENSE_KEY must be set")
|
||||||
|
}
|
||||||
|
|
||||||
|
editionIDs := os.Getenv("GEOIPUPDATE_EDITION_IDS")
|
||||||
|
if editionIDs == "" {
|
||||||
|
editionIDs = "GeoLite2-City GeoLite2-ASN"
|
||||||
|
}
|
||||||
|
|
||||||
|
var firstError error
|
||||||
|
for _, editionID := range strings.Fields(editionIDs) {
|
||||||
|
if err := g.downloadEdition(ctx, accountID, licenseKey, editionID); err != nil {
|
||||||
|
slog.Error("failed to download edition", "edition", editionID, "err", err)
|
||||||
|
if firstError == nil {
|
||||||
|
firstError = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return firstError
|
||||||
|
}
|
||||||
|
|
||||||
|
// downloadEdition downloads a specific GeoIP database edition.
|
||||||
|
func (g *GeoIPManager) downloadEdition(ctx context.Context, accountID, licenseKey, editionID string) error {
|
||||||
|
dbPath := editionID + DBExtension
|
||||||
|
slog.Info("checking for updates", "database", dbPath)
|
||||||
|
|
||||||
|
hash, err := fileMD5(dbPath)
|
||||||
|
if err != nil && !os.IsNotExist(err) {
|
||||||
|
return fmt.Errorf("could not calculate md5 for %s: %w", dbPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadURL := fmt.Sprintf("https://updates.maxmind.com/geoip/databases/%s/update?db_md5=%s", editionID, hash)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "GET", downloadURL, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not create request: %w", err)
|
||||||
|
}
|
||||||
|
req.SetBasicAuth(accountID, licenseKey)
|
||||||
|
|
||||||
|
resp, err := g.httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("http request failed: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := resp.Body.Close(); err != nil {
|
||||||
|
slog.Error("failed to close response body", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if resp.StatusCode == http.StatusNotModified {
|
||||||
|
slog.Info("database is already up to date", "database", dbPath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(resp.Body)
|
||||||
|
return fmt.Errorf("received non-200 status code: %d - %s", resp.StatusCode, string(body))
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("downloading and decompressing new version", "database", dbPath)
|
||||||
|
|
||||||
|
gzr, err := gzip.NewReader(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not create gzip reader: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := gzr.Close(); err != nil {
|
||||||
|
slog.Error("failed to close gzip reader", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
tmpPath := dbPath + ".tmp"
|
||||||
|
outFile, err := os.Create(tmpPath)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("could not create temporary file: %w", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := outFile.Close(); err != nil {
|
||||||
|
slog.Error("failed to close output file", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if _, err := io.Copy(outFile, gzr); err != nil {
|
||||||
|
_ = os.Remove(tmpPath)
|
||||||
|
return fmt.Errorf("could not decompress and write db file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := os.Rename(tmpPath, dbPath); err != nil {
|
||||||
|
return fmt.Errorf("could not replace database file: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("successfully downloaded and updated", "database", dbPath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
package db
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/md5"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fileMD5 calculates the MD5 hash of a file.
|
||||||
|
func fileMD5(path string) (string, error) {
|
||||||
|
file, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := file.Close(); err != nil {
|
||||||
|
fmt.Printf("Error closing file: %v\n", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
hash := md5.New()
|
||||||
|
if _, err := io.Copy(hash, file); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%x", hash.Sum(nil)), nil
|
||||||
|
}
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
package logger
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log/slog"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
var Log *slog.Logger
|
|
||||||
|
|
||||||
// init initializes the logger
|
|
||||||
func init() {
|
|
||||||
Log = slog.New(slog.NewJSONHandler(os.Stdout, nil))
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,138 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"ipinfo/internal/common"
|
||||||
|
"ipinfo/internal/db"
|
||||||
|
|
||||||
|
"golang.org/x/net/idna"
|
||||||
|
)
|
||||||
|
|
||||||
|
const favicon = `<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16"></svg>`
|
||||||
|
|
||||||
|
// faviconHandler handles requests for the favicon.
|
||||||
|
func faviconHandler(w http.ResponseWriter, _ *http.Request) {
|
||||||
|
w.Header().Set("Content-Type", "image/svg+xml")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
_, _ = w.Write([]byte(favicon))
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleDomainLookup handles domain lookup requests.
|
||||||
|
func handleDomainLookup(w http.ResponseWriter, _ *http.Request, domain string) {
|
||||||
|
punycodeDomain, err := idna.ToASCII(domain)
|
||||||
|
if err != nil {
|
||||||
|
sendJSONError(w, "Please provide a valid domain name.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(punycodeDomain) > 253 {
|
||||||
|
sendJSONError(w, "Please provide a valid domain name.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := common.LookupDomainData(punycodeDomain)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("failed to look up domain data", "domain", punycodeDomain, "error", err)
|
||||||
|
sendJSONError(w, "Error retrieving data for domain.", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sendJSONResponse(w, data, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleASNLookup handles ASN lookup requests.
|
||||||
|
func handleASNLookup(w http.ResponseWriter, _ *http.Request, path string, geoIP *db.GeoIPManager) {
|
||||||
|
var asnStr string
|
||||||
|
lowerPath := strings.ToLower(path)
|
||||||
|
|
||||||
|
if strings.HasPrefix(lowerPath, "asn/") {
|
||||||
|
asnStr = path[4:]
|
||||||
|
} else if strings.HasPrefix(lowerPath, "as") {
|
||||||
|
asnStr = path[2:]
|
||||||
|
} else {
|
||||||
|
sendJSONError(w, "Invalid ASN query format. Use /asn/<number> or /AS<number>.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
asn, err := strconv.ParseUint(asnStr, 10, 32)
|
||||||
|
if err != nil || asn == 0 {
|
||||||
|
sendJSONError(w, "Invalid ASN: must be a positive number.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := common.LookupASNData(geoIP, uint(asn))
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "no prefixes found") {
|
||||||
|
sendJSONError(w, err.Error(), http.StatusNotFound)
|
||||||
|
} else {
|
||||||
|
slog.Error("failed to look up asn data", "asn", asn, "error", err)
|
||||||
|
sendJSONError(w, "Error retrieving data for ASN.", http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sendJSONResponse(w, data, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleIPLookup handles IP lookup requests.
|
||||||
|
func handleIPLookup(w http.ResponseWriter, r *http.Request, path string, geoIP *db.GeoIPManager) {
|
||||||
|
parts := strings.Split(path, "/")
|
||||||
|
var ipAddress, field string
|
||||||
|
|
||||||
|
switch len(parts) {
|
||||||
|
case 0:
|
||||||
|
ipAddress = GetRealIP(r) // No more "common." prefix
|
||||||
|
case 1:
|
||||||
|
if parts[0] == "" {
|
||||||
|
ipAddress = GetRealIP(r) // No more "common." prefix
|
||||||
|
} else if _, ok := fieldMap[parts[0]]; ok {
|
||||||
|
ipAddress = GetRealIP(r) // No more "common." prefix
|
||||||
|
field = parts[0]
|
||||||
|
} else {
|
||||||
|
ipAddress = parts[0]
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
ipAddress = parts[0]
|
||||||
|
field = parts[1]
|
||||||
|
default:
|
||||||
|
sendJSONError(w, "Invalid request format.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ip := net.ParseIP(ipAddress)
|
||||||
|
if ip == nil {
|
||||||
|
sendJSONError(w, "Please provide a valid IP address.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if field != "" {
|
||||||
|
if _, ok := fieldMap[field]; !ok {
|
||||||
|
sendJSONError(w, "Please provide a valid field.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if common.IsBogon(ip) {
|
||||||
|
sendJSONResponse(w, bogonDataStruct{IP: ip.String(), Bogon: true}, http.StatusOK)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
data := common.LookupIPData(geoIP, ip)
|
||||||
|
if data == nil {
|
||||||
|
sendJSONError(w, "Could not retrieve data for the specified IP.", http.StatusNotFound)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if field != "" {
|
||||||
|
value := getField(data, field)
|
||||||
|
sendJSONResponse(w, map[string]*string{field: value}, http.StatusOK)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sendJSONResponse(w, data, http.StatusOK)
|
||||||
|
}
|
||||||
@@ -0,0 +1,57 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// gzipResponseWriter is a wrapper for gzip compression.
|
||||||
|
type gzipResponseWriter struct {
|
||||||
|
http.ResponseWriter
|
||||||
|
Writer *gzip.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
||||||
|
return w.Writer.Write(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w gzipResponseWriter) Close() {
|
||||||
|
if err := w.Writer.Close(); err != nil {
|
||||||
|
slog.Error("failed to close gzip writer", "error", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// newGzipResponseWriter wraps the ResponseWriter if the client accepts gzip.
|
||||||
|
func newGzipResponseWriter(w http.ResponseWriter, r *http.Request) http.ResponseWriter {
|
||||||
|
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||||
|
w.Header().Set("Content-Encoding", "gzip")
|
||||||
|
gz := gzip.NewWriter(w)
|
||||||
|
return gzipResponseWriter{ResponseWriter: w, Writer: gz}
|
||||||
|
}
|
||||||
|
return w
|
||||||
|
}
|
||||||
|
|
||||||
|
// loggingMiddleware logs the incoming HTTP request and its duration.
|
||||||
|
func loggingMiddleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if r.URL.Path == "/favicon.ico" {
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
duration := time.Since(start)
|
||||||
|
|
||||||
|
slog.Info(fmt.Sprintf("%s %s from %s in %s",
|
||||||
|
r.Method,
|
||||||
|
r.URL.Path,
|
||||||
|
GetRealIP(r),
|
||||||
|
duration,
|
||||||
|
))
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// sendJSONResponse writes data as indented JSON with the given status code.
// Encoding failures are logged only: the status line and headers have
// already been written, so nothing can be reported to the client.
func sendJSONResponse(w http.ResponseWriter, data any, statusCode int) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(statusCode)

	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	if err := enc.Encode(data); err != nil {
		slog.Error("failed to encode json response", "error", err)
	}
}
|
||||||
|
|
||||||
|
// sendJSONError sends a JSON error response with the given message and status code.
|
||||||
|
func sendJSONError(w http.ResponseWriter, errMsg string, statusCode int) {
|
||||||
|
sendJSONResponse(w, map[string]string{"error": errMsg}, statusCode)
|
||||||
|
}
|
||||||
@@ -0,0 +1,64 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"ipinfo/internal/db"
|
||||||
|
"ipinfo/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newRouter creates the main request router and applies middleware.
|
||||||
|
func newRouter(geoIP *db.GeoIPManager) http.Handler {
|
||||||
|
mux := http.NewServeMux()
|
||||||
|
|
||||||
|
// Register handlers
|
||||||
|
mux.Handle("/health", utils.HealthCheck())
|
||||||
|
mux.HandleFunc("/favicon.ico", faviconHandler)
|
||||||
|
mux.HandleFunc("/", rootHandler(geoIP))
|
||||||
|
|
||||||
|
// Chain middleware
|
||||||
|
var handler http.Handler = mux
|
||||||
|
handler = loggingMiddleware(handler)
|
||||||
|
|
||||||
|
return handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// rootHandler is the main routing logic that inspects the path.
|
||||||
|
func rootHandler(geoIP *db.GeoIPManager) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Apply gzip compression where accepted
|
||||||
|
w = newGzipResponseWriter(w, r)
|
||||||
|
if gw, ok := w.(gzipResponseWriter); ok {
|
||||||
|
defer gw.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
path := strings.Trim(r.URL.Path, "/")
|
||||||
|
parts := strings.Split(path, "/")
|
||||||
|
firstPart := ""
|
||||||
|
if len(parts) > 0 {
|
||||||
|
firstPart = parts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Route to ASN handler
|
||||||
|
if strings.HasPrefix(strings.ToLower(firstPart), "as") {
|
||||||
|
handleASNLookup(w, r, path, geoIP)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Route to Domain handler
|
||||||
|
isDomain := strings.Contains(firstPart, ".") && net.ParseIP(firstPart) == nil && firstPart != ""
|
||||||
|
if isDomain {
|
||||||
|
if len(parts) > 1 {
|
||||||
|
sendJSONError(w, "Invalid request for domain. Field lookups are not supported.", http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handleDomainLookup(w, r, firstPart)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default to IP handler
|
||||||
|
handleIPLookup(w, r, path, geoIP)
|
||||||
|
}
|
||||||
|
}
|
||||||
+16
-224
@@ -1,58 +1,25 @@
|
|||||||
package internal
|
package server
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"compress/gzip"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
common "skidoodle/ipinfo/internal/common"
|
"ipinfo/internal/db"
|
||||||
db "skidoodle/ipinfo/internal/db"
|
|
||||||
"skidoodle/ipinfo/internal/logger"
|
|
||||||
utils "skidoodle/ipinfo/utils/health"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// favicon is the SVG data for the favicon
|
// Server represents the HTTP server.
|
||||||
const favicon = `<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16"></svg>`
|
|
||||||
|
|
||||||
// bogonDataStruct represents the response structure for bogon IP queries
|
|
||||||
type bogonDataStruct struct {
|
|
||||||
IP string `json:"ip"`
|
|
||||||
Bogon bool `json:"bogon"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// gzipResponseWriter is a wrapper for gzip compression
|
|
||||||
type gzipResponseWriter struct {
|
|
||||||
http.ResponseWriter
|
|
||||||
Writer *gzip.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes the compressed data to the response
|
|
||||||
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
|
||||||
return w.Writer.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Server represents the HTTP server
|
|
||||||
type Server struct {
|
type Server struct {
|
||||||
server *http.Server
|
server *http.Server
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewServer creates a new HTTP server
|
// NewServer creates a new HTTP server.
|
||||||
func NewServer(geoIP *db.GeoIPManager) *Server {
|
func NewServer(geoIP *db.GeoIPManager) *Server {
|
||||||
mux := http.NewServeMux()
|
// The router is now created in its own file.
|
||||||
mux.Handle("/health", utils.HealthCheck())
|
handler := newRouter(geoIP)
|
||||||
mux.HandleFunc("/favicon.ico", faviconHandler)
|
|
||||||
mux.HandleFunc("/", router(geoIP))
|
|
||||||
|
|
||||||
// Chain the logging middleware
|
|
||||||
var handler http.Handler = mux
|
|
||||||
handler = loggingMiddleware(handler)
|
|
||||||
|
|
||||||
return &Server{
|
return &Server{
|
||||||
server: &http.Server{
|
server: &http.Server{
|
||||||
@@ -65,202 +32,27 @@ func NewServer(geoIP *db.GeoIPManager) *Server {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartServer starts the HTTP server
|
// Start starts the HTTP server and handles graceful shutdown.
|
||||||
func StartServer(ctx context.Context, geoIP *db.GeoIPManager) error {
|
func (s *Server) Start(ctx context.Context) error {
|
||||||
server := NewServer(geoIP)
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
logger.Log.Info("Server listening", "address", server.server.Addr)
|
slog.Info("server listening", "address", s.server.Addr)
|
||||||
if err := server.server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
logger.Log.Error("Server error", "error", err)
|
slog.Error("server error", "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
<-ctx.Done()
|
<-ctx.Done()
|
||||||
|
|
||||||
logger.Log.Info("Shutdown signal received, shutting down server gracefully...")
|
slog.Info("shutdown signal received")
|
||||||
|
|
||||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
if err := server.server.Shutdown(shutdownCtx); err != nil {
|
if err := s.server.Shutdown(shutdownCtx); err != nil {
|
||||||
logger.Log.Error("Server shutdown failed", "error", err)
|
slog.Error("shutdown failed", "error", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Log.Info("Server shutdown complete")
|
slog.Info("shutdown complete")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// router returns the HTTP request router for the GeoIP service
|
|
||||||
func router(geoIP *db.GeoIPManager) http.HandlerFunc {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
|
||||||
w.Header().Set("Content-Encoding", "gzip")
|
|
||||||
gz := gzip.NewWriter(w)
|
|
||||||
defer gz.Close()
|
|
||||||
w = &gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
|
||||||
}
|
|
||||||
|
|
||||||
path := strings.Trim(r.URL.Path, "/")
|
|
||||||
lowerPath := strings.ToLower(path)
|
|
||||||
|
|
||||||
if strings.HasPrefix(lowerPath, "as") {
|
|
||||||
handleASNLookup(w, r, path, geoIP)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
handleIPLookup(w, r, path, geoIP)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// faviconHandler handles requests for the favicon
|
|
||||||
func faviconHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().Set("Content-Type", "image/svg+xml")
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Write([]byte(favicon))
|
|
||||||
}
|
|
||||||
|
|
||||||
// fieldMap maps request fields to their corresponding data struct fields
|
|
||||||
var fieldMap = map[string]func(*common.DataStruct) *string{
|
|
||||||
"ip": func(d *common.DataStruct) *string { return d.IP },
|
|
||||||
"hostname": func(d *common.DataStruct) *string { return d.Hostname },
|
|
||||||
"org": func(d *common.DataStruct) *string { return d.Org },
|
|
||||||
"city": func(d *common.DataStruct) *string { return d.City },
|
|
||||||
"region": func(d *common.DataStruct) *string { return d.Region },
|
|
||||||
"country": func(d *common.DataStruct) *string { return d.Country },
|
|
||||||
"timezone": func(d *common.DataStruct) *string { return d.Timezone },
|
|
||||||
"loc": func(d *common.DataStruct) *string { return d.Loc },
|
|
||||||
}
|
|
||||||
|
|
||||||
// getField retrieves the value of a specific field from the data struct
|
|
||||||
func getField(data *common.DataStruct, field string) *string {
|
|
||||||
if f, ok := fieldMap[field]; ok {
|
|
||||||
return f(data)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleASNLookup handles ASN lookup requests
|
|
||||||
func handleASNLookup(w http.ResponseWriter, _ *http.Request, path string, geoIP *db.GeoIPManager) {
|
|
||||||
var asnStr string
|
|
||||||
lowerPath := strings.ToLower(path)
|
|
||||||
|
|
||||||
if strings.HasPrefix(lowerPath, "asn/") {
|
|
||||||
asnStr = path[4:]
|
|
||||||
} else if strings.HasPrefix(lowerPath, "as") {
|
|
||||||
asnStr = path[2:]
|
|
||||||
} else {
|
|
||||||
sendJSONError(w, "Invalid ASN query format. Use /asn/<number> or /AS<number>.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
asn, err := strconv.ParseUint(asnStr, 10, 32)
|
|
||||||
if err != nil || asn == 0 {
|
|
||||||
sendJSONError(w, "Invalid ASN: must be a positive number.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := common.LookupASNData(geoIP, uint(asn))
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "no prefixes found") {
|
|
||||||
sendJSONError(w, err.Error(), http.StatusNotFound)
|
|
||||||
} else {
|
|
||||||
logger.Log.Error("Error looking up ASN data", "asn", asn, "error", err)
|
|
||||||
sendJSONError(w, "Error retrieving data for ASN.", http.StatusInternalServerError)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
sendJSONResponse(w, data, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleIPLookup handles IP lookup requests
|
|
||||||
func handleIPLookup(w http.ResponseWriter, r *http.Request, path string, geoIP *db.GeoIPManager) {
|
|
||||||
requestedThings := strings.Split(path, "/")
|
|
||||||
var IPAddress, field string
|
|
||||||
|
|
||||||
switch len(requestedThings) {
|
|
||||||
case 0:
|
|
||||||
IPAddress = common.GetRealIP(r)
|
|
||||||
case 1:
|
|
||||||
if requestedThings[0] == "" {
|
|
||||||
IPAddress = common.GetRealIP(r)
|
|
||||||
} else if _, ok := fieldMap[requestedThings[0]]; ok {
|
|
||||||
IPAddress = common.GetRealIP(r)
|
|
||||||
field = requestedThings[0]
|
|
||||||
} else if net.ParseIP(requestedThings[0]) != nil {
|
|
||||||
IPAddress = requestedThings[0]
|
|
||||||
} else {
|
|
||||||
sendJSONError(w, "Please provide a valid IP address.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case 2:
|
|
||||||
IPAddress = requestedThings[0]
|
|
||||||
if _, ok := fieldMap[requestedThings[1]]; ok {
|
|
||||||
field = requestedThings[1]
|
|
||||||
} else {
|
|
||||||
sendJSONError(w, "Please provide a valid field.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
sendJSONError(w, "Please provide a valid IP address.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ip := net.ParseIP(IPAddress)
|
|
||||||
if ip == nil {
|
|
||||||
sendJSONError(w, "Please provide a valid IP address.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if common.IsBogon(ip) {
|
|
||||||
sendJSONResponse(w, bogonDataStruct{IP: ip.String(), Bogon: true}, http.StatusOK)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
data := common.LookupIPData(geoIP, ip)
|
|
||||||
if data == nil {
|
|
||||||
sendJSONError(w, "Please provide a valid IP address.", http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if field != "" {
|
|
||||||
value := getField(data, field)
|
|
||||||
sendJSONResponse(w, map[string]*string{field: value}, http.StatusOK)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
sendJSONResponse(w, data, http.StatusOK)
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendJSONResponse sends a JSON response with the given data and status code.
|
|
||||||
func sendJSONResponse(w http.ResponseWriter, data any, statusCode int) {
|
|
||||||
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
||||||
w.WriteHeader(statusCode)
|
|
||||||
encoder := json.NewEncoder(w)
|
|
||||||
encoder.SetIndent("", " ")
|
|
||||||
if err := encoder.Encode(data); err != nil {
|
|
||||||
logger.Log.Error("Error encoding JSON response", "error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendJSONError sends a JSON error response with the given message and status code.
|
|
||||||
func sendJSONError(w http.ResponseWriter, errMsg string, statusCode int) {
|
|
||||||
sendJSONResponse(w, map[string]string{"error": errMsg}, statusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// loggingMiddleware logs the incoming HTTP request and its duration.
|
|
||||||
func loggingMiddleware(next http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
start := time.Now()
|
|
||||||
next.ServeHTTP(w, r)
|
|
||||||
logger.Log.Info("HTTP request",
|
|
||||||
slog.String("method", r.Method),
|
|
||||||
slog.String("path", r.URL.Path),
|
|
||||||
slog.String("remote_addr", r.RemoteAddr),
|
|
||||||
slog.Duration("duration", time.Since(start)),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -0,0 +1,7 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
// bogonDataStruct represents the response structure for bogon IP queries.
// It is sent by the IP handler when common.IsBogon reports that the queried
// address falls in a reserved / non-routable range.
type bogonDataStruct struct {
	IP    string `json:"ip"`    // the queried IP in canonical string form
	Bogon bool   `json:"bogon"` // marker flag; the IP handler sets it to true
}
|
||||||
@@ -0,0 +1,43 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"ipinfo/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fieldMap maps request fields to their corresponding data struct fields.
// Its key set doubles as the list of valid single-field lookups accepted by
// the IP handler (e.g. /8.8.8.8/city); each value is an accessor returning
// the (possibly nil) field pointer from a lookup result.
var fieldMap = map[string]func(*common.DataStruct) *string{
	"ip":       func(d *common.DataStruct) *string { return d.IP },
	"hostname": func(d *common.DataStruct) *string { return d.Hostname },
	"org":      func(d *common.DataStruct) *string { return d.Org },
	"city":     func(d *common.DataStruct) *string { return d.City },
	"region":   func(d *common.DataStruct) *string { return d.Region },
	"country":  func(d *common.DataStruct) *string { return d.Country },
	"timezone": func(d *common.DataStruct) *string { return d.Timezone },
	"loc":      func(d *common.DataStruct) *string { return d.Loc },
}
|
||||||
|
|
||||||
|
// getField retrieves the value of a specific field from the data struct.
|
||||||
|
func getField(data *common.DataStruct, field string) *string {
|
||||||
|
if f, ok := fieldMap[field]; ok {
|
||||||
|
return f(data)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRealIP extracts the client's real IP address from request headers.
|
||||||
|
func GetRealIP(r *http.Request) string {
|
||||||
|
for _, header := range []string{"CF-Connecting-IP", "X-Real-IP", "X-Forwarded-For"} {
|
||||||
|
if ip := r.Header.Get(header); ip != "" {
|
||||||
|
return strings.TrimSpace(strings.Split(ip, ",")[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||||
|
if err != nil {
|
||||||
|
return r.RemoteAddr
|
||||||
|
}
|
||||||
|
return host
|
||||||
|
}
|
||||||
@@ -2,14 +2,14 @@ package main
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
db "skidoodle/ipinfo/internal/db"
|
"ipinfo/internal/db"
|
||||||
logger "skidoodle/ipinfo/internal/logger"
|
"ipinfo/internal/server"
|
||||||
server "skidoodle/ipinfo/internal/server"
|
|
||||||
|
|
||||||
"github.com/joho/godotenv"
|
"github.com/joho/godotenv"
|
||||||
)
|
)
|
||||||
@@ -17,7 +17,7 @@ import (
|
|||||||
// main is the entry point of the application
|
// main is the entry point of the application
|
||||||
func main() {
|
func main() {
|
||||||
if err := godotenv.Load(); err != nil {
|
if err := godotenv.Load(); err != nil {
|
||||||
logger.Log.Info("No .env file found, using system environment variables")
|
slog.Info("env file not found, using system environment variables")
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||||
@@ -25,17 +25,18 @@ func main() {
|
|||||||
|
|
||||||
geoIP, err := db.NewGeoIPManager()
|
geoIP, err := db.NewGeoIPManager()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Log.Error("Failed to initialize GeoIP databases", "error", err)
|
slog.Error("failed to initialize databases", "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
defer geoIP.Close()
|
defer geoIP.Close()
|
||||||
|
|
||||||
geoIP.StartUpdater(ctx, 24*time.Hour)
|
geoIP.StartUpdater(ctx, 24*time.Hour)
|
||||||
|
|
||||||
logger.Log.Info("Starting server...")
|
slog.Info("starting server")
|
||||||
if err := server.StartServer(ctx, geoIP); err != nil {
|
appServer := server.NewServer(geoIP)
|
||||||
logger.Log.Error("Server failed", "error", err)
|
if err := appServer.Start(ctx); err != nil {
|
||||||
|
slog.Error("server failed to start", "error", err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
logger.Log.Info("Application shut down gracefully")
|
slog.Info("server shut down gracefully")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,8 @@
|
|||||||
- **IP Geolocation**: Provides city, region, country, continent, and coordinates for any IP address.
|
- **IP Geolocation**: Provides city, region, country, continent, and coordinates for any IP address.
|
||||||
- **ASN Information**: Includes autonomous system number and organization.
|
- **ASN Information**: Includes autonomous system number and organization.
|
||||||
- **Hostname Lookup**: Retrieves the hostname associated with the IP address.
|
- **Hostname Lookup**: Retrieves the hostname associated with the IP address.
|
||||||
|
- **Domain WHOIS**: Fetches structured WHOIS data for any domain.
|
||||||
|
- **Domain DNS Records**: Retrieves common DNS records (A, AAAA, CNAME, MX, TXT, NS).
|
||||||
- **Automatic Database Updates**: Keeps GeoIP databases up-to-date daily.
|
- **Automatic Database Updates**: Keeps GeoIP databases up-to-date daily.
|
||||||
|
|
||||||
## Example Endpoints
|
## Example Endpoints
|
||||||
@@ -71,6 +73,46 @@ $ curl https://ip.albert.lol/AS13335
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Get WHOIS and DNS records for a domain
|
||||||
|
```sh
|
||||||
|
$ curl https://ip.albert.lol/example.com
|
||||||
|
{
|
||||||
|
"whois": {
|
||||||
|
"domain": {
|
||||||
|
"id": "2336799_DOMAIN_COM-VRSN",
|
||||||
|
"domain": "example.com",
|
||||||
|
"whois_server": "whois.iana.org",
|
||||||
|
"status": [
|
||||||
|
"clientDeleteProhibited",
|
||||||
|
"clientTransferProhibited",
|
||||||
|
"clientUpdateProhibited"
|
||||||
|
],
|
||||||
|
"name_servers": [
|
||||||
|
"a.iana-servers.net",
|
||||||
|
"b.iana-servers.net"
|
||||||
|
],
|
||||||
|
"dnssec": true,
|
||||||
|
"created_date": "1995-08-14T04:00:00Z",
|
||||||
|
"updated_date": "2025-08-14T07:01:39Z",
|
||||||
|
"expiration_date": "2026-08-13T04:00:00Z"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"dns": {
|
||||||
|
"A": [
|
||||||
|
"93.184.216.34"
|
||||||
|
],
|
||||||
|
"AAAA": [
|
||||||
|
"2606:2800:220:1:248:1893:25c8:1946"
|
||||||
|
],
|
||||||
|
"NS": [
|
||||||
|
"a.iana-servers.net",
|
||||||
|
"b.iana-servers.net"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Running Locally
|
## Running Locally
|
||||||
|
|
||||||
### With Docker
|
### With Docker
|
||||||
|
|||||||
@@ -0,0 +1,22 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HealthCheck returns a handler that answers liveness probes with 200 "OK".
func HealthCheck() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		// A failed write usually means the probe hung up; log and move on.
		if _, err := w.Write([]byte("OK")); err != nil {
			slog.Warn("failed to write healthcheck response",
				"component", "healthcheck",
				"method", r.Method,
				"path", r.URL.Path,
				"error", err,
			)
		}
	})
}
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Returns a simple health check handler
|
|
||||||
func HealthCheck() http.Handler {
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
w.Write([]byte("OK"))
|
|
||||||
})
|
|
||||||
return mux
|
|
||||||
}
|
|
||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"net"
|
"net"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Contains a list of known bogon IP ranges
|
// BogonNets Contains a list of known bogon IP ranges
|
||||||
var BogonNets = []*net.IPNet{
|
var BogonNets = []*net.IPNet{
|
||||||
// IPv4
|
// IPv4
|
||||||
{IP: net.IPv4(0, 0, 0, 0), Mask: net.CIDRMask(8, 32)}, // "This" network
|
{IP: net.IPv4(0, 0, 0, 0), Mask: net.CIDRMask(8, 32)}, // "This" network
|
||||||
Reference in New Issue
Block a user