Browse Source

setup

git-svn-id: https://svn.microneil.com/svn/SNFMulti/trunk@1 dc71a809-1921-45c4-985c-09c81d0142d9
wx
madscientist committed 15 years ago
commit b313d3961a
37 changed files with 15339 additions and 0 deletions
  1. FilterChain.cpp  +1286 -0
  2. FilterChain.hpp  +768 -0
  3. GBUdb.cpp  +814 -0
  4. GBUdb.hpp  +293 -0
  5. GBUdb.inline.hpp  +354 -0
  6. Makefile.am  +56 -0
  7. SNFMulti.cpp  +2141 -0
  8. SNFMulti.hpp  +471 -0
  9. gccVersion.txt  +5 -0
  10. mangler.cpp  +106 -0
  11. mangler.hpp  +34 -0
  12. scanner.cpp  +112 -0
  13. scanner.hpp  +69 -0
  14. snfCFGmgr.cpp  +1009 -0
  15. snfCFGmgr.hpp  +554 -0
  16. snfCFGmgr.inline.hpp  +46 -0
  17. snfGBUdbmgr.cpp  +232 -0
  18. snfGBUdbmgr.hpp  +68 -0
  19. snfLOGmgr.cpp  +1950 -0
  20. snfLOGmgr.hpp  +670 -0
  21. snfLOGmgr.inline.hpp  +121 -0
  22. snfNETmgr.cpp  +773 -0
  23. snfNETmgr.hpp  +138 -0
  24. snfXCImgr.cpp  +787 -0
  25. snfXCImgr.hpp  +199 -0
  26. snf_HeaderFinder.cpp  +233 -0
  27. snf_HeaderFinder.hpp  +96 -0
  28. snf_HeaderFinder.inline.hpp  +50 -0
  29. snf_engine.cpp  +791 -0
  30. snf_engine.hpp  +546 -0
  31. snf_match.h  +21 -0
  32. snf_sync.cpp  +146 -0
  33. snf_sync.hpp  +85 -0
  34. snf_xci.cpp  +138 -0
  35. snf_xci.hpp  +78 -0
  36. tcp_watchdog.cpp  +54 -0
  37. tcp_watchdog.hpp  +45 -0

+ 1286 - 0  FilterChain.cpp
File diff not shown because of its large size.


+ 768 - 0  FilterChain.hpp

@@ -0,0 +1,768 @@
// FilterChain.hpp
//
// (C) 2002-2009 MicroNeil Research Corporation
//
// This is the base class header for FilterChain objects.
// FilterChain objects can be chained together to filter
// a byte stream. Each object produces a single character
// per call. It will also call its source object for the
// next character as required.

// History...

// 20060822 _M
// Adding FilterChainHeaderAnalysis to identify missing headers and header
// anomalies, and to extract and test IP data.

// 20060127 _M
// Added FilterChainCBFG to accept a buffer of a specific
// length.

// 20041116 _M Added UrlDecode module. The module will repeat a decoded version of
// any anchor tag that it sees which contains decodable %xx bytes. Other anchor
// tags are not repeated.

// 20041116 _M Upgrades to the Defunker module. The module now decodes any HTML
// encoded bytes that could have been normal ascii.

// 20041114 _M Completed basic defunker engine which strips out all HTML and some
// basic &nbsp; encoding.

// 20041113 _M Began heavy upgrades to this module to improve performance and
// provide additional obfuscation removal. This modification will include a move
// from the use of switch(State) mechanisms to the use of function pointers. This
// should save a few cycles on every byte processed.

// 20021025 _M
// Added FilterChainCString to accept a Null Terminated
// String (CString). Except for the input form it operates
// exactly like the FilterChainInput form as modified below.
// This allows WebClay to deliver the message using a buffer
// rather than a file.

// 20021015 _M
// Modified FilterChainInput to eat control characters and
// <CR> bytes so that the input stream "appears" always to
// be terminated in the *nix standard \n. Tabs are also passed
// but all other low bytes are eaten.

// 20020721 _M File Created.

// This is the base class - nothing special happens here
// except defining the basic format of a FilterChain object.
// If this object is instantiated, then it will simply return
// its source's data, or a stream of '0's if none has been
// defined.

#ifndef _MN_FilterChain
#define _MN_FilterChain

#include <stdexcept>
#include <iostream>
#include <sstream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <cctype>


using namespace std;


// Define parameters for this module.

const static int ScanBufferSize = 128; // Define the buffer size.

// Define the base class.

class FilterChain {

private:

FilterChain* Source; // Where we get our data.

public:

class BadSource : public invalid_argument { // Bad Source Exception.
public: BadSource(const string& w):invalid_argument(w){}
};
class Empty : public underflow_error { // Empty Exception.
public: Empty(const string& w):underflow_error(w){}
};

virtual unsigned char GetByte() { // Return either 0
if(NULL==Source) return 0; // if we have no source
else return Source->GetByte(); // otherwise its byte.
}

FilterChain(){Source=NULL;} // Default Constructor no source.

// The next constructor throws an error if no source is defined.

FilterChain(FilterChain* S) {
if(NULL==S) throw BadSource("FilterChain: NULL source not valid");
else Source = S;
}
};

// FilterChainInput
// This version of FilterChain accepts an istream as a source and
// gets a single character from it at each GetByte();

class FilterChainInput : public FilterChain {

private:

istream* SourceIstream;

public:

// Here we overload the GetByte() function to get a byte
// from the source stream. This is a little bit special because
// we're going to start our filtering process. Since we are
// filtering text streams for pattern matching systems we will
// eat any special control characters we get - including <CR>.
// This helps us standardize on a *nix model for line ends as
// each line end will be \n. It also gets rid of a lot of junk.

unsigned char GetByte() { // Get the next byte.
char i; // Keep it here.

do{ // Loop to eat junk.

SourceIstream->get(i); // Read the next byte...
if(!SourceIstream->good()) // If something went wrong then
throw Empty("FilterChain: No more data"); // throw the empty exception.

if(i >= ' ') break; // Send all good bytes right away.
if(i=='\n' || i=='\t') break; // If we hit a \n or \t send it.
// Otherwise quietly eat anything
} while(true); // less than a space.

return i; // Return the latest byte...
}

// Here we overload the constructor to accept a stream.

FilterChainInput(istream* S){ // Build me with a stream.
if(NULL==S) throw BadSource("FilterChainInput: Null source not valid" ); // If it's NULL that's bad.
if(!S->good()) throw BadSource("FilterChainInput: Bad istream"); // Not good is bad.
else SourceIstream = S; // If it's good we keep it.
}

FilterChainInput() { // If we don't have a source then
throw BadSource("FilterChainInput: Source required"); // we're no good.
}
};

// FilterChainCString
// This version sources the data for the chain from a message buffer, or
// more precisely a null terminated string. The basic operation is identical
// to that of FilterChainInput above except that we're not working with
// a filestream as an input.

class FilterChainCString : public FilterChain {

private:

unsigned char* InputBuffer;
int BufferIndex;

public:

// Here we overload GetByte() just like we do in FilterChainInput
// except that we're going to get our data from a NULL terminated
// string instead of a stream. IN FACT ... the code below was simply
// copied from FilterChainInput and modified in place.

unsigned char GetByte() { // Get the next byte.
unsigned char i; // Keep it here.

do{ // Loop to eat junk.

i = InputBuffer[BufferIndex++]; // Read the next byte...
if(0 == i) // If there's nothing left then
throw Empty("FilterChainCString: No more data"); // throw the empty exception.

if(i >= ' ') break; // Send all good bytes right away.
if(i=='\n' || i=='\t') break; // If we hit a \n or \t send it.
// Otherwise quietly eat anything
} while(true); // less than a space.

return i; // Return the latest byte...
}

// Here we overload the constructor to accept a character buffer.

FilterChainCString(unsigned char* S){ // Build me with a char buffer.
if(NULL==S) throw BadSource("FilterChainCString: NULL source not valid"); // If it's NULL that's bad.
if(0==S[0]) throw BadSource("FilterChainCString: Empty source not valid"); // Empty is bad.
else InputBuffer = S; // If it's good we keep it.
BufferIndex = 0; // Always start at index 0.
}

FilterChainCString() { // If we don't have a source then
throw BadSource("FilterChainCString: Source required"); // we're no good.
}
};
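
// A minimal usage sketch (hypothetical helper; assumes Message is a non-empty,
// null terminated buffer): a chain is drained by calling GetByte() until the
// source throws Empty, which is how a downstream consumer reads a whole message.

inline int ExampleDrainChain(unsigned char* Message) {     // Count the filtered bytes.
    FilterChainCString Source(Message);                    // Wrap the message buffer.
    int Count = 0;                                         // Bytes delivered so far.
    try {
        while(true) {                                      // Pull bytes until the
            Source.GetByte();                              // source runs dry and
            ++Count;                                       // throws Empty.
        }
    }
    catch(const FilterChain::Empty&) { }                   // Empty marks end of data.
    return Count;                                          // Return the byte count.
}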

// FilterChainCBFR
// This version sources the data for the chain from a message buffer, NOT
// a null terminated string. The basic operation is identical to FilterChainCString
// except that this version requires the length of the buffer and stops when that
// number of characters have been read.

class FilterChainCBFR : public FilterChain {

private:

unsigned char* InputBuffer;
int BufferIndex;
int BufferLength;

stringstream& PrependedHeaders;

bool PrependNotBuffer;

public:

// Here we overload GetByte() just like we do in FilterChainInput
// except that we're going to get our data from a known length char
// buffer instead of a stream. IN FACT ... the code below was simply
// copied from FilterChainCString and modified in place.

unsigned char GetByte() { // Get the next byte.
unsigned char i; // Keep it here.

if(PrependNotBuffer) { // While in prepend mode:

if(BufferIndex < PrependedHeaders.str().length()) { // If there is more to get
i = PrependedHeaders.str().at(BufferIndex); // then get it and move
++BufferIndex; // the index.
} else { // As soon as we run out
PrependNotBuffer = false; // of prepended headers switch
BufferIndex = 0; // to the CBFR and reset the index.
return GetByte(); // Recurse to get the next byte.
}

} else { // While in buffer mode:

do{ // Loop to eat junk.
if(BufferLength <= BufferIndex) // If there's nothing left then
throw Empty("FilterChainCBFR: No more data"); // throw the empty exception.

i = InputBuffer[BufferIndex++]; // Read the next byte...

if(i >= ' ') break; // Send all good bytes right away.
if(i=='\n' || i=='\t') break; // If we hit a \n or \t send it.
// Otherwise quietly eat anything
} while(true); // less than a space.
}

return i; // Return the latest byte...
}

// Here we overload the constructor to accept a buffer, a length, and prepended headers.

FilterChainCBFR(unsigned char* S, int l, stringstream& P) : // Give me a bfr and a stringstream.
InputBuffer(S), // Grab the buffer,
BufferLength(l), // Grab the buffer length,
BufferIndex(0), // Initialize the index to 0,
PrependedHeaders(P), // Grab the PrependedHeaders reference.
PrependNotBuffer(true) { // Do PrependedHeaders first.

if(NULL==S) throw BadSource("FilterChainCBFR: NULL source not valid"); // If it's NULL that's bad.
if(0==l && 0==P.str().length())
throw BadSource("FilterChainCBFR: Empty source not valid"); // Empty is bad.
}

};
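
// A minimal construction sketch (hypothetical values): the CBFR form takes a raw
// buffer, its length, and a stringstream of headers to prepend. The prepended
// headers are delivered first; the buffer contents follow.

inline unsigned char ExampleFirstCBFRByte(unsigned char* Bfr, int Length) {
    stringstream Prepended;                                // Headers to emit first.
    Prepended << "X-Example: sketch\n";                    // (Hypothetical header line.)
    FilterChainCBFR Source(Bfr, Length, Prepended);        // Prepended, then buffer.
    return Source.GetByte();                               // First byte is 'X' here.
}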

// FilterChainBase64
// This version decodes base64 content in email messages. It begins
// to decode this as soon as it sees the following message and two
// blank lines indicating the coding has started.
//
// Content-Transfer-Encoding: base64
//
// Once it sees a bad character or what appears to be the start of
// a new MIME segment, the filter turns off and passes through it's
// source data.

// The startup string for this filter is below. In this case we keep the
// <LF> part of the string to ensure we will be looking at the start
// of a line when we match.

const static unsigned char Base64Start[] = "\nContent-Transfer-Encoding: base64";

// The following table makes conversion fast because it's all lookups. The
// special value XX64 is used everywhere a bad byte is found in the table.

const static unsigned char XX64 = 0xFF;

// Note the special case '=' is used for pad. It is given the value 0x00.

// The input to this table is the incoming byte. The output is either XX64
// or a valid base64 numerical value.

const static unsigned char Base64Table[256] = {

// 0 1 2 3 4 5 6 7 8 9 A B C D E F

XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // 0
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // 1
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,0x3E,XX64,XX64,XX64,0x3F, // 2
0x34,0x35,0x36,0x37,0x38,0x39,0x3A,0x3B,0x3C,0x3D,XX64,XX64,XX64,0x00,XX64,XX64, // 3
XX64,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,0x0E, // 4
0x0F,0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,XX64,XX64,XX64,XX64,XX64, // 5
XX64,0x1A,0x1B,0x1C,0x1D,0x1E,0x1F,0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28, // 6
0x29,0x2A,0x2B,0x2C,0x2D,0x2E,0x2F,0x30,0x31,0x32,0x33,XX64,XX64,XX64,XX64,XX64, // 7
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // 8
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // 9
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // A
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // B
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // C
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // D
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64, // E
XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64,XX64 // F
};

// The following constants are used to find segment positions when converting from
// 4 six bit values to 3 octets.

const static unsigned char base64_seg0_shift = 18;
const static unsigned char base64_seg1_shift = 12;
const static unsigned char base64_seg2_shift = 6;
const static unsigned char base64_seg3_shift = 0;
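
// A worked packing example (sketch only; the class below does the real decode
// in its GetByte()): four 6-bit values from Base64Table are packed into a 24 bit
// workspace with the shifts above and read back out as three octets, so the
// base64 text "TWFu" becomes "Man".

inline void ExampleBase64Pack(unsigned char* Out) {               // Out must hold 3 bytes.
    unsigned int Workspace =
        (Base64Table[(unsigned char)'T'] << base64_seg0_shift) |  // 0x13 -> bits 23..18
        (Base64Table[(unsigned char)'W'] << base64_seg1_shift) |  // 0x16 -> bits 17..12
        (Base64Table[(unsigned char)'F'] << base64_seg2_shift) |  // 0x05 -> bits 11..6
        (Base64Table[(unsigned char)'u'] << base64_seg3_shift);   // 0x2E -> bits 5..0
    Out[0] = (Workspace >> 16) & 0xFF;                            // 'M'
    Out[1] = (Workspace >> 8) & 0xFF;                             // 'a'
    Out[2] = Workspace & 0xFF;                                    // 'n'
}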

class FilterChainBase64 : public FilterChain {

private:

unsigned char x,y; // We need a few holding bins.
unsigned int Workspace; // Numerical workspace for conversion.

enum FilterState { // Operating State Codes.
SCANNING, // One-in = One-out, looking for startup.
DEQUEING, // Delivering buffered data.
DECODING // Delivering filtered data.
} State;

int ScanIx; // Scanning Index.
int DequeIx; // Dequeing Index.
unsigned char Buffer; // Define a buffer.

bool ValidByte(unsigned char y); // True if y can be decoded.

public:

unsigned char GetByte(); // Overload the main fn().

FilterChainBase64(FilterChain* S) // Sourced constructor...
:FilterChain(S){ // Call the base constructor.
State = SCANNING; // Set filter inactive.
ScanIx=DequeIx=0; // Reset our indexes.
} // We're all ready to start.

FilterChainBase64() { // Don't allow any
throw BadSource("FilterChainBase64: Source required"); // null constructors.
}

};

// FilterChainQuotedPrintable
// This version decodes quoted-printable content in email messages.
//
// For simplicity this one is always on. That is, whenever it sees a
// convertible quoted printable byte it will exchange it for the byte
// that is represented. This is only intended for operation preceding the
// spam filter engine so it is safe to make these conversions.

class FilterChainQuotedPrintable : public FilterChain {

private:

long int Workspace; // Plain Text Workspace.
enum FilterState { // Operating State Codes
SCANNING, // One-in = One-out - looking for start-up.
DEQUEING, // Delivering buffered data.
DECODING // Delivering filtered data.
} State;

int BufferLength; // How full is the buffer.
int BufferIndex; // What byte are we on?
unsigned char Buffer[ScanBufferSize]; // Define the buffer.

bool isHexDigit(unsigned char i); // true if i is a hex digit byte.
int convertHexDigit(unsigned char i); // returns integer value of hex digit i.

public:

unsigned char GetByte(); // Overload the main fn().

FilterChainQuotedPrintable(FilterChain* S) // Sourced constructor...
:FilterChain(S){ // Call the base constructor.
State = SCANNING; // Set to the initial state.
BufferIndex = 0; // Initial buffer index.
BufferLength = 0; // Initial buffer length.
Workspace = 0; // Clear the workspace.
}

FilterChainQuotedPrintable() { // Don't allow any
throw BadSource("FilterChainQuotedPrintable: Source required"); // null constructors.
}

};
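
// A worked conversion example (sketch; the class above declares isHexDigit() and
// convertHexDigit() for this job inside GetByte()): an "=XX" sequence is decoded
// by turning the two hex digits into a single byte, so "=41" becomes 'A'.

inline unsigned char ExampleDecodeQP(unsigned char hi, unsigned char lo) {  // e.g. hi='4', lo='1'
    int HighNibble = isdigit(hi) ? (hi - '0') : (toupper(hi) - 'A' + 10);   // Convert each hex
    int LowNibble  = isdigit(lo) ? (lo - '0') : (toupper(lo) - 'A' + 10);   // digit to its value.
    return (unsigned char)((HighNibble << 4) | LowNibble);                  // "=41" -> 0x41 -> 'A'
}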


// FilterChainDefunker
// This module stores a copy of the stream containing HTML and then emits it
// at the end of the stream with all of the html elements removed and/or decoded
// to eliminate html based obfuscation.

class FilterChainDefunker;

static const int DefunkerSize = 32768; // Store size.
static const int DefunkerQueueSize = 24; // Size of defunker queue.

static const char* DefunkerPreamble = " ----[DEFUNKER]---- ";

// Patterns to match

static const char* patMatchBR = "<br>";
static const char* patMatchP = "<p>";
static const char* patNBSP = "&nbsp;";
static const char* patAMP = "&amp;";
static const char* patAPOS = "&apos;";
static const char* patLT = "&lt;";
static const char* patGT = "&gt;";
static const char* patQUOT = "&quot;";

class FilterChainDefunker : public FilterChain { // Class definition.

private:

unsigned char StoreBuffer[DefunkerSize];
int InputPosition;
int OutputPosition;

// Nodes in the state change model are represented by functions.
// These modes represent the state prior to getting the Empty exception.
// During this mode, the Defunker simply stores a portion of the message
// to be scanned later.

unsigned char LastRawByte; // Last Raw Byte (for SkipHeaders);
unsigned char SkipHeaders(); // Skips the headers before Store();
unsigned char Store(); // Stores the message content for later.

// Here is a handy Queue mechanism for recovering failed patterns.

int QueueLength; // Queue Length (write position).
int QueuePosition; // Queue Read Position.
unsigned char Qbfr[DefunkerQueueSize]; // Queue Buffer.

void ClearQueue() { // Clear the queue.
memset(Qbfr,0,sizeof(Qbfr)); // Reset the buffer.
QueueLength = 0; // Zero the length.
QueuePosition = 0; // Zero the position.
}

unsigned char DeQueue() { // Empty the queue then back to DefunkRoot.
if(QueuePosition >= QueueLength) { // If the queue is empty then
ClearQueue(); // clear the queue,
Internal = &FilterChainDefunker::DefunkRoot; // go back to DefunkRoot mode,
return GetInternal(); // and return the next byte.
} // If the queue is not empty then
return Qbfr[QueuePosition++]; // return the next byte from the queue.
}

void EnQueue(unsigned char x) { // Add a byte to the queue.
if(QueueLength<DefunkerQueueSize) // If we are safely within the buffer
Qbfr[QueueLength++] = x; // then add this byte to the queue.
}

// These modes represent the Defunker pulling data out of its
// stored copy so that it can be filtered and delivered to the scanner.
// These modes get turned on once the Empty exception is read from
// the underlying source.

unsigned char Preamble(); // Preamble - separates Defunked text.
unsigned char DefunkRoot(); // Root in Defunk mode.
unsigned char OpenTag(); // Open tag detected.
unsigned char OpenAmp(); // Open & tag.
unsigned char MatchBR(); // Matching <br>
unsigned char MatchP(); // Matching <p>
unsigned char MatchNBSP(); // Matching &nbsp;
unsigned char SwitchAMPAPOS(); // Looking for AMP or APOS.
unsigned char MatchAMP(); // Matching &amp;
unsigned char MatchAPOS(); // Matching &apos;
unsigned char MatchLT(); // Matching &lt;
unsigned char MatchGT(); // Matching &gt;
unsigned char MatchQUOT(); // Matching &quot;
unsigned char EatTag(); // Eating an unknown tag.
unsigned char DecodeNum(); // Decoding &#...number...;

// Part of defunking is to convert all runs of whitespace into a single space.
// It also doubles as the master output function once we're out of Store() mode.

unsigned char SpaceConvChart[256]; // Space conversion chart.
unsigned char LastReadOut; // Last ReadOut byte (for deduping spaces).
unsigned char ReadOut(); // Read out the store through the filter.

unsigned char LastGetStore; // Last GetStore byte (for EatTag).
unsigned char GetStore(); // Read a byte from the store.

// Here is a handy pattern match function for eliminating some tags.

bool MatchTagPattern(const char* pattern) { // Matches pattern. True if matched.
int pos = 2; // Now on the third byte (index 2).
while(pattern[pos]){ // While we have more bytes to match
unsigned char x = GetStore(); // grab the next byte.

// Special case - HTML tag with a space as in <p stuff>

if(x==' ' && pattern[pos]=='>') { // If we have a tag with parameters.
pos++; // Move pos forward to its null.
while(GetStore()!='>')continue; // Eat up to the > and then
break; // we are done.
}

// In the normal case follow the pattern.

if(tolower(x)!=pattern[pos]) break; // If we fell off then stop.
pos++; // If we didn't break move ahead.
}

// At this point we are either at the null in our pattern or we did not match.

if(pattern[pos]) { return false; } // If we're not at the end then no match.

return true; // Otherwise we do have a match :-)
}

// These are the function pointers that map the current state of this object.

unsigned char (FilterChainDefunker::*Master)(); // Master function for GetByte()
unsigned char (FilterChainDefunker::*Internal)(); // Internal function for GetByte()

public:

unsigned char GetByte() { // Overload the main fn().
return (*this.*Master)(); // Call the master function.
}

unsigned char GetInternal() { // Internal state machine get.
return (*this.*Internal)(); // Call the internal function.
}

FilterChainDefunker(FilterChain* S) // Sourced constructor...
:FilterChain(S), // Call the base constructor.
Master(&FilterChainDefunker::SkipHeaders), // Set the initial external and
Internal(&FilterChainDefunker::Preamble), // internal states.
InputPosition(0), // Reset both position pointers.
OutputPosition(0),
LastReadOut(0),
LastGetStore(0),
LastRawByte(0) {

ClearQueue(); // Clear the queue;

memset(StoreBuffer,0,sizeof(StoreBuffer)); // Clear the store buffer.

for(int i=0;i<256;i++) SpaceConvChart[i]=i; // Initialize the chart.
SpaceConvChart[(int)'\r']=' '; // Convert <CR> to space.
SpaceConvChart[(int)'\n']=' '; // Convert <LF> to space.
SpaceConvChart[(int)'\t']=' '; // Convert Tab to space.
}

FilterChainDefunker() { // Don't allow any
throw BadSource("FilterChainDefunker: Source required"); // null constructors.
}

};
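
// A minimal sketch of the pointer-to-member-function dispatch used above
// (hypothetical two-state machine, not part of the Defunker itself): each state
// is a member function and the "current state" is simply a pointer to one of them,
// which avoids a switch(State) on every byte.

class ExampleTwoState {

private:

unsigned char (ExampleTwoState::*State)();             // Current state function.

unsigned char StateA() {                               // First state: switch to B
    State = &ExampleTwoState::StateB; return 'A';      // and emit 'A'.
}
unsigned char StateB() {                               // Second state: switch to A
    State = &ExampleTwoState::StateA; return 'B';      // and emit 'B'.
}

public:

ExampleTwoState() : State(&ExampleTwoState::StateA) {} // Start in state A.
unsigned char GetByte() { return (*this.*State)(); }   // Dispatch like the Defunker.

};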

// FilterChainUrlDecode
// This module removes any unnecessary URL encoding within an <a...> tag. The
// cleaned up version (if different) is emitted immediately after the original
// <a...> tag so that both versions can be interpreted by the pattern scanner.
// This is designed to eliminate common obfuscation techniques.

const int UrlDecodeBfrSize = 256; // Decode Buffer Size.

class FilterChainUrlDecode : public FilterChain {

private:

unsigned char DecodeBfr[UrlDecodeBfrSize]; // Decoded anchor buffer.
int DecodeLength; // Decoded anchor length.
int DecodePosition; // Read (Inject) Position.
bool DecodeFlag; // True if the URL was decoded.

void Clear() { // Function to clear the bfr.
memset(DecodeBfr,0,sizeof(DecodeBfr)); // Null it out and set
DecodeLength = 0; // the length to zero.
DecodePosition = 0; // Reset the Read position.
DecodeFlag = false; // Reset the Decode Flag.
}

void AddToBfr(unsigned char c) { // Safely add to our buffer.
if(DecodeLength < sizeof(DecodeBfr)-1) // If we have more room then
DecodeBfr[DecodeLength++] = c; // write the incoming byte.
}

unsigned char (FilterChainUrlDecode::*Internal)(); // Internal State Fn

bool isHexDigit(unsigned char i); // Is i a hex digit?
int convertHexDigit(unsigned char i); // Convert a single hex digit.
unsigned char convertHexByte(unsigned char* x); // Convert a hex byte.

// Here are the states of the UrlDecode module...

unsigned char Bypass(); // Bypass - waiting for '<'
unsigned char Tag(); // Looks for an 'a' or 'i' after '<'
unsigned char Img1(); // Looks for 'm' in <img
unsigned char Img2(); // Looks for 'g' in <img
unsigned char Root(); // Root state of the decode FSM.
unsigned char GetD1(); // Decoding step one.
unsigned char GetD2(); // Decoding step two.
unsigned char Inject(); // Injects the bfr into the stream.

public:

unsigned char GetByte() { // Overload the main fn().
return (*this.*Internal)(); // Call the Internal function.
}

FilterChainUrlDecode(FilterChain* S) // Sourced constructor...
:FilterChain(S), // Call the base constructor.
Internal(&FilterChainUrlDecode::Bypass) { // Set ByPass mode.
Clear(); // Clear the system.
}

FilterChainUrlDecode() { // Don't allow any
throw BadSource("FilterChainUrlDecode: Source required"); // null constructors.
}

};

// FilterChainHeaderAnalysis (and friends)
// Performs header anomaly analysis and IP extraction and analysis.
// IP Analysis is performed via a provided class that implements the IPTester
// interface. An IP is provided to the IPTester as a [#.#.#.#] string. The
// IPTester may respond with information to be emitted into the headers for
// the pattern matching engine based on those results --- or not ;-)

class FilterChainIPTester {
public:
virtual string& test(string& input, string& output) = 0;
};

// The supplied test() function accepts the input string and returns the
// output string. If desired, the output string can be modified to include
// data from the tests that will be emitted into the data stream for the
// pattern analysis engine to see. Otherwise, the output string should
// remain blank. The test() function _should_ be thread safe -- that is why
// we pass it both input and output ;-)
//
// The provided tester may have any side-effects that are desired.
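
// A minimal tester sketch (hypothetical implementation of the interface above):
// it ignores the [#.#.#.#] input, emits nothing into the stream, and simply
// returns the blank output string.

class ExampleNullIPTester : public FilterChainIPTester {
public:
    string& test(string& input, string& output) {      // No side effects here:
        (void) input;                                   // ignore the input IP,
        output = "";                                    // keep the output blank,
        return output;                                  // and hand it back.
    }
};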

class FilterChainHeaderAnalysis : public FilterChain {

private:

unsigned char (FilterChainHeaderAnalysis::*Mode)(); // Internal State Fn Pointer (What Mode)
FilterChainIPTester& IPTester; // This is the IP tester we use.
string IPToTest; // String to capture IPs for testing.
string IPTestResult; // String to receive IPtest results.

// Header analysis output state...

string EndOfHeaderResults; // String to capture EndOfHeaderResults.

// OutputIndex and OutputLength are used to inject string data.
// These are used to inject IPTestResult data and Header Analysis data.

char* OutputBuffer; // Pointer to output injection string.
int OutputIndex; // End of header output results index.
void SetOutputBuffer(string& s); // Setup the OutputBuffer.
unsigned char doInjectIPTestResult(); // Inject OutputBuffer and go to doSeekNL.
unsigned char doInjectAnalysis(); // Inject OutputBuffer and go to doOff.

// Header seek pattern state...
// These tools work to follow patterns for header tags.
// SetFollowPattern resets the engine and establishes the pattern to follow.
// FollowPattern checks c against the next byte in the pattern.
// -1 = The pattern failed.
// 1 = The pattern was followed.
// 0 = The pattern is complete.

const char* MatchPattern; // Current pattern to match.
int MatchIndex; // Pattern match following index.
void SetFollowPattern(const char* p) { MatchPattern = p; MatchIndex = 0; } // Set the pattern to follow.
int FollowPattern(char c); // Follow the pattern.

//// Internal modes for this module...

unsigned char doSeekNL(); // Looking for a new line.
unsigned char doSeekDispatch(); // Looking at the first char after NL.
unsigned char doReceived(); // Identifying a Received: header.
unsigned char doFindIP(); // Seeking the [IP] in a Received header.
unsigned char doTestIP(); // Gets and tests the [IP].
unsigned char doFrom(); // Identifying a From: header.
unsigned char doTo(); // Identifying a To: header.
unsigned char doCC(); // Identifying a CC: header.
unsigned char doMessageID(); // Identifying a MessageID header.
unsigned char doDate(); // Identifying a Date: header.
unsigned char doSubject(); // Identifying a Subject: header.
unsigned char doEndOfHeaders(); // IdentifyEndOfHeaders & Emit Results.

unsigned char doOff() { return FilterChain::GetByte(); } // Bypass mode.

bool FoundFrom; // True if From: was found.
bool FoundTo; // True if To: was found.
bool FoundCC; // True if CC: was found.
bool FoundMessageID; // True if Message-ID: was found.
bool FoundDate; // True if Date: was found.
bool FoundSubject; // True if Subject: was found.
bool FoundHighBitCharacters; // True if high bit characters were found.

unsigned char GetCheckedByte() { // Internal GetByte & check for high bits.
unsigned char x = FilterChain::GetByte(); // Get the byte from up the chain.
if(0 < (x & 0x80)) { // Check for a high bit byte (non-ascii).
FoundHighBitCharacters = true; // If it is found then set the flag.
} // If not then at least we checked ;-)
return x; // Return the byte.
}

public:

unsigned char GetByte() { // Overload the main fn().
return (*this.*Mode)(); // Call the Internal function for this mode.
}

FilterChainHeaderAnalysis(FilterChain* S, FilterChainIPTester& T) : // Construct with the chain and a tester.
FilterChain(S), // Capture the chain.
IPTester(T), // Capture the tester.
IPToTest(""), // IPToTest and
IPTestResult(""), // IPTestResult are both empty to start.
FoundFrom(false), // Set all of the "found" bits to false.
FoundTo(false),
FoundCC(false),
FoundMessageID(false),
FoundDate(false),
FoundSubject(false),
FoundHighBitCharacters(false),
Mode(&FilterChainHeaderAnalysis::doSeekDispatch) { // Start in SeekDispatch() mode
} // -- first byte of a new line ;-)

bool MissingFrom() { return (!FoundFrom); } // True if missing From header.
bool MissingTo() { return (!FoundTo); } // True if missing To header.
bool MissingCC() { return (!FoundCC); } // True if missing CC header.
bool MissingSubject() { return (!FoundSubject); } // True if missing Subject header.
bool MissingDate() { return (!FoundDate); } // True if missing Date header.
bool MissingMessageID() { return (!FoundMessageID); } // True if missing MessageID header.
bool HighBitCharacters() { return (FoundHighBitCharacters); } // True if High bit characters were found.

};

#endif

+ 814 - 0  GBUdb.cpp

@@ -0,0 +1,814 @@
// GBUdb.cpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// See GBUdb.hpp for details.

#include <iostream>
#include <fstream>
#include <cstring>
#include <cstdio>   // sprintf() below.
#include <ctime>    // time()/gmtime() below.
#include <cassert>  // assert() in load().
#include <unistd.h>
#include "GBUdb.hpp"

using namespace std;

//// Handy utilities...

//// GBUdbDataset implementations //////////////////////////////////////////////

GBUdbDataset::~GBUdbDataset() { // Shutdown a dataset.
if(NULL != DataArray) { // If the DataArray was allocated
delete[] DataArray; // be sure to delete it and
DataArray = NULL; // NULL its pointer.
}
MyArraySize = 0; // For safety set the size to zero
MyFileName = ""; // and "" the name.
}

GBUdbDataset::GBUdbDataset(const char* SetFileName) : // Open/Create a dataset.
DataArray(NULL), // The array pointer starts as NULL.
MyArraySize(0) { // And the size is zero.
FileName(SetFileName); // Set the file name if provided.
if(0 != MyFileName.length() && (0 == access(MyFileName.c_str(),F_OK))) { // If a file name was provided and exists
load(); // then read the file from disk.
} else { // If the file name was not provided
DataArray = new GBUdbRecord[GBUdbDefaultArraySize]; // then allocate a new Array of
MyArraySize = GBUdbDefaultArraySize; // the default size.
DataArray[ixNextFreeNode()].RawData = // The first new node is the one
GBUdbRootNodeOffset + GBUdbRecordsPerNode; // right after the root node.
DataArray[ixMatchListRoot()].RawData = // Once that's up we can use it to
newMatchNodeRoot(); // allocate the first MatchNode.
}
}

GBUdbDataset::GBUdbDataset(GBUdbDataset& Original) : // Copy constructor.
MyFileName(Original.MyFileName), // Copy the name pointer.
DataArray(NULL), // The array pointer starts as NULL.
MyArraySize(Original.MyArraySize) { // We copy the ArraySize
DataArray = new GBUdbRecord[MyArraySize]; // then allocate a new Array that size.
memcpy(DataArray, Original.DataArray, sizeof(GBUdbRecord) * MyArraySize); // Then we copy the data wholesale.
}

const char* GBUdbDataset::FileName(const char* NewName) { // (Re) Set the file name.
MyFileName = ""; // Delete any previous file name.
if(NULL != NewName) { // If we've been given a non-null cstring
MyFileName = NewName; // capture it as our file name.
}
return MyFileName.c_str(); // Return our new FileName.
}

//// During the read, it is safe to plow through the array without
//// checking because any unknown entry points to the zero node and
//// all zero node entries point to the zero node. The read-only
//// method does not add new nodes.

GBUdbRecord& GBUdbDataset::readRecord(unsigned int IP) { // Read a record.
IP = remapIP00toFF(IP); // Make the IP safe for consumption.
int a0, a1, a2, a3; // We will break the IP into 4 octets.
unsigned int xIP = IP; // Grab a copy of IP to manipulate.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
const int BitsInOneOctet = 8; // Number of bits to shift per octet.
a3 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a3 octet and shift the IP.
a2 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a2 octet and shift the IP.
a1 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a1 octet and shift the IP.
a0 = xIP & LowOctetMask; // Grab the final octet.
GBUdbIndex RecordIndex = GBUdbRootNodeOffset; // Starting at the root node, follow...
RecordIndex = DataArray[RecordIndex + a0].Index(); // Follow the node then
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { return MatchedData(RecordIndex); } // If we have an exact match we're done!
else { return SafeUnknownRecord(); } // If we have a mismatch we are lost...
}
RecordIndex = DataArray[RecordIndex + a1].Index(); // Follow the node then
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { return MatchedData(RecordIndex); } // If we have an exact match we're done!
else { return SafeUnknownRecord(); } // If we have a mismatch we are lost...
}
RecordIndex = DataArray[RecordIndex + a2].Index(); // Follow the node. No more match checks.
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { return MatchedData(RecordIndex); } // If we have an exact match we're done!
else { return SafeUnknownRecord(); } // If we have a mismatch we are lost...
}
return DataArray[RecordIndex + a3]; // Final node has our data :-)
}

//// dropRecord()
//// This code is essentially a hack of the readRecord() code. If it finds
//// the record it will return true, mark the record as GBUdbUnknown, reduce
//// the IP count, and de-allocate the Match record. Records stored in nodes
//// are set to GBUdbUnknown and the node is left in place - otherwise repeated
//// add and drop operations would lead to leaking all nodes into the match
//// record allocation space. (Node allocation is not a linked list ;-)

bool GBUdbDataset::dropRecord(unsigned int IP) { // Drop an IP record.
IP = remapIP00toFF(IP); // Make the IP safe for consumption.
int a0, a1, a2, a3; // We will break the IP into 4 octets.
unsigned int xIP = IP; // Grab a copy of IP to manipulate.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
const int BitsInOneOctet = 8; // Number of bits to shift per octet.
a3 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a3 octet and shift the IP.
a2 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a2 octet and shift the IP.
a1 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a1 octet and shift the IP.
a0 = xIP & LowOctetMask; // Grab the final octet.
GBUdbIndex RecordIndex = GBUdbRootNodeOffset; // Starting at the root node, follow...
GBUdbIndex Node0Index = GBUdbRootNodeOffset; // Keep track of our previous nodes.
GBUdbIndex Node1Index = 0; // This node not set yet.
GBUdbIndex Node2Index = 0; // This node not set yet.
GBUdbIndex Node3Index = 0; // This node not set yet.

RecordIndex = DataArray[Node0Index + a0].Index(); // Follow the node then
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { // If we have an exact match we proceed:
MatchedData(RecordIndex).RawData = GBUdbUnknown; // Set the data in the match to unknown.
DataArray[Node0Index + a0].Index(GBUdbUnknown); // Remove the reference to the match record.
deleteMatchAt(RecordIndex); // Reclaim the match record for re-use.
decreaseIPCount(); // Reduce the IP count.
return true; // Return that we were successful.
} else { return false; } // If we have a mismatch we cannot delete.
} else { // If this was a Node link then
Node1Index = RecordIndex; // capture the node root and get ready
} // to follow the next node.

RecordIndex = DataArray[Node1Index + a1].Index(); // Follow the node then
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { // If we have an exact match we proceed:
MatchedData(RecordIndex).RawData = GBUdbUnknown; // Set the data in the match to unknown.
DataArray[Node1Index + a1].Index(GBUdbUnknown); // Remove the reference to the match record.
deleteMatchAt(RecordIndex); // Reclaim the match record for re-use.
decreaseIPCount(); // Reduce the IP count.
return true; // Return that we were successful.
} else { return false; } // If we have a mismatch we cannot delete.
} else { // If this was a Node link then
Node2Index = RecordIndex; // capture the node root and get ready
} // to follow the next node.

RecordIndex = DataArray[Node2Index + a2].Index(); // Follow the node then
if(isMatch(RecordIndex)) { // Check for a shortcut (match record).
if(isMatch(RecordIndex, IP)) { // If we have an exact match we proceed:
MatchedData(RecordIndex).RawData = GBUdbUnknown; // Set the data in the match to unknown.
DataArray[Node2Index + a2].Index(GBUdbUnknown); // Remove the reference to the match record.
deleteMatchAt(RecordIndex); // Reclaim the match record for re-use.
decreaseIPCount(); // Reduce the IP count.
return true; // Return that we were successful.
} else { return false; } // If we have a mismatch we cannot delete.
} else { // If this was a Node link then
Node3Index = RecordIndex; // capture the node root and get ready
} // to follow the next node.

RecordIndex = Node3Index + a3; // Follow the node.
if(GBUdbUnknown != DataArray[RecordIndex].RawData) { // If there is data there then
DataArray[RecordIndex].RawData = GBUdbUnknown; // mark the entry as unknown,
decreaseIPCount(); // decrease the IP count
return true; // and return true.
} // If we got all the way to the end and
return false; // didn't find a match then return false.
}

/* Ahhh, the simple life. In a single mode lightning index, each key
** octet lives in a node, so when you grow a new path you either follow
** existing nodes or make new ones. We're not doing that here, but as
** a reference here is how that is usually handled:
**
GBUdbIndex GBUdbDataset::invokeAt(GBUdbRecord& R) { // Invoke at Record.
if(GBUdbUnknown == R.RawData) { // If the record does not point to a
R.Index(newNodeRoot()); // node then give it a new node.
} // If the record already has a node
return R.Index(); // or we gave it one, then follow it.
}
*/

//// Little helper function for invokeAt()

int getOctet(int Octet, unsigned int IP) { // Returns Octet number Octet from IP.
const int BitsInOneOctet = 8; // Number of bits to shift per octet.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
int BitsToShift = 0; // Assume we want a3 but
switch(Octet) { // If we don't, use this handy switch.
case 0: { BitsToShift = 3 * BitsInOneOctet; break; } // For octet 0, shift out 3 octets.
case 1: { BitsToShift = 2 * BitsInOneOctet; break; } // For octet 1, shift out 2 octets.
case 2: { BitsToShift = 1 * BitsInOneOctet; break; } // For octet 2, shift out 1 octet.
} // For octet 3, shift no octets.
if(0 < BitsToShift) { // If we have bits to shift then
IP >>= BitsToShift; // shift them.
}
return (IP & LowOctetMask); // Extract the octet at the bottom.
}

//// invokeAt() is a helper function that encapsulates the work of growing new
//// pathways. There are several cases to handle in a bimodal indexing scheme
//// since sometimes you extend new nodes (as commented out above), and some-
//// times you create MatchRecords, and sometimes you have collisions and
//// have to extend previous matches.... or not. All of that will become clear
//// shortly ;-) The good news is that at least invokeAt() is always supposed
//// to return the next place to go --- that is, you never get lost because if
//// the next step in the path does not exist yet then you create it.

GBUdbIndex GBUdbDataset::invokeAt(GBUdbRecord& R, unsigned int IP, int Octet, bool ExtendMatches) {

// R is either known (goes somewhere) or unknown (we would be lost).
// IF R is UNKNOWN then we ...
//// create a match and return it. (No conflict, no extension, no extra node :-)
//**** We got out of that one so we're back at the root level.

if(GBUdbUnknown == R.RawData) {
R.Index(newMatchRecord(IP));
return R.Index();
}

// ELSE R is KNOWN then it either points to a MatchRecord or a Node.
//// IF R points to a Node then we will simply follow it.
//**** We got out of that one so we're back at the root level.

if(!isMatch(R.Index())) {
return R.Index();
}

// ELSE R points to a MatchRecord then we get more complex.
//// IF the MatchRecord matches our IP then we simply follow it.
//**** We got out of that one so we're back at the root level.

if(isMatch(R.Index(),IP)) {
return R.Index();
}

// ELSE the MatchRecord does not match then we get more complex again...
//// IF we are Extending Matches then we...
////// create a new node
////// push the existing match onto the new node
////// and create a new match for the new IP on that node.
////// since we already have the solution we return the new match node index (skip a step).
//**** We got out of that one so we're back at the root level.

if(ExtendMatches) { // If we are extending matches
GBUdbIndex I = newNodeRoot(); // we create a new node.
int NewSlotForCurrentMatch = // Locate the slot in that node where
getOctet( // the current match should reside
Octet + 1, // based on the octet after this one
DataArray[R.Index()] // by extracting that octet from
.RawData); // the MatchRecord header.
// Then we put the current match into
DataArray[I + NewSlotForCurrentMatch].Index(R.Index()); // the correct slot on the new node,
return R.Index(I); // point the current slot to that node
} // and return the node to be followed.

// ELSE we are NOT Extending Matches then we...
// ** KNOW that we are adding node a3 and dealing with the final octet **
//// create a new node
//// map the existing match data into the new node.
//// delete the existing match (for reallocation). deleteMatchAt(GBUdbIndex I)
//// map the new IP into the new node.

GBUdbIndex I = newNodeRoot(); // Create a new node.
int NewSlotForCurrentMatch = // Locate the slot in that node where
getOctet( // the current match should reside
Octet + 1, // based on the octet after this one
DataArray[R.Index()] // by extracting that octet from
.RawData); // the MatchRecord header.

if(ExtendMatches) { // If we are extending matches...
// then we put the current match into
DataArray[I + NewSlotForCurrentMatch].Index(R.Index()); // the correct slot on the new node.

} else { // If we are not extending matches...
// then we must be at the end node so
DataArray[I + NewSlotForCurrentMatch].RawData = // we copy in the data from
MatchedData(R.Index()).RawData; // the current MatchRecord,
deleteMatchAt(R.Index()); // and return the MatchRecord for re-use.
}

return R.Index(I); // Point the current slot to new node
} // and return that node index to follow.

//// The "invoke" method creates all of the needed nodes starting
//// at any point where an "unknown" entry is found.

GBUdbRecord& GBUdbDataset::invokeRecord(unsigned int IP) { // Invoke a record.
if(FreeNodes() < GBUdbGrowthThreshold) grow(); // If we need more space, make more.
IP = remapIP00toFF(IP); // Make the IP safe for consumption.
int a0, a1, a2, a3; // We will break the IP into 4 octets.
unsigned int xIP = IP; // Grab a copy of IP to manipulate.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
const bool Extend = true; // Magic number for extending Matches.
const bool DoNotExtend = false; // Magic number for NOT extending them.
const int BitsInOneOctet = 8; // Number of bits to shift per octet.
a3 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a3 octet and shift the IP.
a2 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a2 octet and shift the IP.
a1 = xIP & LowOctetMask; xIP >>= BitsInOneOctet; // Grab the a1 octet and shift the IP.
a0 = xIP & LowOctetMask; // Grab the final octet.
GBUdbIndex RecordIndex = GBUdbRootNodeOffset; // Starting at the root node,
RecordIndex = invokeAt(DataArray[RecordIndex + a0], IP, 0, Extend); // Invoke w/ possible match outcome.
if(isMatch(RecordIndex, IP)) { // If this resulted in a match
GBUdbRecord& Result = MatchedData(RecordIndex); // then we will grab the match data
increaseIPCountIfNew(Result); // and increase the IP count if it's new.
return Result; // Then we return the result. Done!
}
RecordIndex = invokeAt(DataArray[RecordIndex + a1], IP, 1, Extend); // Invoke w/ possible match outcome.
if(isMatch(RecordIndex, IP)) { // If this resulted in a match
GBUdbRecord& Result = MatchedData(RecordIndex); // then we will grab the match data
increaseIPCountIfNew(Result); // and increase the IP count if it's new.
return Result; // Then we return the result. Done!
}
RecordIndex = invokeAt(DataArray[RecordIndex + a2], IP, 2, DoNotExtend); // Invoke w/ possible match outcome.
if(isMatch(RecordIndex, IP)) { // If this resulted in a match
GBUdbRecord& Result = MatchedData(RecordIndex); // then we will grab the match data
increaseIPCountIfNew(Result); // and increase the IP count if it's new.
return Result; // Then we return the result. Done!
}
GBUdbRecord& Result = DataArray[RecordIndex + a3]; // Grab the record at the final node.
increaseIPCountIfNew(Result); // If new, increase the IP count.
return Result; // Return the record.
}

void GBUdbDataset::save() { // Flush the GBUdb to disk.
string TempFileName = MyFileName + ".tmp"; // Calculate temp and
string BackFileName = MyFileName + ".bak"; // backup file names.
ofstream dbFile; // Grab a file for writing.
dbFile.open(TempFileName.c_str(), ios::out | ios::binary | ios::trunc); // Open the file and truncate if present.
dbFile.write((char*)DataArray, sizeof(GBUdbRecord) * MyArraySize); // Write our array into the file.
bool AllOK = dbFile.good(); // Are we happy with this?
dbFile.close(); // Close the file when done to be nice.
if(AllOK) { // If everything appears to be ok
unlink(BackFileName.c_str()); // Delete any old backup file we have
rename(MyFileName.c_str(), BackFileName.c_str()); // and make the current file a backup.
rename(TempFileName.c_str(), MyFileName.c_str()); // Then make our new file current.
}
}

void GBUdbDataset::load() { // Read the GBUdb from disk.

ifstream dbFile; // Grab a file for reading.
dbFile.open(MyFileName.c_str(), ios::in | ios::binary); // Open the file with the name we have.
dbFile.seekg(0, ios::end); // Go to the end of the
int FileSize = dbFile.tellg(); // file and back so we can
dbFile.seekg(0, ios::beg); // determine its size.

int SaneGBUdbFileSizeLimit = (GBUdbDefaultArraySize * sizeof(GBUdbRecord)); // What is a sane size limit?
assert(SaneGBUdbFileSizeLimit <= FileSize); // File size sanity check.

int NewArraySize = FileSize / sizeof(GBUdbRecord); // How many records in this file?

if(NULL != DataArray) { // If we have an array loaded then
delete[] DataArray; // delete the array,
DataArray = NULL; // NULL its pointer,
MyArraySize = 0; // and zero its size.
}

DataArray = new GBUdbRecord[NewArraySize]; // Allocate an array of the proper size
MyArraySize = NewArraySize; // set the local size variable
dbFile.read((char*)DataArray,FileSize); // and read the file into the array.
dbFile.close(); // Close when done to be nice.
}

void GBUdbDataset::grow(int HowManyNodes) { // Grow the DataArray.
int NewArraySize = MyArraySize + (HowManyNodes * GBUdbRecordsPerNode); // Calculate the new array size.
GBUdbRecord* NewDataArray = new GBUdbRecord[NewArraySize]; // Allocate the new array.
int OldArrayLessControl = MyArraySize + GBUdbControlNodeOffset; // Include all records but no control.
memcpy(NewDataArray, DataArray, sizeof(GBUdbRecord) * OldArrayLessControl); // Copy the old data to the new array.
for( // Loop through the control nodes...
int o = MyArraySize + GBUdbControlNodeOffset, // o = old node index
n = NewArraySize + GBUdbControlNodeOffset, // n = new node index
c = GBUdbRecordsPerNode; // c = the record count (how many to do).
c > 0; // For until we run out of records,
c--) { // decrementing the count each time,
NewDataArray[n].RawData = DataArray[o].RawData;n++;o++; // Copy the old control data.
}
delete[] DataArray; // Delete the old data array.
DataArray = NewDataArray; // Swap in the new data array.
MyArraySize = NewArraySize; // Correct the size value.
}

GBUdbIndex GBUdbDataset::newMatchRecord(unsigned int IP) { // Allocate a new Match record for IP.
GBUdbIndex I = DataArray[ixMatchListRoot()].RawData; // Grab the root unused Match Record index.
GBUdbRecord& R = DataArray[I]; // Grab the record itself and inspect it.
if((R.RawData & GBUdbFlagsMask) != GBUdbMatchUnusedBit) { // Check that this looks like an
throw MatchAllocationCorrupted(); // unused match record and if not throw!
} // If all is well then lets proceed.

//// First, let's heal the linked list for future allocations.

if(GBUdbMatchUnusedBit == R.RawData) { // If the match record we are on is
DataArray[ixMatchListRoot()].RawData = // the last in the list then allocate
newMatchNodeRoot(); // a new MatchListNode for the next
} else { // allocation. However, if there are
DataArray[ixMatchListRoot()].RawData = // more records left in the list then
(R.RawData & GBUdbMatchDataMask); // set up the next node for the next
} // allocation.

//// Once that's done we can use the record we have for real data.

R.RawData = EncodedMatch(IP); // Encode the match record for the IP.

return I; // Return the match record's index.
}

GBUdbIndex GBUdbDataset::newMatchNodeRoot() { // Allocate a new Match node.
GBUdbIndex I = newNodeRoot(); // Grab a new node to convert.
int iLastMatch = GBUdbRecordsPerNode - 2; // Calc the localized i for last match.
for(int i = 0; i < iLastMatch; i+=2) { // Loop through the node
DataArray[I+i].RawData = GBUdbMatchUnusedBit | (I+i+2); // Build a linked list of Unused Match
DataArray[I+i+1].RawData = GBUdbUnknown; // records with empty data.
}
DataArray[I+iLastMatch].RawData = GBUdbMatchUnusedBit; // The last record gets a NULL index
DataArray[I+iLastMatch+1].RawData = GBUdbUnknown; // and null data to terminate the list.
return I; // Return the root index.
}

// doForAllRecords()
// This method uses a recursive call to doAllAtNode()
// doAllAtNode sweeps through each record in a node and processes any
// node entries through the next level (calling itself) or directly if
// the node is node3, or if it's pointing to a match record.

void GBUdbDataset::updateWorkingIP(unsigned int& WIP, int OctetValue, int Level) { // Update the Working IP (WIP) at octet Level
switch(Level) {
case 0: { // For the node zero address,
WIP = WIP & 0x00FFFFFF; // Mask out the node zero bits.
OctetValue = OctetValue << 24; // Shift the octet value into position.
WIP = WIP | OctetValue; // Or the octet value bits into place.
break;
}
case 1: {
WIP = WIP & 0xFF00FFFF; // Mask out the node zero bits.
OctetValue = OctetValue << 16; // Shift the octet value into position.
WIP = WIP | OctetValue; // Or the octet value bits into place.
break;
}
case 2: {
WIP = WIP & 0xFFFF00FF; // Mask out the node zero bits.
OctetValue = OctetValue << 8; // Shift the octet value into position.
WIP = WIP | OctetValue; // Or the octet value bits into place.
break;
}
case 3: {
WIP = WIP & 0xFFFFFF00; // Mask out the node zero bits.
WIP = WIP | OctetValue; // Or the octet value bits into place.
break;
}
}
}

//// Note about doAllAtNode(). The x.x.x.0 address is skipped on purpose. This
//// is because all x.x.x.0 addresses are mapped to x.x.x.255. By skipping this
//// address and starting at x.x.x.1 in any search, we do not need to check for
//// x.x.x.0 ips that were remapped. They will simply appear at x.x.x.255.

void GBUdbDataset::doAllAtNode( // Recursively call O with all valid records.
GBUdbIndex I, // Input the node index.
GBUdbOperator& O, // Input the Operator to call.
int NodeLevel, // Input the NodeLevel.
unsigned int WIP // Input the working IP.
) {
int FirstI = (3 > NodeLevel) ? 0 : 1; // Skip any x.x.x.0 addresses.
for(int i = FirstI; i < GBUdbRecordsPerNode; i++) { // Loop through the slots in this node.
GBUdbIndex RecordIndex = DataArray[I + i].Index(); // Get the record index for this slot.
if(GBUdbUnknown != RecordIndex) { // Check that this slot is not empty.
updateWorkingIP(WIP, i, NodeLevel); // If we've got something then update the WIP.
if(3 > NodeLevel) { // If we are working in rootward nodes:
if(isMatch(RecordIndex)) { // Check for a match record. If we have one then
unsigned int MatchIP = WIP & 0xFF000000; // build the IP for the match from the root
MatchIP |= (DataArray[RecordIndex].RawData & 0x00FFFFFF); // of the WIP and the match IP data.
O(MatchIP, MatchedData(RecordIndex)); // Then call the operator with the matched data.
// If this slot is not a match record
} else { // then it is a node address so we will
doAllAtNode(RecordIndex, O, NodeLevel+1, WIP); // recurse to that node at a deeper level.
}
} else { // If we are working in the last node then
O(WIP, DataArray[I + i]); // call the Operator with this IP & Record.
} // All known data values in the last node are
} // actual data records after all.
}
}

void GBUdbDataset::doForAllRecords(GBUdbOperator& O) { // Call O for every valid record.
unsigned int WorkingIP = 0; // A working IP for all levels to use.
int NodeLevel = 0; // The Node level where we start.
doAllAtNode(GBUdbRootNodeOffset, O, NodeLevel, WorkingIP); // Start at the root node, level 0.
}
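
// A minimal operator sketch (hypothetical; the GBUdbOperator interface itself is
// declared in GBUdb.hpp): doForAllRecords() calls the operator once per valid
// (IP, record) pair, so a simple record counter looks like this.

class ExampleRecordCounter : public GBUdbOperator {            // Counts visited records.

public:

int Count;                                                     // Running total.
ExampleRecordCounter() : Count(0) {}                           // Start at zero.

GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) {     // Called for each record:
    (void) IP; ++Count;                                        // bump the count and
    return R;                                                  // hand the record back.
}

};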

//// GBUdb Implementations /////////////////////////////////////////////////////

bool AlertFor(int count) { // True if an alert is needed.
return ( // We want an alert whenever a count
0x00000001 == count || // hits any of these thresholds. Each
0x00000002 == count || // threshold is a new bit position
0x00000004 == count || // indicating that the count has
0x00000008 == count || // achieved a new power of 2. This
0x00000010 == count || // mechanism ensures that newer IPs
0x00000020 == count || // get lots of attention while long
0x00000040 == count || // standing IPs still get visited
0x00000080 == count || // from time to time as their activity
0x00000100 == count || // continues.
0x00000200 == count ||
0x00000400 == count ||
0x00000800 == count ||
0x00001000 == count ||
0x00002000 == count ||
0x00004000 == count
);
}
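
// A quick worked example (sketch): a brand new IP alerts on events 1, 2, and 4,
// while a long standing IP alerts only as it crosses each new power of two.

inline void ExampleAlertThresholds() {
    assert(true == AlertFor(1));       // New IPs alert immediately,
    assert(true == AlertFor(4));       // and again at each power of two,
    assert(false == AlertFor(5));      // but not in between,
    assert(true == AlertFor(0x2000));  // even when the counts get large.
}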

char* getTimestamp(char* TimestampBfr) { // Creates an ISO GMT timestamp.

time_t rawtime; // Get a timer and
tm * gmt; // a time structure.
time(&rawtime); // Grab the current time and
gmt=gmtime(&rawtime); // convert it to GMT.

sprintf(TimestampBfr,"%04d%02d%02d%02d%02d%02d\0", // Format yyyymmddhhmmss
gmt->tm_year+1900,
gmt->tm_mon+1,
gmt->tm_mday,
gmt->tm_hour,
gmt->tm_min,
gmt->tm_sec
);

return TimestampBfr;
}

char* getIPString(unsigned int IP, char* bfr) { // Converts an IP to a string.
int a0, a1, a2, a3; // We will break the IP into 4 octets.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
const int BitsInOneOctet = 8; // Number of bits to shift per octet.
a3 = IP & LowOctetMask; IP >>= BitsInOneOctet; // Grab the a3 octet and shift the IP.
a2 = IP & LowOctetMask; IP >>= BitsInOneOctet; // Grab the a2 octet and shift the IP.
a1 = IP & LowOctetMask; IP >>= BitsInOneOctet; // Grab the a1 octet and shift the IP.
a0 = IP & LowOctetMask; // Grab the final octet.
sprintf(bfr,"%d.%d.%d.%d",a0,a1,a2,a3);
return bfr;
}

void GBUdb::recordAlertFor(unsigned int IP, GBUdbRecord& R, unsigned int C) { // Record an alert event for R if needed.
if(AlertFor(C)) { // If an alert is needed at this level...
GBUdbAlert NewAlert; // Create a new alert record.
NewAlert.IP = IP; // Assign the IP.
NewAlert.R = R; // Assign the Record.
ScopeMutex JustMe(AlertsMutex); // Lock the alerts list mutex.
MyAlerts.push_back(NewAlert); // Add our new alert to the list.
}
}

GBUdbAlert::GBUdbAlert() : // Default constructor gets timestamp.
IP(0) { // IP to zero, R will init to zero
getTimestamp(UTC); // on its own... Get timestamp.
}

string GBUdbAlert::toXML() { // Convert this alert to XML text
stringstream Alert; // We'll use a stringstream.

const char* FlagName; // We will want the Flag as text.
switch(R.Flag()) { // Switch on the Flag() value.
case Good: { FlagName = "Good"; break; } // Convert each value to its name.
case Bad: { FlagName = "Bad"; break; }
case Ugly: { FlagName = "Ugly"; break; }
case Ignore: { FlagName = "Ignore"; break; }
}

char IPStringBfr[20]; // We need a buffer for our IP.

Alert
<< "<gbu time=\'" << UTC // GBU alert + timestamp followed
<< "\' ip=\'" << getIPString(IP,IPStringBfr) // with the IP,
<< "\' t=\'" << FlagName // the type flag,
<< "\' b=\'" << R.Bad() // the bad count,
<< "\' g=\'" << R.Good() // and the good count.
<< "\'/>"; // That's the end.

return Alert.str(); // Return the string.
}

//// Alert import and export - for sharing data between nodes.

void GBUdb::GetAlerts(list<GBUdbAlert>& ListToFill) { // Get all current alerts & clear;
ListToFill.clear(); // Clear out the list to fill.
ScopeMutex JustMe(AlertsMutex); // Lock for a moment.
ListToFill = MyAlerts; // Copy our alerts to the new list.
MyAlerts.clear(); // Clear our alerts.
}

// In order to allow gbudb nodes to interact without swamping their individuality,
// the default mode for integrating their data is to represent the remote peer's
// influence on a logarithmic scale.

unsigned int rescaleGBUdbCount(unsigned int C) { // Rescale count C for integration.
if(C < 0x00000001) { return 0; } else // Log2, really, .. the short way.
if(C < 0x00000002) { return 1; } else // How many significant bits are in
if(C < 0x00000004) { return 2; } else // the number. Put another way, what
if(C < 0x00000008) { return 3; } else // power of 2 is required to hold
if(C < 0x00000010) { return 4; } else // this number.
if(C < 0x00000020) { return 5; } else
if(C < 0x00000040) { return 6; } else
if(C < 0x00000080) { return 7; } else
if(C < 0x00000100) { return 8; } else
if(C < 0x00000200) { return 9; } else
if(C < 0x00000400) { return 10; } else
if(C < 0x00000800) { return 11; } else
if(C < 0x00001000) { return 12; } else
if(C < 0x00002000) { return 13; } else
if(C < 0x00004000) { return 14; } else
return 15;
}
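
// Worked examples (illustrative): rescaleGBUdbCount(0) == 0, rescaleGBUdbCount(3) == 2,
// rescaleGBUdbCount(1000) == 10, and anything at or above 0x4000 returns 15. So a peer
// alert reporting Bad = 1000, Good = 3 is folded into the local record as Bad = 10,
// Good = 2 by ImportAlerts() below.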

void GBUdb::ImportAlerts(list<GBUdbAlert>& PeerAlerts) { // Integrate peer alerts using log2.
list<GBUdbAlert>::iterator iA;
for(iA = PeerAlerts.begin(); iA != PeerAlerts.end(); iA++) { // Go through the list of PeerAlerts.
GBUdbRecord R = (*iA).R; // Grab the Record in this alert.
R.Bad(rescaleGBUdbCount(R.Bad())); // Adjust the bad and good counts
R.Good(rescaleGBUdbCount(R.Good())); // for integration.
adjustCounts((*iA).IP, R); // Adjust the local counts w/ R.
}
}

//// doForAllRecords
//// This method handles GBUdbOperators and their locking semantics.
//// For full dataset locking the mutex is acquired before calling the
//// dataset's doForAllRecords(). For record locking, the O passed to
//// this method is wrapped in a record locking shim (below) and that is
//// passed to the dataset. If None is selected then the Operator is
//// passed to the dataset as is -- assuming that the Operator will handle
//// its own locking as needed.

class GBUdbRecordLockingShim : public GBUdbOperator { // Record locking shim for doForAllRecords.

private:

GBUdbOperator& MyOperator; // Reference the Operator we will be servicing.
Mutex& MyMutex; // Reference the Mutex for the GBUdb we are in.

public:

GBUdbRecordLockingShim(GBUdbOperator& O, Mutex& M) : // On construction we grab references to our critical pieces.
MyOperator(O),
MyMutex(M) {
}

GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) { // When our operator() is called
ScopeMutex JustMe(MyMutex); // we lock the mutex in scope and
return MyOperator(IP, R); // call the Operator we're servicing.
} // When we leave scope we unlock (see above).
};

void GBUdb::doForAllRecords(GBUdbOperator& O, GBUdbLocking L) { // Calls O(IP, Record) w/Every record.
if(Dataset == L) { // If we are locking for the Dataset, then
ScopeMutex JustMe(MyMutex); // we will lock the mutex during this
MyDataset->doForAllRecords(O); // entire operation.
} else
if(Record == L) { // If we are locking per record then
GBUdbRecordLockingShim X(O, MyMutex); // we create a record locking shim instance
MyDataset->doForAllRecords(X); // and call O() through that.
} else { // If locking is NOT enabled, then
MyDataset->doForAllRecords(O); // we will call O() without any locking.
}
}
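
// A minimal usage sketch (hypothetical caller and file name): the same Operator
// can be run under any of the three locking modes declared in GBUdb.hpp.
//
//     GBUdb Db("GBUdb.snapshot"); // Hypothetical GBUdb instance.
//     ReduceAll Halve; // Operator defined further below.
//     Db.doForAllRecords(Halve, Dataset); // One lock held for the whole pass.
//     Db.doForAllRecords(Halve, Record); // Lock and unlock around each record.
//     Db.doForAllRecords(Halve, None); // Operator handles its own locking.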

//// The saveSnapshot() method allows us to save a snapshot of our dataset
//// while keeping the mutex locked for as short a time as possible: Just long
//// enough to make a copy of the dataset in RAM.

void GBUdb::saveSnapshot() { // Saves a snapshot of the current db.
GBUdbDataset* Snapshot = NULL; // We need a pointer for our snapshot.
if(NULL == MyDataset) { // If we do not have a dataset to copy
return; // then we simply return.
} else { // If we do have a Dataset to copy...
ScopeMutex JustMe(MyMutex); // Lock the mutex and
Snapshot = new GBUdbDataset(*MyDataset); // make a copy in memory.
} // Then we can unlock the mutex.
Snapshot->save(); // Then outside the mutex we can save.
delete Snapshot; // Once saved we can delete the snapshot.
PostsCounter = 0; // Reset the posts counter.
}

//// reduce()
//// Using the doForAllRecords() functionality, this method reduces all counts
//// by 2 thus renormalizing all records at lower count values. Unknown flagged
//// records whose counts drop to zero will achieve the state GBUdbUnknown. As
//// such, those values would not be carried over in a compress() operation.

class ReduceAll : public GBUdbOperator { // To reduce the good and bad counts.
public:
GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) { // Given each record,
R.Good(R.Good() >> 1); // Reduce the Good count by half.
R.Bad(R.Bad() >> 1); // Reduce the Bad count by half.
return R; // Return the record.
}
} ReduceAllOperator;

void GBUdb::reduce() { // Reduce all counts by half.
doForAllRecords(ReduceAllOperator); // Call do for all records with the
} // ReduceAllOperator.

//// compress()
//// Using the doForAllRecords() functionality, this method creates a temporary
//// dataset, copies the existing data into that dataset except where the data
//// is GBUdbUnknown, and then swaps the new dataset in place of the old.

class CompressAll : public GBUdbOperator {
private:

GBUdbDataset* MyOldDataset; // Where do we find the old dataset.
GBUdbDataset* MyNewDataset; // Where do we store our new dataset.

int CountConverted;
int CountDropped;

public:

// Note - There is no destructor. It is expected that the calling function
// will extract the NewDataset and replace the OldDataset when the operation
// has been successful.

CompressAll(GBUdbDataset* OldDataset) : // Startup by
MyOldDataset(OldDataset), // Grabbing the old dataset,
MyNewDataset(NULL), // The new one isn't there yet.
CountConverted(0), // Converted and Dropped
CountDropped(0) { // Counts are zero.
MyNewDataset = new GBUdbDataset(NULL); // Allocate a new Dataset.
MyNewDataset->FileName(OldDataset->FileName()); // Set its name the same as the old.
} // We don't want to Load() it that way ;-)

GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) { // The ForAll Operator goes like this...
if(GBUdbUnknown != R.RawData) { // If the record is not GBUdbUnknown then
MyNewDataset->invokeRecord(IP).RawData = R.RawData; // invoke it and copy its data.
++CountConverted; // Increment the converted count.
} else { // If the record is GBUdbUnknown then
++CountDropped; // count it as dropped and forget it.
}
return R; // Return the record reference.
}

GBUdbDataset* Old() {return MyOldDataset;} // Here we can get our OldDataset pointer.
GBUdbDataset* New() {return MyNewDataset;} // Here we can get our NewDataset pointer.
int Converted() {return CountConverted;} // Here we can get the converted count.
int Dropped() {return CountDropped;} // Here we can get the dropped count.
};

void GBUdb::compress() { // Remove any unknown records (reduced to zero).
CompressAll BuildCompressedDataset(MyDataset); // Create a CompressAll operator for this dataset.
ScopeMutex Freeze(MyMutex); // Lock the mutex for the rest of this operation.
MyDataset->doForAllRecords(BuildCompressedDataset); // Copy all of the active data records.
MyDataset = BuildCompressedDataset.New(); // Put the new dataset in place.
delete BuildCompressedDataset.Old(); // Delete the old dataset.
} // All done, so we're unlocked.
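
// A maintenance pass might combine these steps (a hypothetical sequence, not
// prescribed by the engine):
//
//     Db.reduce(); // Halve every count; idle records decay toward GBUdbUnknown.
//     Db.compress(); // Rebuild the dataset without the GBUdbUnknown records.
//     Db.saveSnapshot(); // Persist the condensed dataset to disk.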

int GBUdb::readIgnoreList(const char* FileName) { // setIgnore for a list of IPs
int IPCount = 0; // Keep track of the IPs we read.
try { // Capture any exceptions.
char IPLineBuffer[256]; // Create a line buffer.
ifstream ListFile(FileName, ios::in); // Open up the list file.
while(ListFile.good()) { // While we've got a good file (not eof)
memset(IPLineBuffer, 0, sizeof(IPLineBuffer)); // Clear the buffer.
ListFile.getline(IPLineBuffer, sizeof(IPLineBuffer)); // Read the line.

// Now we have an IP on a line (in theory). We will parse
// the ip and process any that parse correctly.
// First eat anything that's not a digit.

unsigned long IP = 0L; // We need an IP buffer.
char* cursor = IPLineBuffer; // Start on the first byte.

if('#' == *cursor) continue; // Lines that start with # are comments.

// First octet.

while('\0' != *cursor && !isdigit(*cursor)) ++cursor; // Eat any nondigits (stop at end of line).
if(!isdigit(*cursor)) continue; // If it's not a digit skip this line.
if(255 < atoi(cursor)) continue; // If the octet is out of range skip!
IP += atoi(cursor); IP <<= 8; // Grab the first int and shift it.
while(isdigit(*cursor)) ++cursor; // Eat those digits.
if('.'!=(*cursor)) continue; // If we don't find a dot skip this line.
++cursor; // If we do, skip the dot.

// Second octet.

if(!isdigit(*cursor)) continue; // If we're not at a digit skip this line.
if(255 < atoi(cursor)) continue; // If the octet is out of range skip!
IP += atoi(cursor); IP <<= 8; // Grab the octet and shift things left.
while(isdigit(*cursor)) ++cursor; // Eat those digits.
if('.'!=(*cursor)) continue; // If we don't find a dot skip this line.
++cursor; // If we do, skip the dot.

// Third octet.

if(!isdigit(*cursor)) continue; // If we're not at a digit skip this line.
if(255 < atoi(cursor)) continue; // If the octet is out of range skip!
IP += atoi(cursor); IP <<= 8; // Grab the octet and shift things left.
while(isdigit(*cursor)) ++cursor; // Eat those digits.
if('.'!=(*cursor)) continue; // If we don't find a dot skip this line.
++cursor; // If we do, skip the dot.

// Last octet.

if(!isdigit(*cursor)) continue; // If we're not at a digit skip this line.
if(255 < atoi(cursor)) continue; // If the octet is out of range skip!
IP += atoi(cursor); // Grab the octet. IP finished!

setIgnore(IP); // Set the IP to Ignore.
++IPCount; // Bump the IP count.

}
ListFile.close();
}
catch(...) { } // If we have an exception we stop.
return IPCount; // Always return the number of lines read.
}
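
// For illustration, an ignore list file readable by the parser above might look
// like this (comment lines start with '#', one dotted-quad IP per line):
//
//     # Trusted local infrastructure -- never judge these sources.
//     127.0.0.1
//     10.0.0.1
//     192.168.1.254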


+ 293
- 0
GBUdb.hpp View file

@@ -0,0 +1,293 @@
// GBUdb.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// Good, Bad, Ugly, Ignore IP database engine.

////////////////////////////////////////////////////////////////////////////////
// Include M_GBUdb Only Once

#ifndef M_GBUdb
#define M_GBUdb

#include "threading.hpp"
#include <cmath>
#include <cctype>
#include <string>
#include <sstream>
#include <list>
#include <cstdlib>

using namespace std;

const unsigned int GBUdbFlagsMask = 0xC0000000; // Top 2 bits are the flag.
const unsigned int GBUdbIgnore = 0xC0000000; // Ignore is the 11 flag.
const unsigned int GBUdbUgly = 0x00000000; // Ugly/Unknown is the 00 flag.
const unsigned int GBUdbGood = 0x80000000; // Good is the 10 flag.
const unsigned int GBUdbBad = 0x40000000; // Bad is the 01 flag.
const unsigned int GBUdbGoodMask = 0x3FFF8000; // The good count is masked in this range.
const unsigned int GBUdbBadMask = 0x00007FFF; // The bad count is masked here.
const unsigned int GBUdbLimit = GBUdbBadMask; // When a count hits this, normalize in half.
const unsigned int GBUdbGoodShift = 15; // Shift good counts this many bits.

const unsigned int GBUdbMatchEntryBit = 0x80000000; // Match entry Index bit.
const unsigned int GBUdbMatchUnusedBit = 0x40000000; // Unallocated Match entry Index bit.
const unsigned int GBUdbMatchDataMask = 0x3fffffff; // IP Match data mask.
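
// Worked example of the packing above (illustrative): RawData 0x40018003 decodes
// to the Bad flag (0x40000000), a good count of 3 ((0x40018003 & GBUdbGoodMask)
// >> GBUdbGoodShift == 3), and a bad count of 3 (0x40018003 & GBUdbBadMask == 3).
// The halving in addGood()/addBad() keeps both counts at or below GBUdbLimit.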

enum GBUdbFlag { // A type for the GBUdb flag.
Ignore = GBUdbIgnore, // Ignore
Ugly = GBUdbUgly, // Ugly
Good = GBUdbGood, // Good
Bad = GBUdbBad // Bad
};

//// GBUdbLocking semantics
//// When doForAllRecords() is called at the GBUdb level, we need to know how
//// the GBUdb mutex should be handled.

enum GBUdbLocking { // A type that describes locking semantics.
Dataset, // Lock through the entire operation.
Record, // Lock and unlock for each record.
None // Do not lock.
};

typedef unsigned int GBUdbIndex; // A type for Index values from records.
const GBUdbIndex GBUdbUnknown = 0x00000000; // The unknown address.

const int GBUdbRecordsPerNode = 256; // Records per node.
const int GBUdbDefaultGrowNodes = 8192; // Default Nodes to grow.
const int GBUdbDefaultArraySize = GBUdbRecordsPerNode * GBUdbDefaultGrowNodes; // Default initial Array size.
const int GBUdbRootNodeOffset = 256; // First indexing node after node 0.
const int GBUdbGrowthThreshold = 4; // Time to grow at this # free nodes.

//// Node 0 is the go-nowhere node for when things fall off the index so it
//// is coded to all GBUdbUnknown.

//// The last node in the array is used for global statistics & allocation
//// tables.

const int GBUdbControlNodeOffset = -256; // Offset from end of data for control node.
const int GBUdbNextFreeNodeOffset = GBUdbControlNodeOffset + 0; // Offset for next free node index.
const int GBUdbMatchListOffset = GBUdbControlNodeOffset + 1; // Offset for Match record allocation root.
const int GBUdbIPCountOffset = GBUdbControlNodeOffset + 2; // Offset for count of IPs in GBUdb.

// GBUdbRecord converts an ordinary unsigned long integer into a wealth of
// useful information just by adding a collection of useful tools.

class GBUdbRecord { // A GBUdb record is really just a
public: // long integer, but it can be interpreted
// lots of ways.
unsigned int RawData; // The raw unsigned int goes here.

GBUdbRecord(); // Initialize to zero.

GBUdbFlag Flag(); // This returns the flag.
GBUdbFlag Flag(GBUdbFlag f); // This sets and returns the flag.
unsigned int Good(); // This returns the good count.
unsigned int Good(unsigned int g); // This sets and returns the good count.
unsigned int Bad(); // This returns the bad count.
unsigned int Bad(unsigned int b); // This sets and returns the bad count.
unsigned int addGood(unsigned int g = 1); // This increments the good count.
unsigned int addBad(unsigned int b = 1); // This increments the bad count.
GBUdbRecord& integrate(GBUdbRecord& A, int LocalWeight, int RemoteWeight); // This integrates another record.

GBUdbIndex Index(); // This returns the record as an Index.
GBUdbIndex Index(GBUdbIndex i); // This sets the record as an index.

double Probability(); // Return +(bad) or -(good) probability.
double Confidence(); // Return the confidence based on samples.
};

// Special events need to be recorded. For that job we have GBUdbAlerts

const int UTCBufferSize = 16; // C string buffer size for UTC stamp.

class GBUdbAlert {
public:
GBUdbAlert(); // Constructor sets timestamp & nulls.
char UTC[UTCBufferSize]; // Time stamp for this alert.
unsigned int IP; // IP for this alert.
GBUdbRecord R; // GBUdbRecord for this alert.
string toXML(); // Convert to an xml representation.
};

// Mass update kinds of operations are handled by providing a functor
// of the type GBUdbOperator to the method doForAllRecords(). The functor is
// called with every record in the GBUdb.

//// Here is the virtual GBUdb Operator class.

class GBUdbOperator {
public:
virtual GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) = 0;
};
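
// For example, a functor that only tallies Bad-flagged records (hypothetical, not
// part of the engine) has this shape and can be handed to any doForAllRecords():
//
//     class CountBadRecords : public GBUdbOperator {
//     public:
//         int Hits;
//         CountBadRecords() : Hits(0) {}
//         GBUdbRecord& operator()(unsigned int IP, GBUdbRecord& R) {
//             if(Bad == R.Flag()) ++Hits; // Count it but leave the record alone.
//             return R;
//         }
//     };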

// GBUdbDataset manages a large array of GBUdb records and nodes. Nodes are
// simulated data structures -- essentially arrays of GBUdbRecords that are
// interpreted as Indexes so that each byte of a particular IP can be used
// to follow the index through the tree to the final record that actually
// represents the IPs data.

// The last few records in the array are used to keep track of some basic
// statistics including where the next node will come from. As with the GBUdb
// record itself, it's all in how the data is interpreted. Using this strategy
// of converting plain-old integers into various data types on the fly allows
// us to allocate the entire structure as a single block and avoid much
// page swapping behind the scenes.

class GBUdbDataset {
private:
GBUdbRecord* DataArray; // Array of GBUdbRecords, nodes, etc.
int MyArraySize; // The size of the array in records.
string MyFileName; // CString for the file name.

GBUdbIndex ixIPCount(); // Index of the IP count for this db.
GBUdbIndex ixNextFreeNode(); // Index of the Next Free Node Index.
GBUdbIndex ixMatchListRoot(); // Index of the Match List Root Index.
GBUdbIndex newMatchRecord(unsigned int IP); // Allocate a new Match record for IP.
GBUdbIndex newMatchNodeRoot(); // Allocate a new Match node.
GBUdbIndex newNodeRoot(); // Allocates a new node, returns offset.
void deleteMatchAt(GBUdbIndex I); // Recall match record at I for reuse.

// invokeAt() Handles invocation at each node/octet using and managing MatchRecords as needed.

GBUdbIndex invokeAt(GBUdbRecord& R, unsigned int IP, int Octet, bool ExtendMatches);

int increaseIPCount(); // When we add an IP to the db.
int decreaseIPCount(); // When we drop an IP from the db.

void increaseIPCountIfNew(GBUdbRecord& R); // If R is GBUdbUnknown, IncreaseIPCount.

bool isMatch(GBUdbIndex I); // True if record at I is a match record.
bool isMatch(GBUdbIndex I, unsigned int IP); // True if record at I is a match for IP.
GBUdbRecord& MatchedData(GBUdbIndex I); // Returns the data for the match at I.
unsigned int EncodedMatch(unsigned int IP); // Returns encoded raw data for a Match.

//// In order to support bimodal indexing we must make sure that
//// no octet3 data is mapped to the root record in an octet3 node. If
//// it were so mapped then an octet2 evaluation might misinterpret the
//// GBUdbFlag fields as a MatchRecord indicator and cause the data to
//// become corrupted. To solve this problem, any time an octet2 node
//// maps to an octet3 node and NOT a MatchRecord, the 0 record in the
//// octet3 node must have no flags. Since x.x.x.0 is presumed to be the
//// network address, and x.x.x.255 is presumed to be a broadcast address
//// we cause both to map to a single record (the 255 record) where the
//// Class C, B, or A data can be recorded and modified in safety. Since
//// there is no need to track the broadcast and network address cases
//// separately, there is no inherent conflict in this approach. The
//// remapIP00toFF method performs this transform as needed in the
//// readRecord() and invokeRecord() methods.

unsigned int remapIP00toFF(unsigned int IP); // Remaps final octet 00 to FF if needed.

GBUdbRecord MySafeUnknownRecord; // Safe unknown record to return.
GBUdbRecord& SafeUnknownRecord(); // Clears and returns the Safe record.

// doForAllRecords does its job by launching a recursive search algorithm
// which is embodied in doAllAtNode(). The doAllAtNode() method is called
// for the root node by doForAllRecords and searches through the tree depth
// first to locate each active record in the GBUdb and call the Operator.
// updateWorkingIP() uses progressive input from each level to determine
// the effective IP for the node under test.

void updateWorkingIP(unsigned int& WIP, int OctetValue, int Level);
void doAllAtNode(GBUdbIndex I, GBUdbOperator& O, int NodeLevel, unsigned int WorkingIP);

public:
~GBUdbDataset(); // Flush & shutdown a dataset.
GBUdbDataset(const char* SetFileName); // Create with a name or no name (NULL).
GBUdbDataset(GBUdbDataset& Original); // Copy constructor.

class CouldNotGrow {}; // Thrown when grow() fails.
class NoFreeNodes {}; // Thrown when newNodeRoot() fails.
class MatchAllocationCorrupted {}; // Thrown when newMatchRecord() fails.

GBUdbRecord& readRecord(unsigned int IP); // Read only - find a GBUdb record.
GBUdbRecord& invokeRecord(unsigned int IP); // Create and/or Find a GBUdb record.
bool dropRecord(unsigned int IP); // Drop an IP record. (true if we did)

int ArraySize(); // Array size.
int FreeNodes(); // Number of free nodes remaining.
int IPCount(); // Number of IPs stored.

const char* FileName(const char* NewName); // Set new file name w/ cstring.
const char* FileName(); // Return the name.

void grow(int HowManyNodes = GBUdbDefaultGrowNodes); // Grow (by number of nodes).
void save(); // Flush the dataset to disk.
void load(); // Read the dataset from disk.

void doForAllRecords(GBUdbOperator& O); // Calls O(IP, Record) W/ every record.

};

// The GBUdb object manages access to the GBUdb. For example, it will grow the
// dataset when that is required, report new events, and generally serve as the
// main access point for a given GBUdb. It even serializes multiple threads.

//// Here is the actual GBUdb class.

class GBUdb {
private:

Mutex MyMutex; // Data sync mutex.
Mutex AlertsMutex; // Mutex for the alerts list.
GBUdbDataset* MyDataset; // Array of records.
int PostsCounter; // Counts good/bad posts.

list<GBUdbAlert> MyAlerts; // Alerts list.
void recordAlertFor(unsigned int IP, GBUdbRecord& R, unsigned int C); // Append an alert record if needed.

public:

GBUdb(); // Open/Create w/ no name.
GBUdb(const char* FileName); // Open/Create w/ cstring or NULL.
~GBUdb(); // Shutdown

const char* FileName(const char* NewName); // Set/Change the file name.
const char* FileName(); // Return the FileName.

void save(); // Save the data.
void load(); // Load the data.

GBUdbRecord addGood(unsigned int IP, int i = 1); // Count an IP as good.
GBUdbRecord addBad(unsigned int IP, int i = 1); // Count an IP as bad.

GBUdbRecord setGood(unsigned int IP); // Set the flag to Good for this IP.
GBUdbRecord setBad(unsigned int IP); // Set the flag to Bad for this IP.
GBUdbRecord setUgly(unsigned int IP); // Set the flag to Ugly for this IP.
GBUdbRecord setIgnore(unsigned int IP); // Set the flag to Ignore for this IP.

bool dropRecord(unsigned int IP); // Drop an IP record. (true if we did)

GBUdbRecord getRecord(unsigned int IP); // Retrieve an IP record.
GBUdbRecord setRecord(unsigned int IP, GBUdbRecord& R); // Store an IP record.

GBUdbRecord adjustCounts(unsigned int IP, GBUdbRecord& R); // Adds counts from R to record for IP.

void doForAllRecords(GBUdbOperator& O, GBUdbLocking L = Dataset); // Call the Operator w/ All records.
void saveSnapshot(); // Saves a snapshot of the current db.
void reduce(); // Reduce all counts by half.
void compress(); // Remove any unknown records (reduced to zero).

int readIgnoreList(const char* FileName = "GBUdbIgnoreList.txt"); // setIgnore for a list of IPs

void GetAlerts(list<GBUdbAlert>& ListToFill); // Get all current alerts & clear.
void ImportAlerts(list<GBUdbAlert>& PeerAlerts); // Default log2 alert import function.

int IPCount(); // Number of IPs stored.
int Size(); // Size of GBUdb in bytes.
double Utilization(); // Utilization (percent).
int Posts(); // Number of posts since last save.

};

//// Include inline method definitions /////////////////////////////////////////

#include "GBUdb.inline.hpp"

#endif

// End of GBUdb Include Only Once
////////////////////////////////////////////////////////////////////////////////

+ 354
- 0
GBUdb.inline.hpp View file

@@ -0,0 +1,354 @@
// GBUdb.inline.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// See GBUdb.hpp for details & notes.
// This file contains inline implementations.

//// GBUdbRecord Implementations ///////////////////////////////////////////////

inline GBUdbRecord::GBUdbRecord() : // Initialize a new GBUdbRecord
RawData(0) { // to ZERO.
}

inline GBUdbFlag GBUdbRecord::Flag() { // Return the flags.
return (GBUdbFlag) (RawData & GBUdbFlagsMask); // Isolate the flags from the data & return.
}

inline GBUdbFlag GBUdbRecord::Flag(GBUdbFlag f) { // Set the flags.
RawData = RawData & (~GBUdbFlagsMask); // Strip the current flags from RawData.
RawData = RawData | f; // Put the new flags into RawData.
return (GBUdbFlag) (RawData & GBUdbFlagsMask); // Return the flags now in RawData.
}

inline unsigned int GBUdbRecord::Good() { // Return the Good count.
return ((RawData & GBUdbGoodMask) >> GBUdbGoodShift); // Isolate & shift the good count, return.
}

inline unsigned int GBUdbRecord::Good(unsigned int g) { // Set the good count.
RawData = RawData & (~GBUdbGoodMask); // Strip the current good count.
g = g & GBUdbLimit; // Make g safe (within bitfield limit).
RawData = RawData | (g << GBUdbGoodShift); // Shift & combine g with RawData.
return g; // Return the safe g value.
}

inline unsigned int GBUdbRecord::Bad() { // Get the bad count.
return (RawData & GBUdbBadMask); // Isolate the bad data and return.
}

inline unsigned int GBUdbRecord::Bad(unsigned int b) { // Set the bad count.
RawData = RawData & (~GBUdbBadMask); // Strip out the current bad count.
b = b & GBUdbLimit; // Make b safe (strip any extra bits).
RawData = RawData | b; // Combine RawData with the safe b.
return b; // return the safe b.
}

inline unsigned int GBUdbRecord::addGood(unsigned int g) { // Add to the good count & normalize.
unsigned int G = Good(); // Get the good.
unsigned int B = Bad(); // Get the bad.
G = G + g; // Add the new g to the good.
while(G > GBUdbLimit) { // If normalization is required
G = G >> 1; // then reduce the new good
B = B >> 1; // and bad counts by half
} // until things are normalized.
Good(G); // Then go ahead and set the
Bad(B); // new value(s) into place.
return G; // Return the new good count.
}

inline unsigned int GBUdbRecord::addBad(unsigned int b) { // Add to the bad count & normalize.
unsigned int G = Good(); // Get the good.
unsigned int B = Bad(); // Get the bad.
B = B + b; // Add the new b to the bad.
while(B > GBUdbLimit) { // If normalization is required
G = G >> 1; // then reduce the new good
B = B >> 1; // and bad counts by half
} // until things are normalized.
Good(G); // Then go ahead and set the
Bad(B); // new value(s) into place.
return B; // Return the new bad count.
}

inline GBUdbRecord& GBUdbRecord::integrate(GBUdbRecord& A, int LocalWeight, int RemoteWeight) { // Integrate A

unsigned int Gl = Good(); // Get the good and
unsigned int Bl = Bad(); // bad counts from
unsigned int Gr = A.Good(); // the local and
unsigned int Br = A.Bad(); // remote records.

Gl = (Gl * LocalWeight) + (Gr * RemoteWeight); // Combine the Good and
Bl = (Bl * LocalWeight) + (Br * RemoteWeight); // bad counts using the weights.

while(Gl > GBUdbLimit || Bl > GBUdbLimit) { // Normalize the counts by
Gl = Gl >> 1; // dividing both in half until
Bl = Bl >> 1; // they are both within limits.
}
Good(Gl); // Then set the new Good
Bad(Bl); // and bad values and return
return *this; // this object.
}
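
// Worked example (illustrative): integrating a remote record with Good = 10, Bad = 2
// into a local record with Good = 4, Bad = 1 using LocalWeight = 2 and RemoteWeight = 1
// yields Good = (4*2)+(10*1) = 18 and Bad = (1*2)+(2*1) = 4; no halving occurs because
// both results are well below GBUdbLimit.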

inline GBUdbIndex GBUdbRecord::Index() { // Read the record as an index.
return (GBUdbIndex) RawData;
}

inline GBUdbIndex GBUdbRecord::Index(GBUdbIndex i) { // Write the index value of the record.
RawData = (unsigned int) i;
return (GBUdbIndex) RawData;
}

// Probability is about the ratio of a given event to the total events.
// In this case, positive probabilities indicate a tendency toward spam and
// negative probabilities indicate a tendency toward ham.

inline double GBUdbRecord::Probability() { // Calculate the probability of spam
unsigned int G = Good(); // Get the good and
unsigned int B = Bad(); // bad counts and
double P = 0.0; // grab a double to hold P.
if(0 == B + G) { // If we have no counts yet
return P; // then return a zero probability.
} // If we have counts lets do the math.
P = ((double) B - (double) G) / ((double) B + (double) G); // Calculate the differential
return P; // probability and return it.
}
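
// Worked example (illustrative): Bad = 30, Good = 10 gives P = (30-10)/(30+10) = +0.5
// (leaning spam); Bad = 10, Good = 30 gives -0.5 (leaning ham); no samples gives 0.0.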

// The confidence we have in a probability is related to the number of samples
// that are present. We calculate the confidence on a logarithmic scale between
// one sample and half the maximum number by category (good or bad) because
// during condensation all counts may be reduced by half. That is, a 100%
// confidence is achieved when a record contains a total of half the maximum
// number of counts for a single category.

inline double GBUdbRecord::Confidence() { // Calculate our confidence in prob.
unsigned int Total = Good() + Bad(); // What is our total count of samples.
if(0 == Total) return 0.0; // No samples is no confidence.
double Confidence = (log((double)Total) / log((double)(GBUdbLimit/2))); // Calculate on a log scale.
if(1.0 < Confidence) Confidence = 1.0; // Max confidence is 1.0.
return Confidence; // Return the result.
}
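
// Worked example (illustrative): GBUdbLimit/2 is 16383, so a record with 128 total
// samples has Confidence = log(128)/log(16383), roughly 0.50; a single sample gives
// 0.0 and 16383 or more samples clamp to the maximum of 1.0.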

//// GBUdbDataSet Inline Methods ///////////////////////////////////////////////

inline GBUdbIndex GBUdbDataset::ixIPCount() { // Index of the IP count for this db.
return MyArraySize + GBUdbIPCountOffset; // Return the offset from the end.
}

inline GBUdbIndex GBUdbDataset::ixNextFreeNode() { // Index of the Next Free Node.
return MyArraySize + GBUdbNextFreeNodeOffset; // Return the offset from the end.
}

inline GBUdbIndex GBUdbDataset::newNodeRoot() { // Allocates a new node, returns offset.
if(0 >= FreeNodes()) { // Check that we have free nodes to
throw NoFreeNodes(); // allocate. If we don't then throw!
}
GBUdbIndex NewNode = DataArray[ixNextFreeNode()].Index(); // Grab the next new node index.
DataArray[ixNextFreeNode()].Index(NewNode + GBUdbRecordsPerNode); // Move the allocator up a node.
return NewNode; // Return the allocated node.
}

inline int GBUdbDataset::ArraySize() { // Return the current Array Size.
return MyArraySize;
}

inline int GBUdbDataset::FreeNodes() { // Return the number of free nodes.
int FreeRecords = MyArraySize - DataArray[ixNextFreeNode()].RawData; // Find the number of records left.
int FreeNodes = (FreeRecords / GBUdbRecordsPerNode) - 1; // Convert to nodes and subtract the
return FreeNodes; // control node, then return the value.
}

inline int GBUdbDataset::IPCount() { // Return the IP count.
return DataArray[ixIPCount()].RawData;
}

inline int GBUdbDataset::increaseIPCount() { // When we add an IP to the db.
return DataArray[ixIPCount()].RawData++; // Increment and return the IP count.
}

inline int GBUdbDataset::decreaseIPCount() { // When we drop an IP from the db.
return DataArray[ixIPCount()].RawData--; // Decrement and return the IP count.
}

inline const char* GBUdbDataset::FileName() { // get the file name.
return MyFileName.c_str();
}

inline unsigned int GBUdbDataset::EncodedMatch(unsigned int IP) { // Encode an IP as a MatchRecord header.
return GBUdbMatchEntryBit | (IP & GBUdbMatchDataMask); // Use the MatchEntry bit and as much
} // of the remaining IP data as possible.

inline bool GBUdbDataset::isMatch(GBUdbIndex I) { // True if record at I is a match record.
return (0 != (DataArray[I].RawData & GBUdbMatchEntryBit)); // Get the raw data and check for the bit.
}

inline bool GBUdbDataset::isMatch(GBUdbIndex I, unsigned int IP) { // True if record at I is a match for IP.
return (DataArray[I].RawData == EncodedMatch(IP));
}

inline GBUdbRecord& GBUdbDataset::MatchedData(GBUdbIndex I) { // Returns the data for the match at I.
return DataArray[I + 1]; // Since I points to the match record we
} // return the record immediately after it.

inline GBUdbRecord& GBUdbDataset::SafeUnknownRecord() { // Clears and returns the Safe record.
MySafeUnknownRecord.RawData = GBUdbUnknown; // Clear the SafeUnknownRecord and
return MySafeUnknownRecord; // return it as the result.
}

inline GBUdbIndex GBUdbDataset::ixMatchListRoot() { // Index of the Match List Root Index.
return MyArraySize + GBUdbMatchListOffset;
}

inline void GBUdbDataset::increaseIPCountIfNew(GBUdbRecord& R) { // If R is GBUdbUnknown, IncreaseIPCount.
if(GBUdbUnknown == R.RawData) { increaseIPCount(); } // If new, increase the IP count.
}

inline unsigned int GBUdbDataset::remapIP00toFF(unsigned int IP) { // Remaps final octet 00 to FF if needed.
const int LowOctetMask = 0x000000FF; // Mask for seeing the low octet.
if(0 == (IP & LowOctetMask)) { // If the lowest octet is 00 then
return (IP | LowOctetMask); // change it to FF and return.
} // If the lowest octet is something else
return IP; // then return the IP as is.
}

inline void GBUdbDataset::deleteMatchAt(GBUdbIndex I) { // Recalls MatchRecord at I for reuse.
GBUdbIndex Next = DataArray[ixMatchListRoot()].Index(); // Find the current allocation list root.
DataArray[I].RawData = (Next | GBUdbMatchUnusedBit); // Point the current match to that root.
DataArray[I+1].RawData = GBUdbUnknown; // Clean out any data the match had.
DataArray[ixMatchListRoot()].Index(I); // Make this record the list root.
}

//// GBUdb Implementations /////////////////////////////////////////////////////

inline GBUdb::GBUdb() : // Construct the db as new.
PostsCounter(0) { // No posts yet.
MyDataset = new GBUdbDataset(NULL); // Construct with no file name.
}

inline GBUdb::GBUdb(const char* FileName) : // Construct the db from a file.
PostsCounter(0) { // No Posts yet.
MyDataset = new GBUdbDataset(FileName); // Load the data set by name.
}

inline GBUdb::~GBUdb() { // Destroy the db object.
if(NULL != MyDataset) { // Save first if we can.
MyDataset->save();
delete MyDataset;
}
}

inline const char* GBUdb::FileName() { // Return the file name.
return MyDataset->FileName();
}

inline const char* GBUdb::FileName(const char* NewName) { // Set/Change the file name.
return MyDataset->FileName(NewName);
}

inline void GBUdb::save() { // Save the data.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
MyDataset->save(); // Save the dataset.
PostsCounter = 0; // Reset the posts counter.
}

inline void GBUdb::load() { // Load the data.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
MyDataset->load(); // Load the dataset.
}

inline GBUdbRecord GBUdb::addGood(unsigned int IP, int i) { // Count an IP as good.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
++PostsCounter; // Count this as a post.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
unsigned int C = X.addGood(i); // Add a count to the good side.
recordAlertFor(IP, X ,C); // Record an alert if required.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::addBad(unsigned int IP, int i) { // Count an IP as bad.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
++PostsCounter; // Count this as a post.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
unsigned int C = X.addBad(i); // Add a count to the bad side.
recordAlertFor(IP, X, C); // Record an alert if required.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::setGood(unsigned int IP) { // Set the flag to Good for this IP.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
X.Flag(Good); // Set the Good flag.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::setBad(unsigned int IP) { // Set the flag to Bad for this IP.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
X.Flag(Bad); // Set the Bad flag.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::setUgly(unsigned int IP) { // Set the flag to Ugly for this IP.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
X.Flag(Ugly); // Set the Ugly flag.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::setIgnore(unsigned int IP) { // Set the flag to Ignore for this IP.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
X.Flag(Ignore); // Set the Ignore flag.
return X; // Return a copy for analysis.
}


inline GBUdbRecord GBUdb::getRecord(unsigned int IP) { // Retrieve an IP record.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->readRecord(IP); // Read the record (read only).
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::setRecord(unsigned int IP, GBUdbRecord& R) { // Store an IP record.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Invoke the record.
X = R; // Overwrite X with R.
return X; // Return a copy for analysis.
}

inline GBUdbRecord GBUdb::adjustCounts(unsigned int IP, GBUdbRecord& R) { // Adds counts from R to record for IP.
ScopeMutex JustMe(MyMutex); // Lock the data for this operation.
GBUdbRecord& X = MyDataset->invokeRecord(IP); // Locate the record in the data.
X.Bad(X.Bad() + R.Bad()); // Add the reflected adjustments
X.Good(X.Good() + R.Good()); // to the good and bad counts.
return X; // Return a copy for analysis.
}

inline bool GBUdb::dropRecord(unsigned int IP) { // Drop an IP record.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
return MyDataset->dropRecord(IP); // Pass on this call to our dataset.
}

inline int GBUdb::IPCount() { // Number of IPs stored.
ScopeMutex JustMe(MyMutex);
return MyDataset->IPCount();
}

inline int GBUdb::Size() { // Size of GBUdb in bytes.
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
return MyDataset->ArraySize() * sizeof(GBUdbRecord); // Total records converted to bytes.
}

inline double GBUdb::Utilization() { // Utilization (percent).
ScopeMutex JustMe(MyMutex); // Lock the mutex during this operation.
int TotalRecords = MyDataset->ArraySize(); // Calculate the total number of records.
int FreeRecords = MyDataset->FreeNodes() * GBUdbRecordsPerNode; // Calculate the number of unused records.
int UsedRecords = TotalRecords - FreeRecords; // Calculate the number of used records.
return // Calculate and return as double...
((double) UsedRecords) * 100.0 / // (Used Records * 100) / (TotalRecords)
((double) TotalRecords);
}

inline int GBUdb::Posts() { // Number of posts since last snapshot.
int CurrentCount = PostsCounter; // Grab the current posts count.
return CurrentCount; // Return the count we had.
}

+ 56
- 0
Makefile.am View file

@@ -0,0 +1,56 @@
## Process this file with automake to produce Makefile.in
##
## $Id$
##
##
## Author: Alban Deniz
##
## Copyright (C) 2008 by MicroNeil Corporation. All rights reserved.
##

CXXFLAGS = $(SNF_CXXFLAGS) -I@top_srcdir@/SNFMulti -I@top_srcdir@/CodeDweller

noinst_LIBRARIES = \
libSNFMulti.a

libSNFMulti_a_SOURCES = \
@top_srcdir@/SNFMulti/FilterChain.cpp \
@top_srcdir@/SNFMulti/GBUdb.cpp \
@top_srcdir@/SNFMulti/mangler.cpp \
@top_srcdir@/SNFMulti/scanner.cpp \
@top_srcdir@/SNFMulti/snfCFGmgr.cpp \
@top_srcdir@/SNFMulti/snf_engine.cpp \
@top_srcdir@/SNFMulti/snfGBUdbmgr.cpp \
@top_srcdir@/SNFMulti/snf_HeaderFinder.cpp \
@top_srcdir@/SNFMulti/snfLOGmgr.cpp \
@top_srcdir@/SNFMulti/SNFMulti.cpp \
@top_srcdir@/SNFMulti/snfNETmgr.cpp \
@top_srcdir@/SNFMulti/snf_sync.cpp \
@top_srcdir@/SNFMulti/snf_xci.cpp \
@top_srcdir@/SNFMulti/snfXCImgr.cpp \
@top_srcdir@/SNFMulti/tcp_watchdog.cpp

noinst_HEADERS = \
@top_srcdir@/SNFMulti/FilterChain.hpp \
@top_srcdir@/SNFMulti/GBUdb.hpp \
@top_srcdir@/SNFMulti/GBUdb.inline.hpp \
@top_srcdir@/SNFMulti/mangler.hpp \
@top_srcdir@/SNFMulti/scanner.hpp \
@top_srcdir@/SNFMulti/snfCFGmgr.hpp \
@top_srcdir@/SNFMulti/snfCFGmgr.inline.hpp \
@top_srcdir@/SNFMulti/snf_engine.hpp \
@top_srcdir@/SNFMulti/snfGBUdbmgr.hpp \
@top_srcdir@/SNFMulti/snf_HeaderFinder.hpp \
@top_srcdir@/SNFMulti/snf_HeaderFinder.inline.hpp \
@top_srcdir@/SNFMulti/snfLOGmgr.hpp \
@top_srcdir@/SNFMulti/snfLOGmgr.inline.hpp \
@top_srcdir@/SNFMulti/SNFMulti.hpp \
@top_srcdir@/SNFMulti/snfNETmgr.hpp \
@top_srcdir@/SNFMulti/snf_sync.hpp \
@top_srcdir@/SNFMulti/snf_xci.hpp \
@top_srcdir@/SNFMulti/snfXCImgr.hpp \
@top_srcdir@/SNFMulti/tcp_watchdog.hpp \
@top_srcdir@/SNFMulti/snf_match.h

clean-local:
rm -f *.gcno *.gcov *.gcda *~

+ 2141
- 0
SNFMulti.cpp
Diff not shown because of the file's large size
View file


+ 471
- 0
SNFMulti.hpp View file

@@ -0,0 +1,471 @@
// SNFMulti.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// 20060121_M
// This file creates an API for multi-threaded systems to use the SNF engine.
//
// This API is C++ oriented, meaning it throws exceptions and so forth.
// For use in shared objects and DLLs, the functions in here will be wrapped
// in a C style interface appropriate to that platform.
//
// The interface is based on the following structure.
//
// The application "Opens" one or more rulebases.
// The application "Opens" some number of scanners referencing opened rulebases.
// Each scanner handles one thread's worth of scanning, so it is presumed that
// each processing thread in the calling application will have one scanner to itself.
//
// Rulebases can be reloaded asynchronously. The scanners grab a reference to the
// rulebase each time they restart. The grabbing and swapping in of new rulebases is
// a very short critical section.

#ifndef _ARM_SNFMulti
#define _ARM_SNFMulti

#include <stdexcept>
#include <sys/types.h>
#include <sys/stat.h>
#include <ctime>
#include <string>
#include "FilterChain.hpp"
#include "snf_engine.hpp"
#include "snf_match.h"
#include "threading.hpp"
#include "snfCFGmgr.hpp"
#include "snfLOGmgr.hpp"
#include "snfNETmgr.hpp"
#include "snfGBUdbmgr.hpp"
#include "GBUdb.hpp"
#include "snfXCImgr.hpp"

#include <cassert>

extern const char* SNF_ENGINE_VERSION;

// snf Result Code Constants

const int snf_SUCCESS = 0;
const int snf_ERROR_CMD_LINE = 65;
const int snf_ERROR_LOG_FILE = 66;
const int snf_ERROR_RULE_FILE = 67;
const int snf_ERROR_RULE_DATA = 68;
const int snf_ERROR_RULE_AUTH = 73;
const int snf_ERROR_MSG_FILE = 69;
const int snf_ERROR_ALLOCATION = 70;
const int snf_ERROR_BAD_MATRIX = 71;
const int snf_ERROR_MAX_EVALS = 72;
const int snf_ERROR_UNKNOWN = 99;

// Settings & Other Constants

const int snf_ScanHorizon = 32768; // Maximum length of message to check.
const int snf_MAX_RULEBASES = 10; // 10 Rulebases is plenty. Most use just 1
const int snf_MAX_SCANNERS = 500; // 500 Scanners at once should be plenty

const int SHUTDOWN = -999; // Shutdown Cursor Value.

// snfCFGPacket encapsulates configuration and rulebase data.
// The rulebase handler can write to it.
// Others can only read from it.
// The engine handler creates and owns one of these. It uses it to
// grab() and drop() cfg and rulebase data from the rulebase handler.

class snf_RulebaseHandler; // We need to know this exists.

class snfCFGPacket { // Our little bundle of, er, cfg stuff.

friend class snf_RulebaseHandler; // RulebaseHandler has write access.

private:
snf_RulebaseHandler* MyRulebase; // Where to grab() and drop()
TokenMatrix* MyTokenMatrix; // We combine the current token matrix
snfCFGData* MyCFGData; // and the current cfg data for each scan.

set<int> RulePanics; // Set of known rule panic IDs.

public:
snfCFGPacket(snf_RulebaseHandler* R); // Constructor grab()s the Rulebase.
~snfCFGPacket(); // Destructor drop()s the Rulebase.

TokenMatrix* Tokens(); // Consumers read the Token Matrix and
snfCFGData* Config(); // the snfCFGData.

bool bad(); // If anything is missing it's not good.

bool isRulePanic(int R); // Test for a rule panic.
};

class ScriptCaller : private Thread { // Calls system() in separate thread.
private:
Mutex MyMutex; // Protects internal data.
string SystemCallText; // Text to send to system().
Timeout GuardTimer; // Guard time between triggers.
bool GoFlag; // Go flag true when triggered.
bool DieFlag; // Die flag when it's time to leave.

string ScriptToRun(); // Safely grab the script.
bool hasGuardExpired(); // True if guard time has expired.
void myTask(); // Thread task overload.

public:
ScriptCaller(string Name); // Constructor.
~ScriptCaller(); // Destructor.

void SystemCall(string S); // Set system call text.
void GuardTime(int T); // Change guard time.
void trigger(); // Trigger if possible.

const static ThreadType Type; // The thread's type.

const static ThreadState CallingSystem; // State when in system() call.
const static ThreadState PendingGuardTime; // State when waiting for guard time.
const static ThreadState StandingBy; // State when waiting around.
const static ThreadState Disabled; // State when unable to run.
};

class snf_Reloader : private Thread { // Rulebase maintenance thread.
private:

snf_RulebaseHandler& MyRulebase; // We know our rulebase.
bool TimeToStop; // We know if it's time to stop.

string RulebaseFileCheckName; // We keep track of these files.
string ConfigFileCheckName;
string IgnoreListCheckFileName;
time_t RulebaseFileTimestamp; // We watch their timestamps.
time_t ConfigurationTimestamp;
time_t IgnoreListTimestamp;

void captureFileStats(); // Get stats for later comparison.
bool StatsAreDifferent(); // Check file stats for changes.

void myTask(); // How do we do this refresh thing?

ScriptCaller RulebaseGetter; // Reloader owns a RulebaseGetter.
bool RulebaseGetterIsTurnedOn; // True if we should run the getter.
void captureGetterConfig(); // Get RulebaseGetter config.

public:
snf_Reloader(snf_RulebaseHandler& R); // Setup takes some work.
~snf_Reloader(); // Tear down takes some work.

const static ThreadType Type; // The thread's type.

};

class snf_RulebaseHandler { // Engine Core Manager.

friend class snfCFGPacket;

private:

Mutex MyMutex; // This handler's mutex.

snf_Reloader* MyReloader; // Reloader engine (when in use).

int volatile ReferenceCount; // Associated scanners count.

snfCFGData* volatile Configuration; // Configuration for this handler.
TokenMatrix* volatile Rulebase; // Rulebase for this handler.
int volatile CurrentCount; // Active current scanners count.

TokenMatrix* volatile OldRulebase; // Retiring rulebase holder.
int volatile RetiringCount; // Active retiring scanners count.

bool volatile RefreshInProgress; // Flag for locking the refresh process.

int volatile MyGeneration; // Generation (reload) number.

void _snf_LoadNewRulebase(); // Internal function to load new rulebase.

Mutex XCIServerCommandMutex; // XCI Server Command Serializer.
snfXCIServerCommandHandler* myXCIServerCommandHandler; // ptr to Installed Srv Cmd Handler.

void grab(snfCFGPacket& CP); // Activate this Rulebase for a scan.
void drop(snfCFGPacket& CP); // Deactivate this Rulebase after it.

public:

class ConfigurationError : public runtime_error { // When the configuration won't load.
public: ConfigurationError(const string& w):runtime_error(w) {}
};
class FileError : public runtime_error { // Exception: rulebase file won't load.
public: FileError(const string& w):runtime_error(w) {}
};
class AuthenticationError : public runtime_error { // Exception when authentication fails.
public: AuthenticationError(const string& w):runtime_error(w) {}
};
class IgnoreListError : public runtime_error { // When the ignore list won't load.
public: IgnoreListError(const string& w):runtime_error(w) {}
};
class AllocationError : public runtime_error { // Exception when we can't allocate something.
public: AllocationError(const string& w):runtime_error(w) {}
};
class Busy : public runtime_error { // Exception when there is a collision.
public: Busy(const string& w):runtime_error(w) {}
};
class Panic : public runtime_error { // Exception when something else happens.
public: Panic(const string& w):runtime_error(w) {}
};

//// Plugin Components.

snfCFGmgr MyCFGmgr; // Configuration manager.
snfLOGmgr MyLOGmgr; // Logging manager.
snfNETmgr MyNETmgr; // Communications manager.
snfGBUdbmgr MyGBUdbmgr; // GBUdb manager.
GBUdb MyGBUdb; // GBUdb for this rulebase.
snfXCImgr MyXCImgr; // XCI manager.

//// Methods.

snf_RulebaseHandler(): // Initialization is straightforward.
MyReloader(0),
MyGeneration(0),
ReferenceCount(0),
Rulebase(NULL),
CurrentCount(0),
OldRulebase(NULL),
RetiringCount(0),
RefreshInProgress(false),
myXCIServerCommandHandler(0) {
MyNETmgr.linkLOGmgr(MyLOGmgr); // Link the NET manager to the LOGmgr.
MyNETmgr.linkGBUdbmgr(MyGBUdbmgr); // Link the NET manager to the GBUdbmgr.
MyGBUdbmgr.linkGBUdb(MyGBUdb); // Link the GBUdb manager to its db.
MyGBUdbmgr.linkLOGmgr(MyLOGmgr); // Link the GBUdb manager to the LOGmgr.
MyLOGmgr.linkNETmgr(MyNETmgr); // Link the LOG manager to the NETmgr.
MyLOGmgr.linkGBUdb(MyGBUdb); // Link the LOG manager to the GBUdb.
MyXCImgr.linkHome(this); // Link the XCI manager to this.
}

~snf_RulebaseHandler(); // Shutdown checks for safety.

bool isReady(); // Is the object active.
bool isBusy(); // Is a refresh/open in progress.
int getReferenceCount(); // How many Engines using this handler.
int getCurrentCount(); // How many Engines active in the current rb.
int getRetiringCount(); // How many Engines active in the old rb.
void open(const char* path, // Lights up this handler.
const char* licenseid,
const char* authentication);

bool AutoRefresh(bool On); // Turn on/off auto refresh.
bool AutoRefresh(); // True if AutoRefresh is on.
void refresh(); // Reloads the rulebase and config.

void close(); // Closes this handler.

void use(); // Make use of this Rulebase Handler.
void unuse(); // Finish with this Rulebase Handler.

int Generation(); // Returns the generation number.

void addRulePanic(int RuleID); // Synchronously add a RulePanic.

IPTestRecord& performIPTest(IPTestRecord& I); // Perform an IP test.
void logThisIPTest(IPTestRecord& I, string Action); // Log an IP test result & action.

void logThisError(string ContextName, int Code, string Text); // Log an error message.
void logThisInfo(string ContextName, int Code, string Text); // Log an informational message.
string PlatformVersion(string NewPlatformVersion); // Set platform version info.
string PlatformVersion(); // Get platform version info.
string PlatformConfiguration(); // Get platform configuration.
string EngineVersion(); // Get engine version info.

void XCIServerCommandHandler(snfXCIServerCommandHandler& XCH); // Registers a new XCI Srvr Cmd handler.
string processXCIServerCommandRequest(snf_xci& X); // Handle a parsed XCI Srvr Cmd request.
};

// IPTestEngine w/ GBUdb interface.
// This will plug into the FilterChain to evaluate IPs on the fly.

class snf_IPTestEngine : public FilterChainIPTester {

private:

GBUdb* Lookup; // Where we find our GBUdb.
snfScanData* ScanData; // Where we find our ScanData.
snfCFGData* CFGData; // Where we find our CFG data.
snfLOGmgr* LOGmgr; // Where we find our LOG manager.

public:

snf_IPTestEngine(); // Initialize internal pointers to NULL.
void setGBUdb(GBUdb& G); // Setup the GBUdb lookup.
void setScanData(snfScanData& D); // Setup the ScanData object.
void setCFGData(snfCFGData& C); // (Re)Set the config data to use.
void setLOGmgr(snfLOGmgr& L); // Setup the LOGmgr to use.

string& test(string& input, string& output); // Our obligatory test function.
};

// Here's where we pull it all together.

class snf_EngineHandler {

private:

Mutex MyMutex; // This handler's mutex.
Mutex FileScan; // File scan entry mutex.

EvaluationMatrix* volatile CurrentMatrix; // Matrix for the latest scan.
snf_RulebaseHandler* volatile MyRulebase; // My RulebaseHandler.

snfScanData MyScanData; // Local snfScanData record.
snf_IPTestEngine MyIPTestEngine; // Local IP Test Engine.

int ResultsCount; // Count of Match Records for getResults
int ResultsRemaining; // Count of Match Records ahead of cursor.
MatchRecord* FinalResult; // Final (winning) result of the scan.
MatchRecord* ResultCursor; // Current Match Record for getResults.

string extractMessageID(const unsigned char* Msg, const int Len); // Get log safe Message-ID or substitute.

public:

class FileError : public runtime_error { // Exception when a file won't open.
public: FileError(const string& w):runtime_error(w) {}
};
class XHDRError : public runtime_error { // Exception when XHDR Inject/File fails.
public: XHDRError(const string& w):runtime_error(w) {}
};
class BadMatrix : public runtime_error { // Exception out of bounds of matrix.
public: BadMatrix(const string& w):runtime_error(w) {}
};
class MaxEvals : public runtime_error { // Exception too many evaluators.
public: MaxEvals(const string& w):runtime_error(w) {}
};
class AllocationError : public runtime_error { // Exception when we can't allocate something.
public: AllocationError(const string& w):runtime_error(w) {}
};
class Busy : public runtime_error { // Exception when there is a collision.
public: Busy(const string& w):runtime_error(w) {}
};
class Panic : public runtime_error { // Exception when something else happens.
public: Panic(const string& w):runtime_error(w) {}
};

snf_EngineHandler(): // Initialization is simple.
CurrentMatrix(NULL),
MyRulebase(NULL),
MyScanData(snf_ScanHorizon),
ResultsCount(0),
ResultsRemaining(0),
ResultCursor(NULL) {}

~snf_EngineHandler(); // Shutdown cleans up and checks for safety.

void open(snf_RulebaseHandler* Handler); // Light up the engine.
bool isReady(); // Is the Engine good to go? (doubles as busy)
void close(); // Close down the engine.

int scanMessageFile( // Scan this message file.
const string MessageFilePath, // -- this is the file (and id)
const int MessageSetupTime = 0, // -- setup time already used.
const IP4Address MessageSource = 0UL // -- message source IP (for injection).
);

int scanMessage( // Scan this message.
const unsigned char* MessageBuffer, // -- this is the message buffer.
const int MessageLength, // -- this is the length of the buffer.
const string MessageName = "", // -- this is the message identifier.
const int MessageSetupTime = 0, // -- setup time used (for logging).
const IP4Address MessageSource = 0UL // -- message source IP (for injection).
);

int getResults(snf_match* MatchBuffer); // Get the next match buffer.
int getDepth(); // Get the scan depth.

const string getClassicLog(); // Get classic log entries for last scan.
const string getXMLLog(); // Get XML log entries for last scan.
const string getXHDRs(); // Get XHDRs for last scan.
};

// Here's the class that pulls it all together.

class snf_MultiEngineHandler {

private:

Mutex RulebaseScan; // This handler's mutex.
int RulebaseCursor; // Next Rulebase to search.
snf_RulebaseHandler RulebaseHandlers[snf_MAX_RULEBASES]; // Array of Rulebase Handlers

int RoundRulebaseCursor(); // Gets round robin Rulebase handle candidates.

Mutex EngineScan; // Serializes searching the Engine list.
int EngineCursor; // Next Engine to search.
snf_EngineHandler EngineHandlers[snf_MAX_SCANNERS]; // Array of Engine Handlers

int RoundEngineCursor(); // Gets round robin Engine handle candidates.

public:

class TooMany : public runtime_error { // Exception when no more handle slots.
public: TooMany(const string& w):runtime_error(w) {}
};
class FileError : public runtime_error { // Exception when a file won't open.
public: FileError(const string& w):runtime_error(w) {}
};
class AuthenticationError : public runtime_error { // Exception when authentication fails.
public: AuthenticationError(const string& w):runtime_error(w) {}
};
class AllocationError : public runtime_error { // Exception when we can't allocate something.
public: AllocationError(const string& w):runtime_error(w) {}
};
class Busy : public runtime_error { // Exception when there is a collision.
public: Busy(const string& w):runtime_error(w) {}
};
class Panic : public runtime_error { // Exception when something else happens.
public: Panic(const string& w):runtime_error(w) {}
};

snf_MultiEngineHandler():
RulebaseCursor(0),
EngineCursor(0) {}

~snf_MultiEngineHandler(); // Clean up, safety check, shut down.

// snf_OpenRulebase()
// Grab the first available rulebase handler and light it up.

int OpenRulebase(const char* path, const char* licenseid, const char* authentication);

// snf_RefreshRulebase()
// Reload the rulebase associated with the handler.

void RefreshRulebase(int RulebaseHandle);

// snf_CloseRulebase()
// Shut down this Rulebase handler.

void CloseRulebase(int RulebaseHandle);

// snf_OpenEngine()
// Grab the first available Engine handler and light it up

int OpenEngine(int RulebaseHandle);

// snf_CloseEngine()
// Shut down this Engine handler.

void CloseEngine(int EngineHandle);

// snf_Scan()
// Scan the MessageBuffer with this Engine.

int Scan(int EngineHandle, const unsigned char* MessageBuffer, int MessageLength);

// The Engine provides detailed match results through this function.

int getResults(int EngineHandle, snf_match* matchbfr);

// The Engine provides the scan depth through this function.

int getDepth(int EngineHandle);

};
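
// A minimal usage sketch (hypothetical rulebase path, credentials, and message)
// of the open/scan/close flow described at the top of this file:
//
//     snf_MultiEngineHandler SNF;
//     int rb = SNF.OpenRulebase("sample.snf", "licenseid", "authentication"); // Hypothetical rulebase & credentials.
//     int eng = SNF.OpenEngine(rb); // One engine per scanning thread.
//     const unsigned char Msg[] = "Subject: test\r\n\r\nHello"; // Hypothetical message bytes.
//     int result = SNF.Scan(eng, Msg, sizeof(Msg) - 1); // Scan and keep the result code.
//     snf_match Match;
//     SNF.getResults(eng, &Match); // Retrieve match detail for the scan.
//     SNF.CloseEngine(eng); // Release the engine and then
//     SNF.CloseRulebase(rb); // the rulebase when finished.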

#endif

+ 5
- 0
gccVersion.txt View file

@@ -0,0 +1,5 @@
gcc (SUSE Linux) 4.3.1 20080507 (prerelease) [gcc-4_3-branch revision 135036]
Copyright (C) 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.


+ 106
- 0
mangler.cpp View file

@@ -0,0 +1,106 @@
// MANGLER.CPP
//
// (C) 1984-2009 MicroNeil Research Corporation
// Derived from Version 1 of Mangler Encryption Algorithm, 1984.
// Derived from Version 2 of Mangler Encryption Algorithm, 1998.
//

// 20021008 _M
// Found and corrected range bug in ChaosDriver(void) where
// ~Position might access a location outside the fill. Replaced
// ~Position with Position^0xff which has the intended effect.

// 20020119 _M Version 3.0
//
// Mangler encryption engine object.
// Using new optimized chaos driver for uniformity experiments.
// Important in this experiment is proof of highest possible entropy.

#include "mangler.hpp"

unsigned char MANGLER::ChaosDriver(void) { // Return the current
return Fill[Fill[Position]^Fill[Position^0xff]]; // chaos engine output
} // value.

// As of version 3 the output of the chaos driver was strengthened for
// cryptography and to increase the sensitivity of the output for use
// as a random number generator. In version 2, the software would simply
// return the fill value at the engine's current position. In the new
// version two distinct fill values are involved in abstracting the
// value of Position and determining the final output value and the Position
// value itself is used to add complexity to the output.

unsigned char MANGLER::Rotate(unsigned char i) { // Bitwise rotates i
return (
(i & 0x80)? // This operation is
(i<<1)+1: // described without
(i<<1) // using asm.
);
}

void MANGLER::ChaosDriver(unsigned char i) { // Drives chaos engine.

// First we move our mixing position in the fill buffer forward.

Position=( // Move mixing position.
Position+1+ // Move at least 1, then
(Fill[Position]&0x0f) // maybe a few more.
)%256; // But stay within the fill.

// The fill position in version 2 was simply incremented. This allowed
// for an attacker to predict something important about the state of
// the chaos engine. The new method above uses abstraction through the
// fill buffer to introduce "jitter" when setting a new position based
// on data that is hidden from the outside.

// Next we abstract the incoming character through the fill buffer and
// use it to select fill data to rotate and swap.

unsigned char Swap = ((Fill[Position]^Fill[i])+Position+i)%256;
unsigned char Tmp;

Tmp = Fill[Swap];
Fill[Swap]=Fill[Position];
Fill[Position]=Rotate(Tmp);

// Whenever the Swap and Position values are the same, the result is
// that no data is swapped in the chaos field. We resolve that by
// recalling the ChaosDriver. This has the added effect of increasing
// the complexity and making it more difficult to predict the state
// of the engine... particularly because the engine evolves to a new
// state under these conditions without having exposed that change
// to the outside world.

if(Position==Swap) ChaosDriver(Tmp); // If we didn't swap, recurse.

}

// The encryption / decryption scheme works by modulating an input data
// stream with a chaotic system and allowing the encrypted stream to drive
// the chaotic system of both the transmitter and receiver. This will
// synchronize the two chaotic systems and allow the receiving system to
// "predict" the state of the transmitting system so that it can properly
// demodulate the encrypted stream. Both chaotic systems must start in the
// same state with the same fill data characteristics or else the two
// chaotic systems will evolve into increasingly divergent states.

unsigned char MANGLER::Encrypt(unsigned char i) {
unsigned char g = ChaosDriver() ^ i; // Take the output of the
ChaosDriver(g); // chaos engine and use it
return g; // to modulate the input.
} // Then drive the engine
// with the encrypted data.

unsigned char MANGLER::Decrypt(unsigned char i) {
unsigned char g = ChaosDriver() ^ i; // Take the output of the
ChaosDriver(i); // chaos engine and use it
return g; // to demodulate the input.
} // then drive the engine
// with the original input.
MANGLER::MANGLER(void) {
for(short c = 0;c<256;c++) // The default constructor sets
Fill[c]=(unsigned char) c; // the key to the root primary
Position = 0; // value and Position to 0.
}
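// Illustrative round-trip sketch (not part of the original module). Per the
// notes above, two MANGLER objects that start from the same default fill stay
// synchronized because the ciphertext drives both engines, so decrypting the
// encrypted stream reproduces the original bytes. The plaintext is hypothetical.

#ifdef MANGLER_ROUNDTRIP_SKETCH
#include <cassert>
static void ManglerRoundTripSketch() {
    MANGLER Tx, Rx;                                  // Both start in the default state.
    const unsigned char Plain[] = "attack at dawn";  // Hypothetical plaintext.
    for(unsigned int i = 0; i < sizeof(Plain); ++i) {
        unsigned char Cipher = Tx.Encrypt(Plain[i]); // Modulate and drive the transmitter.
        unsigned char Decoded = Rx.Decrypt(Cipher);  // Demodulate and drive the receiver.
        assert(Decoded == Plain[i]);                 // The two engines remain in sync.
    }
}
#endif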



+ 34
- 0
mangler.hpp

@@ -0,0 +1,34 @@
// MANGLER.HPP
//
// (C) 1984-2009 MicroNeil Research Corporation
// Derived from Version 1 of Mangler Encryption Algorithm, 1984.
// Derived from Version 2 of Mangler Encryption Algorithm, 1998.
//
// 20020119 _M Mangler V3.
// Mangler object header file.
// If it's already been included, it doesn't need to be included again.

#ifndef _MANGLER_
#define _MANGLER_

class MANGLER {
private:

unsigned char Fill[256]; // Where to store the fill.
unsigned int Position; // Where to put position.

unsigned char Rotate(unsigned char); // Bitwise Rotate Utility.

unsigned char ChaosDriver(void); // Returns current chaos.
void ChaosDriver(unsigned char i); // Drives chaos forward.

public:

unsigned char Encrypt(unsigned char i); // Returns encrypted data.
unsigned char Decrypt(unsigned char i); // Returns decrypted data.

MANGLER(void); // Default.
};

#endif


+ 112
- 0
scanner.cpp

@@ -0,0 +1,112 @@
// scanner.cpp
//
// (C) 2002-2009 MicroNeil Research Corporation

// 20041117 _M - Included new improved Filter Chain module UrlDecode. This module
// scans each anchor or image tag for URL encoded characters and converts them to
// their single-byte counterparts. If a character is converted then the decoded
// anchor tag is injected into the scan stream immediately after the source link.

// 20041114 _M - Included new Filter Chain module: Defunker. The Defunker re-emits
// the message to the scanner with all of the HTML and some coding removed. This
// allows HTML obfuscated patterns to be recognized by the scanning engine.

// 20040113 _M - New Reset() method used in ScanMessage() to keep things nice and
// tidy. Also, modified ScanText() to create a new evaluation matrix if it is
// needed, and to append to the existing one if there is one.

// 20030928 _M - Moving toward the peer-server architecture and V3. The message
// scanning component has been moved into its own object called "scanner". From
// now on, a message, or text will be passed to the scanner and the scanner will
// return an evaluation matrix. As always, if something goes wrong it will throw.
// This allows us to separate the creation of a scanner, and its use, from any
// other nifty logic. So, if I'm in a server mode, I can take my scanner and throw
// messages at it as often as I like. Each message I pump in one side comes out the
// other side as an evaluation matrix. This will work well for SMTP based engines
// as well as peer-server, or any other "service pipeline".
//
// Note that the scanner object has two ways it will accept data. One way is as a
// message via .ScanMessage(c_str). This method employs the filter chain system and
// expects to see an SMTP message. The second way is as plain text via .ScanText(c_str).
// This method is useful for "internal" purposes such as secondary scans used to
// locate compound rules or parameter scans used to pick up tuning data from the
// rulebase.

#include "scanner.hpp"

// Scanner::LoadRuleBase(RuleFileName, SecurityKey)

void Scanner::LoadRuleBase(string& RuleFileName, string& SecurityKey) {
RuleBase.Load(RuleFileName); // Load the rulebase file.
RuleBase.Validate(SecurityKey); // Validate the rulebase file.

}

// Scanner::ScanMessage(MessageBuffer)

EvaluationMatrix* Scanner::ScanMessage(unsigned char* MessageBuffer) { // Scan with the filter chain.

FilterChainCString IV(MessageBuffer); // Set up the filter chain.
FilterChainBase64 IW(&IV); // Include Base64 decoding.
FilterChainQuotedPrintable IX(&IW); // Include Quoted Printable decoding.
FilterChainUrlDecode IY(&IX); // Include URL decoder.
FilterChainDefunker IZ(&IY); // Include Defunking.

// Reset and create a new EvaluationMatrix object to use for this scan.
// ScanMessage is always called with a new message.

Reset(); // Reset for the new message.
myEvaluationMatrix = // Allocate a new evaluation matrix
new EvaluationMatrix(&RuleBase); // using the current rulebase.

if(!myEvaluationMatrix) // If the allocation fails then
throw BadMatrixAllocation(); // throw an appropriate exception.

try {
// Message header rules in earlier versions occasionally failed because there was not
// a new-line character in front of the very first header. So, now we insert one :-)
// This allows all header rules to start off with a ^ indicating the start of the line.
myEvaluationMatrix->EvaluateThis('\n'); // Insert a newline ahead of each message.
// Scan each byte in the file up to the horizon or the end of the message.
// If something goes wrong, an exception will be thrown.
while(myEvaluationMatrix->CountOfCharacters < ScanHorizon)
myEvaluationMatrix->EvaluateThis(IZ.GetByte());
}

catch(FilterChain::Empty) { // We're expecting this so it's ok, but
} // anything else will still be thrown!
return myEvaluationMatrix; // Return our results.
}

// Scanner::ScanText(TextBuffer)

EvaluationMatrix* Scanner::ScanText(unsigned char* TextBuffer) { // Scan without the filter chain.

// If needed, create a new EvaluationMatrix object to use for this scan.
// If not needed, we'll add this scanning to the existing matrix.
if(!myEvaluationMatrix) {
myEvaluationMatrix = // Allocate a new evaluation matrix
new EvaluationMatrix(&RuleBase); // using the current rulebase.

if(!myEvaluationMatrix) // If the allocation fails then
throw BadMatrixAllocation(); // throw an appropriate exception.
}

int index=0; // Set up an index at zero...

while( // For as long as we're
TextBuffer[index]!=0 && // not yet terminated and
myEvaluationMatrix->CountOfCharacters < ScanHorizon) // not at the horizon then
myEvaluationMatrix->EvaluateThis(TextBuffer[index++]); // scan this byte & move.

return myEvaluationMatrix; // Return our results.

}

+ 69
- 0
scanner.hpp

@@ -0,0 +1,69 @@
// scanner.hpp
//
// (C) 2002-2009 MicroNeil Research Corporation

// 20040113 _M - Added Reset() to the scanner object to more completely handle
// cleanup after processing a message. Where previously the calling code would
// need to be sure it deleted the evaluation matrix when it was done, now it
// should call Reset. Reset is also included now in the destructor for this
// object.

// 20030928 _M - Moving toward the peer-server architecture and V3. The message
// scanning component has been moved into its own object called "scanner". From
// now on, a message, or text will be passed to the scanner and the scanner will
// return an evaluation matrix. As always, if something goes wrong it will throw.
// This allows us to separate the creation of a scanner, and its use, from any
// other nifty logic. So, if I'm in a server mode, I can take my scanner and throw
// messages at it as often as I like. Each message I pump in one side comes out the
// other side as an evaluation matrix. This will work well for SMTP based engines
// as well as peer-server, or any other "service pipeline".
//
// Note that the scanner object has two ways it will accept data. One way is as a
// message via .ScanMessage(c_str). This method employs the filter chain system and
// expects to see an SMTP message. The second way is as plain text via .ScanText(c_str).
// This method is useful for "internal" purposes such as secondary scans used to
// locate compound rules or parameter scans used to pick up tuning data from the
// rulebase.

#ifndef _MN_Scanner
#define _MN_Scanner

#include "FilterChain.hpp"
#include "snf_engine.hpp"

const int ScanHorizon = 32768; // Maximum length of message to check.

class Scanner {

private:

TokenMatrix RuleBase; // The RuleBase for this scanner.

EvaluationMatrix* myEvaluationMatrix; // Evaluation Matrix for current scan.

public:

class BadMatrixAllocation {}; // Exception for failed allocation.

Scanner() {myEvaluationMatrix=NULL;} // Construct with empty matrix.
~Scanner() {Reset();} // Destructor now cleans up.

void Reset() { // Reset safely deletes the eval
if(myEvaluationMatrix!=NULL){ // matrix and nulls its pointer.
delete myEvaluationMatrix;
myEvaluationMatrix=NULL;
}
}

void LoadRuleBase(string& RuleFileName, string& SecurityKey); // Load & Validate RuleBase.

EvaluationMatrix* ScanMessage(unsigned char* MessageBuffer); // Scan with filter chain.

EvaluationMatrix* ScanText(unsigned char* TextBuffer); // Scan without filter chain.

inline EvaluationMatrix* GetMatrix(){return myEvaluationMatrix;} // Return the latest matrix.
};
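// Illustrative usage sketch (not part of the original header). It follows the
// notes above: load and validate a rulebase once, then push each message
// through ScanMessage() and read the resulting EvaluationMatrix. The rulebase
// file, security key, and message buffer are hypothetical placeholders.

#ifdef SCANNER_USAGE_SKETCH
inline void ScannerUsageSketch() {
    Scanner S;                                           // One scanner, reused per message.
    string RuleFile = "sample.snf";                      // Hypothetical rulebase file.
    string Key = "securitykey";                          // Hypothetical security key.
    S.LoadRuleBase(RuleFile, Key);                       // Load & validate (throws on failure).

    unsigned char Message[] =                            // Hypothetical SMTP message buffer.
        "From: someone@example.com\r\n\r\nHello";
    EvaluationMatrix* Results = S.ScanMessage(Message);  // Scan it through the filter chain.
    // ... examine Results here ...
    S.Reset();                                           // Clean up before the next message.
    (void) Results;
}
#endif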

#endif

+ 1009
- 0
snfCFGmgr.cpp
File diff not shown because of its large size


+ 554
- 0
snfCFGmgr.hpp

@@ -0,0 +1,554 @@
// snfCFGmgr.hpp
// Copyright (C) 2006 - 2009 Arm Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// SNF Configuration manager.

//// Begin include only once

#ifndef included_snfCFGmgr_hpp
#define included_snfCFGmgr_hpp

#include "GBUdb.hpp"
#include "snf_HeaderFinder.hpp"

#include "configuration.hpp"
#include "threading.hpp"
#include <string>
#include <set>

using namespace std;

const unsigned long int HeaderDirectiveBypass = 0x00000001; // Bypass hd rule flag.
const unsigned long int HeaderDirectiveWhite = 0x00000002; // White hd rule flag.
const unsigned long int HeaderDirectiveDrillDown = 0x00000004; // DrillDown rule flag.
const unsigned long int HeaderDirectiveSource = 0x00000008; // Source rule flag.
const unsigned long int HeaderDirectiveContext = 0x80000000; // Context activation flag.

class HeaderDirectiveHandler : public Configurator { // Handle inputs to header directives.
public:

HeaderDirectiveSet HeaderDirectives; // Managed set of Header Directives.

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator call adds the Input.

if(HeaderDirectiveContext == ContextInput.Directive) { // If a context has been established
ContextInput.Context = HeaderDirectives.size() + 1; // then setup the context ID and
DirectiveInput.Context = ContextInput.Context; // share it with the input.
HeaderDirectives.insert(ContextInput); // Insert the context tester and
ContextInput.clear(); // then clear it for future use.
}

HeaderDirectives.insert(DirectiveInput); // Insert the directive and then
DirectiveInput.clear(); // clear the input for future use.
}

HeaderFinderPattern ContextInput; // The context can be set externally.
HeaderFinderPattern DirectiveInput; // The Input can be set externally.

void reset() { // Reset the handler like this:
HeaderDirectives.clear(); // Clear the header directives.
ContextInput.clear(); // Clear the Context Input.
DirectiveInput.clear(); // Clear the Directive Input.
}

};

class HeaderDirectiveInitializer : public Configurator { // Initializes Header Directives.
private:

HeaderDirectiveHandler* MyTarget; // Needs to know its target.

public:

HeaderDirectiveInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(HeaderDirectiveHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and pushes the
MyTarget->reset(); // reset button (empties the set).
}
}
};

class HeaderDirectiveWhiteHeaderInitializer : public Configurator { // Initializes White Header Directives.
private:

HeaderDirectiveHandler* MyTarget; // Needs to know its target.

public:

HeaderDirectiveWhiteHeaderInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(HeaderDirectiveHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and sets it up
MyTarget->ContextInput.clear(); // for a white header directive.
MyTarget->DirectiveInput.clear();
MyTarget->DirectiveInput.Directive = HeaderDirectiveWhite;
}
}
};

class HeaderDirectiveBypassHeaderInitializer : public Configurator { // Initializes Bypass Header Directives.
private:

HeaderDirectiveHandler* MyTarget; // Needs to know its target.

public:

HeaderDirectiveBypassHeaderInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(HeaderDirectiveHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and sets it up
MyTarget->ContextInput.clear(); // for a bypass header directive.
MyTarget->DirectiveInput.clear();
MyTarget->DirectiveInput.Directive = HeaderDirectiveBypass;
}
}
};

class HeaderDirectiveDrilldownInitializer : public Configurator { // Initializes Drilldown Header Directives.
private:

HeaderDirectiveHandler* MyTarget; // Needs to know its target.

public:

HeaderDirectiveDrilldownInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(HeaderDirectiveHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and sets it up for
MyTarget->ContextInput.clear(); // a drilldown header directive.
MyTarget->DirectiveInput.clear();
MyTarget->DirectiveInput.Directive = HeaderDirectiveDrillDown;
MyTarget->DirectiveInput.Header = "Received:";
}
}
};

class HeaderDirectiveSourceHeaderInitializer : public Configurator { // Initializes Source Header Directives.
private:

HeaderDirectiveHandler* MyTarget; // Needs to know its target.

public:

HeaderDirectiveSourceHeaderInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(HeaderDirectiveHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and sets it up
MyTarget->ContextInput.clear(); // for a context sensitive source header
MyTarget->DirectiveInput.clear(); // directive. Activation context as well
MyTarget->ContextInput.Directive = HeaderDirectiveContext; // as source header data.
MyTarget->ContextInput.Header = "Received:";
MyTarget->DirectiveInput.Directive = HeaderDirectiveSource;
}
}
};

class RangePoint { // Range point x:Probability, y:Confidence
public:

RangePoint() : // The simple constructor sets all to zero.
Confidence(0.0),
Probability(0.0) {}

RangePoint(double C, double P) : // This constructor sets the values.
Confidence(C),
Probability(P) {}

double Probability; // Probability and Confidence are
double Confidence; // freely accessible.

bool operator<(const RangePoint& right) const { // Comparison of RangePoint objects depends
return (Confidence < right.Confidence); // on the Confidence value. This is because
} // Confidence is used as a "key" in the set.
bool operator>(const RangePoint& right) const {
return (Confidence > right.Confidence);
}
bool operator==(const RangePoint& right) const {
return (Confidence == right.Confidence);
}
bool operator<=(const RangePoint& right) const {
return (Confidence <= right.Confidence);
}
bool operator>=(const RangePoint& right) const {
return (Confidence >= right.Confidence);
}
};

class RangeHandler : public Configurator { // The handler adds edgepoints and holds and
public: // tests the set that defines the region.

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The () operator adds EdgeInput to the list.
EdgeMap.insert(EdgeInput);
}

bool On_Off; // Ranges can be turned on and off.
int Symbol; // They have a symbol assigned to them.
int Priority; // They have an evaluation priority.

RangePoint EdgeInput; // This EdgePoint is set, and added using ().
set<RangePoint> EdgeMap; // This contains the set of EdgePoints.

bool isInWhite(RangePoint& x); // True if x is inside the -P of the EdgeMap.
bool isInBlack(RangePoint& x); // True if x is inside the +P of the EdgeMap.

void reset() { EdgeMap.clear(); } // When we reset - we empty the EdgeMap.

};
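// Illustrative sketch (not part of the original header). Because RangePoint
// objects are ordered by Confidence, a set of them traces an edge across the
// confidence/probability plane; isInWhite() and isInBlack() (defined in the
// .cpp) test a point against the -P and +P sides of that edge respectively.
// The edge values and sample reading below are hypothetical.

#ifdef SNF_RANGE_SKETCH
inline bool RangeSketch() {
    RangeHandler Black;                          // Build a black-region edge map.
    Black.EdgeMap.insert(RangePoint(0.0, 0.6));  // Edge point: confidence 0.0, probability 0.6.
    Black.EdgeMap.insert(RangePoint(0.8, 0.2));  // Edge point: confidence 0.8, probability 0.2.
    RangePoint Sample(0.9, 0.5);                 // A hypothetical GBUdb reading to classify.
    return Black.isInBlack(Sample);              // True if the reading falls inside the +P region.
}
#endif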

class RangeInitializer : public Configurator { // The RangeInitializer Configurator.
private:

RangeHandler* MyTarget; // Needs to know its target.

public:

RangeInitializer() : MyTarget(NULL) {} // Constructor doesn't know its target yet.

void setTarget(RangeHandler& H) { MyTarget = &H; } // We have a way to set the target though ;-)

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The configurator() function goes to the
if(NULL!=MyTarget) { // target (if it's set) and pushes the
MyTarget->reset(); // reset button.
}
}
};

class IntegerSetHandler : public Configurator { // Integer set handler for rule panics.
public:
void operator()(ConfigurationElement& E, ConfigurationData& D) { // The operator() inserts IntegerInput
IntegerSet.insert(IntegerInput); // if it's not already a member.
}

int IntegerInput; // The input port.
set<int> IntegerSet; // The set itself.

bool isListed(int x); // How to check if an int is listed.

void reset() { IntegerSet.clear(); } // How to reset (clear) the list.
};

class IntegerSetInitializer : public Configurator { // The initializer resets the set.
private:

IntegerSetHandler* MyTarget; // It needs to know which set to init.

public:

IntegerSetInitializer() : MyTarget(NULL) {} // Start off not knowing where to go.

void setTarget(IntegerSetHandler& H) { MyTarget = &H; } // Set a pointer to the handler.

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The operator() does the trick.
if(NULL!=MyTarget) {
MyTarget->reset();
}
}
};

class XHDRSymbol { // XHeader associated with a Symbol
public:
int Symbol; // The integer symbol.
string Header; // The header to associate.
XHDRSymbol(int FreshSymbol, string FreshHeader) : // Creating the object requires both.
Symbol(FreshSymbol),
Header(FreshHeader) {}

bool operator<(const XHDRSymbol& right) const { // To live in a set we must have a <
return (Symbol < right.Symbol); // operator. Only the symbol matters
} // in this case.
};

class XHDRSymbolsHandler : public Configurator { // XHDRSymbol handler.
public:
set<XHDRSymbol> SymbolHeaders; // Carries a set of Symbol Headers.

void reset() { SymbolHeaders.clear(); } // Is reset by clearing the set.

string HeaderForSymbol(int S) { // Can return a Header for symbol.
string MatchingHeader = ""; // Starting with an empty string,
set<XHDRSymbol>::iterator iS = SymbolHeaders.find(XHDRSymbol(S,"")); // we look up the symbol and
if(SymbolHeaders.end() != iS) { // if we find it then we will
MatchingHeader = (*iS).Header; // return the matching header
} // string. If not then we return
return MatchingHeader; // the empty string.
} // Coded in-line on purpose.

bool OnOff; // Input OnOff value.
int Symbol; // Input Symbol value.
string Header; // Input Header value.

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The operator() inserts an XHDRSymbol
if(OnOff) { // if the header entry is turned on and
SymbolHeaders.insert(XHDRSymbol(Symbol, Header)); // if it's not already a member.
}
}
};

class XHDRSymbolsInitializer : public Configurator { // The XHDRSymbols initializer.
private:

XHDRSymbolsHandler* MyTarget; // It needs to know which set to init.

public:

XHDRSymbolsInitializer() : MyTarget(NULL) {} // Start off not knowing where to go.

void setTarget(XHDRSymbolsHandler& H) { MyTarget = &H; } // Set a pointer to the handler.

void operator()(ConfigurationElement& E, ConfigurationData& D) { // The operator() does the trick.
if(NULL!=MyTarget) {
MyTarget->reset();
}
}
};

enum snfIPRange { // IP action ranges
Unknown, // Unknown - not defined.
White, // This is a good guy.
Normal, // Benefit of the doubt.
New, // It is new to us.
Caution, // This is suspicious.
Black, // This is bad.
Truncate // Don't even bother looking.
};

const int ScanLogMatches_All = 2; // Include all matches.
const int ScanLogMatches_Unique = 1; // Include 1 match of each rule.
const int ScanLogMatches_None = 0; // Include only the final result.

const int LogOutputMode_None = 0; // No output (don't process).
const int LogOutputMode_API = 1; // Make available to API.
const int LogOutputMode_File = 2; // Output to msgfile.xhdr.
const int LogOutputMode_Inject = 3; // Inject into msgfile.

class snfCFGData { // Object that stores our config data.
private:

ConfigurationElement MyCFGReader; // This is how we read our cfg data.

public:

snfCFGData(); // Constructor handled in .cpp

void initializeFromFile(const char* FileName); // Initialize from the provided file.

int Generation; // Generation tag.

// Here are the derived data elements...

string ConfigFilePath; // Configuration file path
string RuleFilePath; // Rulebase file path
string SecurityKey; // Security key for rulebase

// Here are the basic data elements...

string node_identity;
string node_licenseid;
string node_authentication;

//// paths

string paths_workspace_path;
string paths_rulebase_path;
string paths_log_path;

//// logging

bool Logs_Rotation_LocalTime_OnOff;

bool Status_SecondReport_Log_OnOff;
bool Status_SecondReport_Append_OnOff;
bool Status_MinuteReport_Log_OnOff;
bool Status_MinuteReport_Append_OnOff;
bool Status_HourReport_Log_OnOff;
bool Status_HourReport_Append_OnOff;

bool Scan_Identifier_Force_Message_Id;

int Scan_Classic_Mode;
bool Scan_Classic_Rotate;
int Scan_Classic_Matches;

int Scan_XML_Mode;
bool Scan_XML_Rotate;
int Scan_XML_Matches;
bool Scan_XML_Performance;
bool Scan_XML_GBUdb;

//// xheaders

int XHDROutput_Mode;

bool XHDRVersion_OnOff;
string XHDRVersion_Header;

bool XHDRLicense_OnOff;
string XHDRLicense_Header;

bool XHDRRulebase_OnOff;
string XHDRRulebase_Header;

bool XHDRIdentifier_OnOff;
string XHDRIdentifier_Header;

bool XHDRGBUdb_OnOff;
string XHDRGBUdb_Header;

bool XHDRResult_OnOff;
string XHDRResult_Header;

bool XHDRMatches_OnOff;
string XHDRMatches_Header;

bool XHDRBlack_OnOff;
string XHDRBlack_Header;

bool XHDRWhite_OnOff;
string XHDRWhite_Header;

bool XHDRClean_OnOff;
string XHDRClean_Header;

XHDRSymbolsHandler XHDRSymbolHeaders;
XHDRSymbolsInitializer XHDRSymbolHeadersInitializer;

//// platform

string PlatformElementContents;

//// network

int network_sync_secs;
string network_sync_host;
int network_sync_port;

bool update_script_on_off;
string update_script_call;
int update_script_guard_time;

//// gbudb

int gbudb_database_condense_minimum_seconds_between;
bool gbudb_database_condense_time_trigger_on_off;
int gbudb_database_condense_time_trigger_seconds;
bool gbudb_database_condense_posts_trigger_on_off;
int gbudb_database_condense_posts_trigger_posts;
bool gbudb_database_condense_records_trigger_on_off;
int gbudb_database_condense_records_trigger_records;
bool gbudb_database_condense_size_trigger_on_off;
int gbudb_database_condense_size_trigger_megabytes;

bool gbudb_database_checkpoint_on_off;
int gbudb_database_checkpoint_secs;

RangeHandler WhiteRangeHandler;
RangeInitializer WhiteRangeInitializer;

bool gbudb_regions_white_panic_on_off;
int gbudb_regions_white_panic_rule_range;

RangeHandler BlackRangeHandler;
RangeInitializer BlackRangeInitializer;

bool gbudb_regions_black_sample_on_off;
double gbudb_regions_black_sample_probability;
int gbudb_regions_black_sample_grab_one_in;
bool gbudb_regions_black_sample_passthrough;
int gbudb_regions_black_sample_passthrough_symbol;
int gbudb_regions_black_truncate_symbol;

bool gbudb_regions_black_truncate_on_off;
double gbudb_regions_black_truncate_probability;
int gbudb_regions_black_truncate_peek_one_in;

RangeHandler CautionRangeHandler;
RangeInitializer CautionRangeInitializer;

snfIPRange RangeEvaluation(GBUdbRecord& R); // Returns the range for a GBUdbRecord.
snfIPRange RangeEvaluation(RangePoint& p); // Returns the range for a RangePoint.

HeaderDirectiveHandler HeaderDirectivesHandler; //** Handles header directives.
HeaderDirectiveInitializer HeaderDirectivesInitializer; //** Initializes header directives set.
HeaderDirectiveSourceHeaderInitializer HDSourceHeaderInitializer; //**** For source header directives.
HeaderDirectiveDrilldownInitializer HDDrilldownInitializer; //**** For drilldown header directives.
HeaderDirectiveBypassHeaderInitializer HDBypassHeaderInitializer; //**** For bypass header directives.
HeaderDirectiveWhiteHeaderInitializer HDWhiteHeaderInitializer; //**** For white header directives.

IntegerSetHandler TrainingBypassRuleHandler; // Rules to NOT train GBUdb with source.
IntegerSetInitializer TrainingBypassRuleInitializer;

IntegerSetHandler TrainingWhiteRuleHandler; // Rules to train GBUdb as white source.
IntegerSetInitializer TrainingWhiteRuleInitializer;

bool GBUdbTrainingOn_Off; // True when GBUdb training is allowed.

IntegerSetHandler RulePanicHandler;
IntegerSetInitializer RulePanicInitializer;

bool XCI_OnOff; // XML Command Interface ON or OFF.
int XCI_Port; // XML Command Interface Port number.

bool MessageFileTypeCGP_on_off; // True for scanning communigate msgs.

};

class snfCFGmgr { // Object that manages our config data.

private:

Mutex myMutex; // Serialize control during updates.

snfCFGData A; // This is where we store one copy.
snfCFGData B; // This is where we store the other.

volatile bool AisActive; // This tells us which is active.

void swapCFGData(); // This swaps the active dataset.
snfCFGData& ActiveData(); // This returns the active dataset.
snfCFGData& InactiveData(); // This returns the inactive dataset.

string InitFileName; // Initialization parameters are reused
string InitLicenseId; // any time load() is called.
string InitAuthentication;

string ConfigurationPath; // Path to active configuration file.

public:

snfCFGmgr(); // Constructor - to get things right

void initialize( // In order to initialize we need to
const char* FileName, // collect a path to our config or .snf
const char* LicenseId, // our license id and our
const char* Authentication // authentication.
);

class LoadFailure {}; // What we throw if load fails.

void load(); // Load the configuration data.

//// Access methods for config data...

string RuleFilePath(); // Rulebase file path
string SecurityKey(); // Security key for rulebase

snfCFGData* ActiveConfiguration(); // Pointer to active configuration

};
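// Illustrative usage sketch (not part of the original header), showing the
// apparent access pattern: initialize once, call load() to (re)read the
// configuration into the inactive dataset, then read settings through the
// active dataset. The file name and credentials below are hypothetical.

#ifdef SNF_CFGMGR_USAGE_SKETCH
inline void snfCFGmgrUsageSketch() {
    snfCFGmgr CFG;                                                    // The configuration manager.
    CFG.initialize("snf_config.xml", "licenseid", "authentication");  // Hypothetical values.
    try {
        CFG.load();                                                   // Parse and activate the data.
    } catch(snfCFGmgr::LoadFailure&) {
        // Handle a bad or missing configuration file here.
    }
    string RulebasePath = CFG.RuleFilePath();                         // Read from the active dataset.
    snfCFGData* Active = CFG.ActiveConfiguration();                   // Or use the whole active object.
    (void) RulebasePath; (void) Active;
}
#endif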

#include "snfCFGmgr.inline.hpp"

#endif
// End include only once

+ 46
- 0
snfCFGmgr.inline.hpp

@@ -0,0 +1,46 @@
// snfCFGmgr.inline.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC.
//
// Inline functions/methods for snfCFGmgr module.

//// IntegerSetHandler /////////////////////////////////////////////////////////

inline bool IntegerSetHandler::isListed(int x) { // How to check if an int is listed.
return (IntegerSet.end() != IntegerSet.find(x));
}


//// snfCFGmgr /////////////////////////////////////////////////////////////////

inline snfCFGmgr::snfCFGmgr() : // We construct a CFGmgr this way...
AisActive(false), // So that A is active after 1st load()
InitFileName(""), // and all of the Init strings are
InitLicenseId(""), // empty.
InitAuthentication(""),
ConfigurationPath("") {
}

inline void snfCFGmgr::swapCFGData() { // This swaps the active dataset.
AisActive = (AisActive)?false:true;
}

inline snfCFGData& snfCFGmgr::ActiveData() { // This returns the active dataset.
return (AisActive) ? A : B;
}

inline snfCFGData& snfCFGmgr::InactiveData() { // This returns the inactive dataset.
return (AisActive) ? B : A;
}

inline string snfCFGmgr::RuleFilePath() { // Rulebase file path
return ActiveData().RuleFilePath;
}

inline string snfCFGmgr::SecurityKey() { // Security key for rulebase
return ActiveData().SecurityKey;
}

inline snfCFGData* snfCFGmgr::ActiveConfiguration() { // Pointer to active configuration
return &(ActiveData());
}

+ 232
- 0
snfGBUdbmgr.cpp

@@ -0,0 +1,232 @@
// snfGBUdbmgr.cpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// See snfGBUdbmgr.hpp for details.

#include "snfGBUdbmgr.hpp"
#include <unistd.h>

using namespace std;

const ThreadType snfGBUdbmgr::Type("snfGBUdbmgr"); // The thread's type.

snfGBUdbmgr::snfGBUdbmgr() : // Clean init and start thread.
Thread(snfGBUdbmgr::Type, "GBUdb Manager"), // GBUdb Manager type and Name.
CondenseGuardTime(600000), // 10 minute guard time by default.
TimeTriggerOnOff(true), // By default, condense once per day.
TimeTrigger(86400000), // (24 hours = 86,400,000 ms).
PostsTriggerOnOff(false), // By default do not trigger on posts.
PostsTriggerValue(262144), // but if we do, use a quarter million.
RecordsTriggerOnOff(false), // By default do not trigger on records.
RecordsTriggerValue(150000), // but if we do, use 150K.
SizeTriggerOnOff(true), // By default trigger on size as a
SizeTriggerValue(150), // safety valve at 150Mbytes.
CheckpointOnOff(true), // By default save a snapshot once
CheckpointTrigger(3600000), // every hour.
MyGBUdb(NULL), // NULL our links to avoid
MyLOGmgr(NULL), // any errors when the thread starts.
TimeToStop(false) { // It is not time to stop ;-)
run(); // Start our thread.
}

snfGBUdbmgr::~snfGBUdbmgr() { // Clean shutdown & stop thread.
stop(); // Stop the thread if it's not already.
MyGBUdb = NULL; // NULL our links and false our
MyLOGmgr = NULL; // configuration for safety.
Configured = false;
}

void snfGBUdbmgr::linkGBUdb(GBUdb& G) { // Connect to our GBUdb
ScopeMutex JustMe(MyMutex); // Lock for the config change.
MyGBUdb = &G; // Set the new link.
}

void snfGBUdbmgr::linkLOGmgr(snfLOGmgr& L) { // Connect to our LOGmgr
ScopeMutex JustMe(MyMutex); // Lock for the config change.
MyLOGmgr = &L; // Set the new link.
}

void snfGBUdbmgr::configure(snfCFGData& CFGData) { // Establish or change our CFG.
ScopeMutex JustMe(MyMutex); // Only when we're not busy.

// Set up our configuration from the CFGData provided.

// Being careful not to muck with running timers unless their
// configuration values have actually changed...

const int SECsASms = 1000; // How to convert seconds to milliseconds.

if(CondenseGuardTime.getDuration() != // If the condensation guard time is
(SECsASms * CFGData.gbudb_database_condense_minimum_seconds_between)) { // new and different then set the
CondenseGuardTime.setDuration( // condensation guard timer to the
(SECsASms * CFGData.gbudb_database_condense_minimum_seconds_between) // new value.
);
}

TimeTriggerOnOff = CFGData.gbudb_database_condense_time_trigger_on_off; // Time-Trigger On?

if(TimeTrigger.getDuration() != // Time-Trigger different?
(SECsASms * CFGData.gbudb_database_condense_time_trigger_seconds)) {
TimeTrigger.setDuration( // If it is then adopt the new value.
SECsASms * CFGData.gbudb_database_condense_time_trigger_seconds
);
}

PostsTriggerOnOff = CFGData.gbudb_database_condense_posts_trigger_on_off; // Posts trigger on?
PostsTriggerValue = CFGData.gbudb_database_condense_posts_trigger_posts; // What is the posts trigger threshold?

RecordsTriggerOnOff = CFGData.gbudb_database_condense_records_trigger_on_off; // Records trigger on?
RecordsTriggerValue = CFGData.gbudb_database_condense_records_trigger_records; // What is the records trigger threshold?

SizeTriggerOnOff = CFGData.gbudb_database_condense_size_trigger_on_off; // Size trigger on?
SizeTriggerValue = CFGData.gbudb_database_condense_size_trigger_megabytes; // What is the size trigger threshold?

// Checkpoint

CheckpointOnOff = CFGData.gbudb_database_checkpoint_on_off; // Checkpoint on?

if(CheckpointTrigger.getDuration() != // If the Checkpoint time is
(SECsASms * CFGData.gbudb_database_checkpoint_secs)) { // new and different then
CheckpointTrigger.setDuration( // adopt the new value.
(SECsASms * CFGData.gbudb_database_checkpoint_secs)
);
}

// GBUdb file name

string GBUdbFileName; // Formulate the correct GBUdb file name
GBUdbFileName = CFGData.paths_workspace_path + // using the CFGData.
CFGData.node_licenseid + ".gbx";

if( // If the file name for our GBUdb
NULL == (*MyGBUdb).FileName() || // is not yet set, or
0 != GBUdbFileName.compare((*MyGBUdb).FileName()) // if it is different than the
) { // formulated file name we have then
(*MyGBUdb).FileName(GBUdbFileName.c_str()); // set the GBUdb file name.
}

// Safety check to set the Configured bit.

if(NULL != MyGBUdb && NULL != MyLOGmgr) { // If we have all of our parts
Configured = true; // then set our configured flag.
} else { // If anything is missing then
Configured = false; // make sure the flag is false.
}
}

//// The snfGBUdbmgr::load() method isn't exactly what you would expect. It
// will load the GBUdb file if that file exists, but if not it does nothing.
// The intention is that a new GBUdb will already have been created. If a
// pre-existing GBUdb is available then that one will be loaded for use. If
// it does not exist, then the new, empty GBUdb will be used instead and will
// eventually be saved for later re-use.

void snfGBUdbmgr::load() { // Load the GBUdb as configured.
ScopeMutex JustMe(MyMutex); // Just me while I do this.
if( // Perform some sanity checks.
NULL != MyGBUdb && // If we have a GBUdb and
0 < string(MyGBUdb->FileName()).length() && // it has a file name and
0 == access(MyGBUdb->FileName(),R_OK) // the file can be accessed
) { // then we can proceed:
MyGBUdb->load(); // Load the GBUdb from disk.
} // If that didn't work we'll assume
} // we're starting up a new gbx file ;-)

// DoMaintenanceWork encapsulates all of our maintenance functions. It runs
// with the mutex locked so that the configuration is stable during each pass.

void snfGBUdbmgr::DoMaintenanceWork() { // Do our watchdog work.

if(!Configured) return; // Do nothing if we're not configured.

ScopeMutex JustMe(MyMutex); // No CFG changes while I'm busy.

if(CondenseGuardTime.isExpired()) { // If we are allowed to condense
bool CondenseTriggered = false; // check to see if we should.

// time-trigger

if(
TimeTriggerOnOff && // If the time-trigger is on
TimeTrigger.isExpired() // and the time has expired
) { // then it is time to condense.
CondenseTriggered = true; // Set the condense flag and
TimeTrigger.restart(); // restart the timer.
}

// posts-trigger

if(
PostsTriggerOnOff && // If posts-trigger is on
(*MyGBUdb).Posts() >= PostsTriggerValue // and the Posts() count is high
) { // enough then trigger the
CondenseTriggered = true; // condense operation.
}

// records-trigger

if(
RecordsTriggerOnOff && // If records-trigger is on
(*MyGBUdb).IPCount() >= RecordsTriggerValue // and the number of IPs is high
) { // enough then trigger the
CondenseTriggered = true; // condense operation.
}

// size-trigger

const int MByte = 1048576; // How big is a megabyte anyway?

if(
SizeTriggerOnOff && // If size-trigger is on
((*MyGBUdb).Size()/MByte) >= SizeTriggerValue // and the size of the db is high
) { // enough then trigger
CondenseTriggered = true; // the condense operation.
}

if(CondenseTriggered) { // If we need to condense then
(*MyGBUdb).reduce(); // reduce all counts in the db
(*MyGBUdb).compress(); // and eliminate any that drop to zero.
CondenseGuardTime.restart(); // That done, reset the guard timer.
(*MyLOGmgr).RecordCondenseEvent(); // Log the event.
}
}

// Time to save a snapshot?

if(
CheckpointOnOff && // If checkpoints are turned on
CheckpointTrigger.isExpired() // and it is time to create one
) {
(*MyGBUdb).saveSnapshot(); // then save a snapshot and
CheckpointTrigger.restart(); // restart the timer.
(*MyLOGmgr).RecordSaveEvent(); // Log the event.
}
}

// Stopping the thread...

void snfGBUdbmgr::stop() { // To stop the manager thread we
if(!TimeToStop) { // check to see we need to then
TimeToStop = true; // set the time to stop flag
join(); // and join the thread.
}
}

// The thread's task is to call DoMaintenanceWork() once every second.

void snfGBUdbmgr::myTask() { // This is what our thread does.
Sleeper WaitATic(1000); // We need a 1 second sleeper.
while(!TimeToStop) { // While it's not time to stop
WaitATic(); // wait a tic and then do work.
DoMaintenanceWork();
}
}

void snfGBUdbmgr::GetAlertsForSync(list<GBUdbAlert>& AlertList) { // Fill AlertList w/ outgoing alerts.
(*MyGBUdb).GetAlerts(AlertList); // For now, just pass this through.
}

void snfGBUdbmgr::ProcessReflections(list<GBUdbAlert>& Reflections) { // Integrate returning reflections.
(*MyGBUdb).ImportAlerts(Reflections); // For now, just pass this through.
}

+ 68
- 0
snfGBUdbmgr.hpp

@@ -0,0 +1,68 @@
// snfGBUdbmgr.hpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// This module manages the GBUdb(s) that are used in the SNF scanner engine.
// It is responsible for setting parameters, monitoring activity, and handling
// scheduled maintenance tasks.

#ifndef snfGBUdbmgr_included
#define snfGBUdbmgr_included

#include "threading.hpp"
#include "timing.hpp"
#include "snfCFGmgr.hpp"
#include "snfLOGmgr.hpp"
#include "GBUdb.hpp"

using namespace std;

class snfLOGmgr;

class snfGBUdbmgr : public Thread {
private:
Mutex MyMutex;
GBUdb* MyGBUdb;
snfLOGmgr* MyLOGmgr;
bool Configured;
volatile bool TimeToStop;

// Condensation parts

Timeout CondenseGuardTime;
bool TimeTriggerOnOff;
Timeout TimeTrigger;
bool PostsTriggerOnOff;
int PostsTriggerValue;
bool RecordsTriggerOnOff;
int RecordsTriggerValue;
bool SizeTriggerOnOff;
int SizeTriggerValue;

// Checkpoint parts

bool CheckpointOnOff;
Timeout CheckpointTrigger;

// Utility functions

void DoMaintenanceWork();

public:
snfGBUdbmgr(); // Clean init and start thread.
~snfGBUdbmgr(); // Clean shutdown & stop thread.
void linkGBUdb(GBUdb& G); // Connect to our GBUdb.
void linkLOGmgr(snfLOGmgr& L); // Connect to our LOGmgr.
void configure(snfCFGData& CFGData); // Establish or change our CFG.
void load(); // Load the GBUdb as configured.
void stop(); // Stop the thread.
void myTask(); // Establish our thread's task.

void GetAlertsForSync(list<GBUdbAlert>& AlertList); // Fill AlertList w/ outgoing alerts.
void ProcessReflections(list<GBUdbAlert>& Reflections); // Integrate returning reflections.

const static ThreadType Type; // The thread's type.

};
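// Illustrative wiring sketch (not part of the original header). The manager's
// thread starts in the constructor, so the caller only needs to link it to a
// GBUdb and a LOGmgr, hand it a configuration, and load the database. The
// objects passed in are assumed to be owned elsewhere by the host application.

#ifdef SNF_GBUDBMGR_WIRING_SKETCH
inline void snfGBUdbmgrWiringSketch(GBUdb& Database, snfLOGmgr& Logger, snfCFGData& CFGData) {
    snfGBUdbmgr Manager;            // The maintenance thread starts running immediately.
    Manager.linkGBUdb(Database);    // Tell it which GBUdb to maintain.
    Manager.linkLOGmgr(Logger);     // Tell it where to log condense/save events.
    Manager.configure(CFGData);     // Adopt triggers, timers, and the .gbx file name.
    Manager.load();                 // Load an existing .gbx file if one is present.
}                                   // The destructor stops the thread cleanly.
#endif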

#endif

+ 1950
- 0
snfLOGmgr.cpp
File diff not shown because of its large size


+ 670
- 0
snfLOGmgr.hpp

@@ -0,0 +1,670 @@
// snfLOGmgr.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// SNF Logging and Statistics engine.

////////////////////////////////////////////////////////////////////////////////
//// Begin snfLOGmgr include only once

#ifndef snfLOGmgr_included
#define snfLOGmgr_included

#include <list>
#include <set>
#include <string>
#include <vector>
#include <sstream>
#include <ctime>
#include <cstdio>

#include "timing.hpp"
#include "threading.hpp"
#include "snf_match.h"

#include "snfCFGmgr.hpp"
#include "snfNETmgr.hpp"
#include "GBUdb.hpp"

#include "histogram.hpp"

class snfNETmgr; // Declare snfNETmgr
extern const char* SNF_ENGINE_VERSION; // Declare the Engine Version Data

using namespace std;

//// DiscLogger ////////////////////////////////////////////////////////////////
// Writes log files back to Disc and double buffers data to minimize contention
// and delays. So - if it takes a few milliseconds to post the log to disc, the
// application that post()s to the log does not have to wait. Write back happens
// about once per second when enabled. Files can be appended or overwritten.

class DiscLogger : private Thread { // Double buffered lazy writer.
private:
Mutex BufferControlMutex; // Protects buffers while swapping.
Mutex FlushMutex; // Protects flush operations.
string myPath; // Where the file should be written.
string BufferA; // Log data buffer A.
string BufferB; // Log data buffer B.
bool UseANotB; // Indicates the active buffer.
bool isDirty; // True if data not yet written.
bool isBad; // True if last write failed.
bool isTimeToStop; // True when shutting down.
bool inAppendMode; // True when in append mode.
string& FlushingBuffer() { return ((UseANotB)?BufferA:BufferB); } // Returns the buffer for flushing.
string& PostingBuffer() { return ((UseANotB)?BufferB:BufferA); } // Returns the buffer for posting.
bool isEnabled; // True when this should run.
void myTask(); // Write back thread task.

public:
DiscLogger(string N = "UnNamed"); // Constructs and starts the thread.
~DiscLogger(); // Flushes and stops the thread.

string Path(const string PathName) { // Sets the file path.
ScopeMutex NewSettings(BufferControlMutex);
myPath = PathName;
return myPath;
}
string Path() { // Returns the file path.
ScopeMutex DontMove(BufferControlMutex);
return myPath;
}

bool AppendMode(const bool AppendNotOverwrite) { // Sets append mode if true.
return (inAppendMode = AppendNotOverwrite);
}
bool AppendMode() { return (inAppendMode); } // True if in append mode.

bool OverwriteMode(const bool OverwriteNotAppend) { // Sets overwrite mode if true.
return (inAppendMode = (!OverwriteNotAppend));
}
bool OverwriteMode() { return (!inAppendMode); } // True if in overwrite mode.

void post(const string Input, const string NewPath = ""); // Post Input to log, [set path].
void flush(); // Flush right now!
bool Bad() { return (isBad); } // True if last write failed.
bool Good() { return (!isBad); } // True if not Bad();
bool Dirty() { return (isDirty); } // True if data needs to be written.
bool Enabled(const bool MakeEnabled) { return (isEnabled = MakeEnabled); } // Enables writing if true.
bool Enabled() { return (isEnabled); } // True if enabled.

const static ThreadType Type; // The thread's type.

const static ThreadState DiscLogger_Flush; // Flushing state.
const static ThreadState DiscLogger_Wait; // Waiting state.

};
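// Illustrative usage sketch (not part of the original header). Per the notes
// above, post() only appends to the in-memory buffer; the write-back thread
// puts the data on disc about once per second while Enabled(). The path below
// is a hypothetical placeholder.

#ifdef DISCLOGGER_USAGE_SKETCH
inline void DiscLoggerUsageSketch() {
    DiscLogger Log("SketchLog");           // Construct (the lazy writer thread starts).
    Log.Path("sketch.log.xml");            // Where the file should be written.
    Log.AppendMode(true);                  // Append rather than overwrite.
    Log.Enabled(true);                     // Allow the write-back to run.
    Log.post("<entry>example</entry>\n");  // Cheap call; no disc wait here.
    Log.flush();                           // Force a write right now if needed.
}                                          // The destructor flushes and stops the thread.
#endif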

//// IPTestRecord //////////////////////////////////////////////////////////////
// Contains a complete analysis of a given IP. snf_RulebaseHandler provides a
// test facility that accepts and processes IPTestRecord objects. The calling
// process can then submit the IPTestRecord along with its action to the
// snfLOGmgr for logging.

class IPTestRecord { // IP Analysis Record.
public:
IP4Address IP; // The IP to be tested.
GBUdbRecord G; // The GBUdb Record for the IP.
snfIPRange R; // The GBUdb classification (range).
int Code; // Code associated with Range.
IPTestRecord(IP4Address testIP) : IP(testIP), Code(0) {} // Construct with an IP.
};

//// snfScanData ///////////////////////////////////////////////////////////////
// Contains testing data for a message.
// It's defined here in the LOGmgr module because this is the module that must
// log and collect statistics for each scanned message. The snfScanData object
// is the standardized way each engine reports its scan results to snfLOGmgr.

const int MaxIPsPerMessage = 50; // Maximum number of IPs to scan per message.

struct IPScanRecord { // Structure for IP scan results.
int Ordinal; // Which IP starting with zero.
unsigned int IP; // What is the IP.
GBUdbRecord GBUdbData; // GBUdb data.
};

class snfScanData { // Scan Data record for each message.

private:

IPScanRecord MyIPScanData[MaxIPsPerMessage]; // Array of IP scan results.
int MyIPCount; // Count of IP scan results.
bool DrillDownFlags[MaxIPsPerMessage]; // DrillDown flags. (Set Ignore).

int SourceIPOrdinal; // Ordinal to source IP scan data.
bool SourceIPFoundFlag; // True if source IP is set.
snfIPRange SourceIPRangeFlag; // GBUdb detection range for source IP.

IP4Address myCallerForcedSourceIP; // Caller forced source IP if not 0UL.
IP4Address myHeaderDirectiveSourceIP; // Header forced source IP if not 0UL.

public:

snfScanData(int ScanHorizon); // Constructor.
~snfScanData(); // Destructor.

// The ReadyToClear bit helps multi-phase input situations where the first
// phase might add some input data before calling the base-level scanner.
// In those cases, the pre-scan-phase will clear() the ScanData (and with
// it the ReadyToClear bit) before adding a few critical pieces of data -
// such as the scan name and the scan-start UTC for example. When the base
// level scanner is called to perform the actual scan, the clear() call
// will be inert so that any pre-set data will be preserved.

bool ReadyToClear; // True when Logging is done.
void clear(); // Clear for a new message.

class NoFreeIPScanRecords {}; // Thrown when we run out of scan records.
class OutOfBounds {}; // Thrown in IPScanData if no record at i.

int IPScanCount(); // Return the number of IPs.
IPScanRecord& newIPScanRecord(); // Get the next free IP scan record.
IPScanRecord& IPScanData(int i); // Return the IP scan record i.

// 20080221 _M We can now define in header directives patterns for Received
// headers that we should drill past if they show up as a message source
// candidate. This allows GBUdb to learn to ignore certain IPs automatically
// as they arrive either by IP stubs such as "[12.34.56." or by reverse DNS
// data such as "friendly.example.com [". When the header directives engine
// scans the headers it will call drillPastOrdinal for any Received header
// that matches a <drilldown/> directive. Later when the header analysis
// engine tries to pick the source for the message it will check each source
// candidate against the isDrillDownSource() method. If the source is to be
// ignored then it will set the ignore flag for that IP, process it as if
// it were ignored, and continue searching for the actual source.

void drillPastOrdinal(int O); // Sets Drill Down flag for IP record O.
bool isDrillDownSource(IPScanRecord& X); // True if we drill through this source.

IP4Address HeaderDirectiveSourceIP(IP4Address A); // set Header directive source IP.
IP4Address HeaderDirectiveSourceIP(); // get Header directive source IP.
IP4Address CallerForcedSourceIP(IP4Address A); // set Caller forced source IP.
IP4Address CallerForcedSourceIP(); // get Caller forced source IP.

IPScanRecord& SourceIPRecord(IPScanRecord& X); // Sets the source IP record.
IPScanRecord& SourceIPRecord(); // Gets the source IP record.
bool FoundSourceIP(); // True if the source IP record was set.
snfIPRange SourceIPRange(); // GET Source IP range.
snfIPRange SourceIPRange(snfIPRange R); // SET Source IP range for this scan.

// Direct access data...

string SourceIPEvaluation; // GBUdb Source IP evaluation.

// LogControl and General Message Flags

time_t StartOfJobUTC; // Timestamp at start of job.
int SetupTime; // Time in ms spent setting up to scan.
string ScanName; // Identifying name or message file name.
Timer ScanTime; // Scan time in ms.
int ScanDepth; // Scan Depth in evaluators.

string ClassicLogText; // Classic log entry text if any.
string XMLLogText; // XML log entry text if any.
string XHDRsText; // XHeaders text if any.
bool XHeaderInjectOn; // True if injecting headers is on.
bool XHeaderFileOn; // True if creating .xhdr file is on.

bool MessageFileTypeCGPOn; // Expect a CGP type message file.

int ScanSize; // What size is the scan request.

// GBUdb Activity Flags

bool GBUdbNormalTriggered; // True if GBUdb indeterminate IP source.

bool GBUdbWhiteTriggered; // True if GBUdb found source IP white.
bool GBUdbWhiteSymbolForced; // True if white was on and symbol was set.
bool GBUdbPatternSourceConflict; // True if pattern was found with white IP.
bool GBUdbAutoPanicTriggered; // True if autopanic was triggered.
bool GBUdbAutoPanicExecuted; // True if an autopanic was added.

bool GBUdbBlackTriggered; // True if GBUdb found source IP black.
bool GBUdbBlackSymbolForced; // True if black was on and symbol was set.
bool GBUdbTruncateTriggered; // True if Truncate was possible.
bool GBUdbPeekTriggered; // True if we could peek.
bool GBUdbSampleTriggered; // True if we could sample.
bool GBUdbTruncateExecuted; // True if we actually did truncate.
bool GBUdbPeekExecuted; // True if we peeked instead of truncating.
bool GBUdbSampleExecuted; // True if we sampled.

bool GBUdbCautionTriggered; // True if GBUdb found source IP suspicious.
bool GBUdbCautionSymbolForced; // True if caution was on and symbol was set.

// Rule panics

set<int> RulePanics; // A list of rule IDs panicked this scan.

// Pattern Engine Scan Result Data

vector<unsigned char> FilteredData; // Message data after filter chain.
unsigned long int HeaderDirectiveFlags; // Flags set by header directives.

bool PatternWasFound; // True if the pattern engine matched.
int PatternID; // The winning rule ID.
int PatternSymbol; // The associated symbol.

list<snf_match> MatchRecords; // List of match records.
list<snf_match>::iterator MatchRecordsCursor; // Localized iterator for match records.
int MatchRecordsDelivered; // Match records seen so far.

int CompositeFinalResult; // What the scan function returned.

};
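// Illustrative sketch (not part of the original header) of the source-selection
// idea described in the drill-down notes above: walk the recorded IPs in order
// and adopt the first candidate that is not flagged for drill-down. The real
// header analysis engine applies additional rules; this only shows how
// isDrillDownSource() and SourceIPRecord() are meant to interact.

#ifdef SNF_SCANDATA_DRILLDOWN_SKETCH
inline bool PickSourceSketch(snfScanData& Scan) {
    for(int i = 0; i < Scan.IPScanCount(); i++) {        // Walk the candidates in order.
        IPScanRecord& Candidate = Scan.IPScanData(i);    // Look at this candidate.
        if(Scan.isDrillDownSource(Candidate)) continue;  // Skip drill-down (ignored) IPs.
        Scan.SourceIPRecord(Candidate);                  // Adopt the first acceptable IP
        return true;                                     // as the message source.
    }
    return false;                                        // No acceptable source was found.
}
#endif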

//// SMHDMY counter
//
// Provides a running SUM for a series of sliding windows. The input() expects
// a new piece of data every second (or so). It is presumed that another counter
// will keep track of the actual milliseconds if accuracy is required. The object
// is all primitive data parts so it is possible to store and retrieve this object
// in binary format on the same system when that's helpful.

class snf_SMHDMY_Counter { // Sliding window "live" counter.
private:

bool do_input(int X, int& SUM, int* DATA, int& ORDINAL, int SIZE); // Subroutine for assimilating input.

public:
snf_SMHDMY_Counter() { // When making a new one, reset all
memset(this, 0, sizeof(snf_SMHDMY_Counter)); // data to zero. It's all ints ;-)
}

// 60 seconds is a minute (6 x 10)

int SEC6DATA[6], SEC6SUM, SEC6ORDINAL;
int SEC10DATA[10], SEC10SUM, SEC10ORDINAL;

// 60 minutes is an hour (6 x 10)

int MIN6DATA[6], MIN6SUM, MIN6ORDINAL;
int MIN10DATA[10], MIN10SUM, MIN10ORDINAL;

// 24 hours is a day (4 x 6)

int HOUR4DATA[4], HOUR4SUM, HOUR4ORDINAL;
int HOUR6DATA[6], HOUR6SUM, HOUR6ORDINAL;

// 7 days is a week (7)

int WEEK7DATA[7], WEEK7SUM, WEEK7ORDINAL;

// 30 days is a month (5 x 6)

int MONTH5DATA[5], MONTH5SUM, MONTH5ORDINAL;
int MONTH6DATA[6], MONTH6SUM, MONTH6ORDINAL;

// 12 months (almost) is a year (3 x 4)

int YEAR3DATA[3], YEAR3SUM, YEAR3ORDINAL;
int YEAR4DATA[4], YEAR4SUM, YEAR4ORDINAL;

// 365 days is a year

int YEAR365DATA[365], YEAR365SUM, YEAR365ORDINAL;

void input(int X); // Add new data to the counter.

bool Cycled60Seconds() { return (0 == SEC6ORDINAL && 0 == SEC10ORDINAL); } // Full cycle of data for seconds.
int Sum60Seconds() { return SEC10SUM; }
int Sum66Seconds() { return (SEC6SUM + SEC10SUM); }
int SumThru1Minute() { return Sum66Seconds(); } // All samples thru one minute.

bool Cycled60Minutes() { // Full cycle of data for minutes.
return (Cycled60Seconds() && 0 == MIN6ORDINAL && 0 == MIN10ORDINAL);
}

int Sum60Minutes() { return MIN10SUM; }
int Sum66Minutes() { return (MIN6SUM + MIN10SUM); }
int SumThru1Hour() { return SumThru1Minute() + Sum66Minutes(); } // All samples thru one hour.

bool Cycled24Hours() { // Full cycle of data for hours.
return (Cycled60Minutes() && 0 == HOUR4ORDINAL && 0 == HOUR6ORDINAL);
}

int Sum24Hours() { return HOUR6SUM; }
int Sum28Hours() { return (HOUR4SUM + HOUR6SUM); }
int SumThru1Day() { return SumThru1Hour() + Sum28Hours(); } // All samples thru one day.

bool Cycled7Days() { return (Cycled24Hours() && 0 == WEEK7ORDINAL); } // Full cycle of data for week.

int Sum7Days() { return WEEK7SUM; }
int SumThru1Week() { return SumThru1Day() + Sum7Days(); } // All samples thru one week.

bool Cycled30Days() { // Full cycle of data for month.
return (Cycled24Hours() && 0 == MONTH6ORDINAL && 0 == MONTH5ORDINAL);
}

int Sum30Days() { return MONTH6SUM; }
int Sum35Days() { return (MONTH5SUM + MONTH6SUM); }
int SumThru1Month() { return SumThru1Day() + Sum35Days(); } // All samples thru one month.

bool Cycled12Months() { // Full cycle of data for 12 months.
return (Cycled30Days() && 0 == YEAR3ORDINAL && 0 == YEAR4ORDINAL);
}

int Sum450Days() { return (YEAR3SUM + YEAR4SUM); }
int SumThru1Year() { return SumThru1Month() + Sum450Days(); } // All samples thru one year.

bool Cycled365Days() { return (Cycled24Hours() && 0 == YEAR365ORDINAL); } // Full cycle of data for 365 days.

int Sum365Days() { return YEAR365SUM; }

};
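// Illustrative usage sketch (not part of the original header). Per the notes
// above, input() expects roughly one sample per second; the sliding sums can
// then be read at any time. The sample values below are hypothetical.

#ifdef SNF_SMHDMY_SKETCH
inline void SMHDMYCounterSketch() {
    snf_SMHDMY_Counter MessagesCounter;                  // All windows start at zero.
    for(int Second = 0; Second < 120; Second++) {        // Pretend two minutes have passed,
        MessagesCounter.input(5);                        // posting 5 events each second.
    }
    int LastMinute = MessagesCounter.Sum60Seconds();     // Sum over the most recent 60 samples.
    bool FullCycle = MessagesCounter.Cycled60Seconds();  // True once a full minute has cycled.
    (void) LastMinute; (void) FullCycle;
}
#endif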

//// snfLOGmgr /////////////////////////////////////////////////////////////////

// A note about the LOG manager and configuration data:
// Events that are logged with the log manager may come from scans using
// different configurations. In order to keep things as sane as possible,
// operations that are dependent on configuration information such as creating
// log file entries or producing status page data will require that an
// appropriate snfCFGData object be provided by reference and that the
// snfCFGData object be guaranteed to remain stable for the duration of the
// call. Changing snfCFGData may result in inconsistent results.
//
// This requirement is fairly easy to accomplish since posts to the LOGmgr
// will come from scanning engines that have a snfCFGPacket "grab()ed" during
// their operations, and executive requests will come from the rulebase
// manager which can grab a snfCFGPacket for the duration of the request.
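//
// For illustration only (object names here are hypothetical): a caller keeps
// one stable snfCFGData for the whole call --
//
// LogManager.logThisScan(StableCFGData, ThisScanData);
//
// and must not replace or modify StableCFGData until logThisScan() returns.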

const int NumberOfResultCodes = 64;

class snfCounterPack {
public:
snfCounterPack(); // Construct new CounterPacks clean.
void reset(); // How to reset a counter pack.

Timer ActiveTime; // Measures Active (swapped in) Time.

struct {

unsigned long Scans; // Number of messages scanned.
unsigned long Spam; // Count of spam results.
unsigned long Ham; // Count of ham results.

unsigned long GBUdbNormalTriggered; // Count of indeterminate GBUdb IP hits.

unsigned long GBUdbWhiteTriggered; // Count of times GBUdb found the source IP white.
unsigned long GBUdbWhiteSymbolForced; // Count of times white was on and the symbol was set.
unsigned long GBUdbPatternSourceConflict; // Count of times a pattern was found with a white IP.
unsigned long GBUdbAutoPanicTriggered; // Count of times autopanic was triggered.
unsigned long GBUdbAutoPanicExecuted; // Count of times an autopanic was added.

unsigned long GBUdbBlackTriggered; // Count of times GBUdb found the source IP black.
unsigned long GBUdbBlackSymbolForced; // Count of times black was on and the symbol was set.
unsigned long GBUdbTruncateTriggered; // Count of times Truncate was possible.
unsigned long GBUdbPeekTriggered; // Count of times we could peek.
unsigned long GBUdbSampleTriggered; // Count of times we could sample.
unsigned long GBUdbTruncateExecuted; // Count of times we actually did truncate.
unsigned long GBUdbPeekExecuted; // Count of times we peeked instead of truncating.
unsigned long GBUdbSampleExecuted; // Count of times we sampled.

unsigned long GBUdbCautionTriggered; // Count of times GBUdb found the source IP suspicious.
unsigned long GBUdbCautionSymbolForced; // Count of times caution was on and the symbol was set.

unsigned long PatternWasFound; // Count of scanner matches.

unsigned long RulePanicFound; // Count of rule panics.

} Events;
};

//// Interval timers precisely track the time between hack()s. There are
//// two timers inside. One is active, the other is stopped. Each time hack()
//// is called, one timer becomes active at the moment the other is stopped.

class IntervalTimer { // Precision interval timer.

private:

Timer A; // Here is one timer.
Timer B; // Here is the other timer.
bool ANotB; // True if A is the active timer.

Timer& Active(); // Selects the active timer.
Timer& Inactive(); // Selects the inactive timer.

public:

msclock hack(); // Chop off a new interval & return it.
msclock Interval(); // Return the last interval.
msclock Elapsed(); // Return the time since last hack.
};
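
// A minimal usage sketch (illustration only, not part of the original API):
// one call to hack() per event yields the time between consecutive events.
// The function name exampleEventInterval() is hypothetical.

inline msclock exampleEventInterval(IntervalTimer& T) {
return T.hack(); // Swap timers and return the interval
} // measured since the previous hack().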

//// PersistentState stores the counters we keep between runs.

class snfLOGPersistentState {
public:

snfLOGPersistentState() : Ready(0) {}

bool Ready; // True if we're ready to use.

void store(string& FileNameToStore); // Write the whole thing to a file.
void restore(string& FileNameToRestore); // Read the whole thing from a file.

time_t LastSyncTime; // time_t of last Sync event.
time_t LastSaveTime; // time_t of last GBUdb Save event.
time_t LastCondenseTime; // time_t of last GBUdb Condense event.

int LatestRuleID; // Latest rule ID seen so far.
int SerialNumberCounter; // Remembers the serial number.
};

class snfLOGmgr : private Thread {

private:

Mutex MyMutex; // Mutex to serialize updates & queries.
Mutex ConfigMutex; // Mutex to protect config changes.
Mutex SerialNumberMutex; // Protects the serial number.
Mutex PeekMutex; // Protects Peek Loop Counter.
Mutex SampleMutex; // Protects Sample Loop Counter.
Mutex StatusReportMutex; // Protects status report post & get.

volatile int PeekEnableCounter; // How many peek attempts recently?
volatile int SampleEnableCounter; // How many sample attempts recently?

snfCounterPack CounterPackA, CounterPackB; // Swapable counter packs.

snfCounterPack* CurrentCounters; // Current Event Counters.
snfCounterPack* ReportingCounters; // Counters being used to collect data.

snfCounterPack* getSnapshot(); // Get a copy of the current counters.

volatile bool Configured; // True if we're properly configured.
volatile bool TimeToDie; // True when the thread should stop.

void myTask(); // Thread task.

time_t StartupTime; // Time since engine started.

snfLOGPersistentState Status; // Persistent State Data.
string PersistentFileName; // File name for the State Data.

snfNETmgr* myNETmgr; // Net manager link.
GBUdb* myGBUdb; // GBUdb link.

// Configuration

string ActiveRulebaseUTC; // UTC of last successful load.
string AvailableRulebaseUTC; // UTC of rulebase available for update.
bool NewerRulebaseIsAvailable; // True if a newer rulebase is available.

string myPlatformVersion; // Version info for platform.

bool Rotate_LocalTime; // Rotate logs using localtime.

string LogsPath; // Path to logs directory.
bool ClassicLogRotate; // True = Rotate Classic Log.
bool XMLLogRotate; // True = Rotate XML Log.

// Live stats

snf_SMHDMY_Counter MessageCounter;
snf_SMHDMY_Counter HamCounter;
snf_SMHDMY_Counter SpamCounter;
snf_SMHDMY_Counter WhiteCounter;
snf_SMHDMY_Counter CautionCounter;
snf_SMHDMY_Counter BlackCounter;
snf_SMHDMY_Counter TruncateCounter;
snf_SMHDMY_Counter SampleCounter;
snf_SMHDMY_Counter AutoPanicCounter;
snf_SMHDMY_Counter RulePanicCounter;
snf_SMHDMY_Counter TimeCounter;

// Histograms

Histogram ResultsSecond;
Histogram ResultsMinute;
Histogram ResultsHour;
Histogram RulesSecond;
Histogram RulesMinute;
Histogram RulesHour;
Histogram PanicsSecond;
Histogram PanicsMinute;
Histogram PanicsHour;

// Reporting

string NodeId; // We need this for our status msgs.
void do_StatusReports(); // Update & sequence status reports.

int XML_Log_Mode; // What is the XML log mode.
int Classic_Log_Mode; // What is the Classic log mode.

// Every second we get the basics and collect data. (local only)

bool SecondReport_Log_OnOff;
bool SecondReport_Append_OnOff;
string SecondReport_Log_Filename;
string SecondReportText;
string SecondReportTimestamp;
bool do_SecondReport(); // Send our 1 second status report.

// Every minute we get hard data and event logs. (for sync)

bool MinuteReport_Log_OnOff;
bool MinuteReport_Append_OnOff;
string MinuteReport_Log_Filename;
string MinuteReportText;
string MinuteReportTimestamp;
Histogram PatternRulesHistogram;
bool do_MinuteReport(); // Send our 1 minute status report.

// Every hour we get a summary.

bool HourReport_Log_OnOff;
bool HourReport_Append_OnOff;
string HourReport_Log_Filename;
string HourReportText;
string HourReportTimestamp;
bool do_HourReport(); // Send our 1 hour status report.

void postStatusLog( // Post a Status log if required.
const string& LogData, // Here's the log entry's data.
const string& LogFileName, // Here is where it should go.
const bool LogEnabled, // This is true if we should write it.
const bool AppendNotOverwrite, // True=Append, False=Overwrite.
DiscLogger& Logger // Lazy Log Writer to use.
);

DiscLogger SecondStatusLogger; // Lazy writer for Second status.
DiscLogger MinuteStatusLogger; // Lazy writer for Minute status.
DiscLogger HourStatusLogger; // Lazy writer for Hour status.
DiscLogger XMLScanLogger; // Lazy writer for XML Scan log.
DiscLogger ClassicScanLogger; // Lazy writer for Classic Scan log.

void doXHDRs(snfCFGData& CFGData, snfScanData& ScanData); // XHDR subroutine for LogThisScan()
void doXMLLogs(snfCFGData& CFGData, snfScanData& ScanData); // XML subroutine for LogThisScan()
void doClassicLogs(snfCFGData& CFGData, snfScanData& ScanData); // Classic subroutine for LogThisScan()

void captureLTSMetrics(snfCFGData& CFGData, snfScanData& ScanData); // LogThisScan section 1, Locked.
void performLTSLogging(snfCFGData& CFGData, snfScanData& ScanData); // LogThisScan section 2, Unlocked.

public:

snfLOGmgr(); // Initialize & start the thread.
~snfLOGmgr(); // Stop the thread & clean up.

void stop(); // Stops the manager.

void linkNETmgr(snfNETmgr& N); // Link in my NETmgr
void linkGBUdb(GBUdb& G); // Link in my GBUdb

void configure(snfCFGData& CFGData); // Update the configuration.

void updateActiveUTC(string ActiveUTC); // Set active rulebase UTC.

void logThisIPTest(IPTestRecord& I, string Action); // Capture the data from an IP test.

void logThisScan(snfCFGData& CFGData, snfScanData& ScanData); // Capture the data from this scan.

void logThisError(snfScanData& ScanData, const string ContextName, // Inject an error log entry for this
const int Code, const string Text // scan using this number & message.
);

void logThisError(string ContextName, int Code, string Text); // Log an error message.

void logThisInfo(string ContextName, int Code, string text); // Log an informational message.

string PlatformVersion(string NewPlatformVersion); // Set platform version info.
string PlatformVersion(); // Get platform version info.

string EngineVersion(); // Get engine version info.

void updateAvailableUTC(string& AvailableRulebaseTimestamp); // Stores available UTC & sets update-ready flag.
string ActiveRulebaseTimestamp(); // Get active rulebase timestamp.
string AvailableRulebaseTimestamp(); // Get available rulebase timestamp.
bool isUpdateAvailable(); // True if update is available.


bool OkToPeek(int PeekOneInX); // Check to see if it's ok to peek.
bool OkToSample(int SampleOneInX); // Check to see if it's ok to sample.

time_t Timestamp(); // Get an ordinary timestamp.
string Timestamp(time_t t); // Convert time_t to a timestamp s.
string& Timestamp(string& s); // Appends a current timestamp in s.
string LocalTimestamp(time_t t); // Convert time_t to a local timestamp s.
string& LocalTimestamp(string& s); // Appends a current local timestamp in s.
unsigned int SerialNumber(); // Returns the next serial number.
string& SerialNumber(string& s); // Appends the next serial number.

void RecordSyncEvent(); // Sets timestamp of latest Sync.
int SecsSinceLastSync(); // Gets seconds since latest Sync.
void RecordSaveEvent(); // Sets timestamp of latest Save.
int SecsSinceLastSave(); // Gets seconds since latest Save.
void RecordCondenseEvent(); // Sets timestamp of latest Condense.
int SecsSinceLastCondense(); // Gets seconds since latest Condense.

// Live stats functions

double MessagesPerMinute(); // Avg Msgs/Minute.
double HamPerMinute(); // Avg Ham/Minute.
double SpamPerMinute(); // Avg Spam/Minute.
double WhitePerMinute(); // Avg White/Minute.
double CautionPerMinute(); // Avg Caution/Minute.
double BlackPerMinute(); // Avg Black/Minute.
double TruncatePerMinute(); // Avg Truncate/Minute.
double SamplePerMinute(); // Avg Sample/Minute.
int LatestRuleID(); // Returns the latest Rule ID seen.

int RunningTime(); // Seconds running since startup.

string getStatusSecondReport(); // Get latest status.second report.
string getStatusMinuteReport(); // Get latest status.minute report.
string getStatusHourReport(); // Get latest status.hour report.

const static ThreadType Type; // The thread's type.

};

#include "snfLOGmgr.inline.hpp"

#endif

//// End snfLOGmgr include only once
////////////////////////////////////////////////////////////////////////////////

+ 121
- 0
snfLOGmgr.inline.hpp Прегледај датотеку

@@ -0,0 +1,121 @@
// snfLOGmgr.inline.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC.
// Inline methods for the snfLOGmgr

//// snfScanData ///////////////////////////////////////////////////////////////

inline int snfScanData::IPScanCount() { // Return the number of IPs.
return MyIPCount;
}

inline IPScanRecord& snfScanData::newIPScanRecord() { // Get the next free IP scan record.
if(MaxIPsPerMessage <= MyIPCount) { // Check that we have more records.
throw NoFreeIPScanRecords(); // If we do not then throw!
} // If we do have more records then
IPScanRecord& NewRecord = MyIPScanData[MyIPCount]; // Pick the next available one,
NewRecord.Ordinal = MyIPCount; // set the ordinal value,
++MyIPCount; // increase our count, and
return NewRecord; // return the one we picked.
}

inline IPScanRecord& snfScanData::IPScanData(int i) { // Return the IP scan record i.
if(MyIPCount <= i || 0 > i) { // First check that i is in bounds.
throw OutOfBounds(); // if it is not then throw!
} // If the record for [i] is available
return MyIPScanData[i]; // return it.
}

inline void snfScanData::drillPastOrdinal(int O) { // Sets Drill Down flag for IP record O.
if(0 <= O && O < MaxIPsPerMessage) { // If O is a useable Received ordinal
DrillDownFlags[O] = true; // then set the Drill Down Flag for O.
}
}

inline bool snfScanData::isDrillDownSource(IPScanRecord& X) { // True if we drill through this source.
if(
(0UL != myCallerForcedSourceIP) || // If the source IP has been forced by
(0UL != myHeaderDirectiveSourceIP) // the caller or by a header directive
) return false; // then drilldowns are disabled.
// Otherwise check for a drilldown flag.
return DrillDownFlags[X.Ordinal]; // Presuming X is valid, return the flag.
} // If X is not valid we may blow up!

inline IPScanRecord& snfScanData::SourceIPRecord(IPScanRecord& X) { // Sets the source IP record.
SourceIPOrdinal = X.Ordinal; // Here's the ordinal.
SourceIPFoundFlag = true; // Here's the truth flag.
return X; // Return the record that was set.
}

inline IPScanRecord& snfScanData::SourceIPRecord() { // Gets the source IP record.
return IPScanData(SourceIPOrdinal); // Return the IP record, or throw
} // OutOfBounds.

inline bool snfScanData::FoundSourceIP() { // True if the source IP record was set.
return SourceIPFoundFlag; // Return what the flag says.
}

inline snfIPRange snfScanData::SourceIPRange(snfIPRange R) { // Establish the IP range.
return (SourceIPRangeFlag = R); // set and return the value w/ R.
}

inline snfIPRange snfScanData::SourceIPRange() { // Gets the source IP detection range.
return SourceIPRangeFlag; // Return what the flag says.
}

inline IP4Address snfScanData::HeaderDirectiveSourceIP(IP4Address A) { // set Header directive source IP.
if(0UL == myHeaderDirectiveSourceIP) myHeaderDirectiveSourceIP = A; // If this value is not set, set it.
return myHeaderDirectiveSourceIP; // Return the value.
}

inline IP4Address snfScanData::HeaderDirectiveSourceIP() { // get Header directive source IP.
return myHeaderDirectiveSourceIP; // Return the current value.
}

inline IP4Address snfScanData::CallerForcedSourceIP(IP4Address A) { // set Caller forced source IP.
if(0UL == myCallerForcedSourceIP) myCallerForcedSourceIP = A; // If this value is not set, set it.
return myCallerForcedSourceIP; // Return the value.
}

inline IP4Address snfScanData::CallerForcedSourceIP() { // get Caller forced source IP.
return myCallerForcedSourceIP; // Return the current value.
}

//// snfLOGmgr /////////////////////////////////////////////////////////////////

inline void snfLOGmgr::updateActiveUTC(string ActiveUTC) { // Update Active Rulebase UTC.
ScopeMutex Freeze(MyMutex); // Protect the strings.
ActiveRulebaseUTC = ActiveUTC; // Update the active timestamp.
NewerRulebaseIsAvailable = false; // Update availability is now unknown.
}

inline void snfLOGmgr::updateAvailableUTC(string& AvailableRulebaseTimestamp) { // Changes update availability stamp.
ScopeMutex Freeze(MyMutex); // Protect the strings.
AvailableRulebaseUTC = AvailableRulebaseTimestamp; // Store the new timestamp.
if(0 < AvailableRulebaseUTC.compare(ActiveRulebaseUTC)) { // If the available timestamp is newer
NewerRulebaseIsAvailable = true; // than the active then set the flag.
} else { // If it is not newer then
NewerRulebaseIsAvailable = false; // reset the flag.
}
}

inline string snfLOGmgr::ActiveRulebaseTimestamp() { // Get active rulebase timestamp.
ScopeMutex Freeze(MyMutex); // Protect the string.
return ActiveRulebaseUTC; // Return it.
}

inline string snfLOGmgr::AvailableRulebaseTimestamp() { // Get available rulebase timestamp.
ScopeMutex Freeze(MyMutex); // Protect the strings.
return AvailableRulebaseUTC; // Return the available timestamp.
}

inline bool snfLOGmgr::isUpdateAvailable() { // True if update is available.
return NewerRulebaseIsAvailable; // Return the flag's value.
}

inline int snfLOGmgr::LatestRuleID() { // Query the latest rule id.
return Status.LatestRuleID; // This simple value is atomic
} // so we can read it without the mutex.

inline int snfLOGmgr::RunningTime() { // Get the time we've been alive.
return (int) difftime(Timestamp(), StartupTime);
}

+ 773
- 0
snfNETmgr.cpp Прегледај датотеку

@@ -0,0 +1,773 @@
// snfNETmgr.cpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// See snfNETmgr.hpp for details.

#include <sys/types.h>
#include <sys/stat.h>
#include <ctime>
#include <cstring>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include "snfNETmgr.hpp"
#include "snf_sync.hpp"
#include "mangler.hpp"
#include "base64codec.hpp"
// #include "tcp_watchdog.hpp" No longer using TCPWatchdog -- see below _M

using namespace std;

//// snfNETmgr /////////////////////////////////////////////////////////////////

const ThreadType snfNETmgr::Type("snfNETManager"); // The thread's type.

const ThreadState snfNETmgr::Sleeping("Sleeping"); // Taking a break.
const ThreadState snfNETmgr::SYNC_Connect("Connecting"); // Connecting to SYNC server.
const ThreadState snfNETmgr::SYNC_Read_Challenge("Reading challenge"); // Reading challenge.
const ThreadState snfNETmgr::SYNC_Compute_Response("Computing crypto"); // Computing crypto response.
const ThreadState snfNETmgr::SYNC_Send_Response("Sending crypto"); // Sending crypto response.
const ThreadState snfNETmgr::SYNC_Read_Availabilty("Reading Availability"); // Reading rulebase status.
const ThreadState snfNETmgr::SYNC_Send_GBUdb_Alerts("Sending GBUdb"); // Sending GBUdb alerts.
const ThreadState snfNETmgr::SYNC_Send_Status_Reports("Sending Status"); // Sending status reports.
const ThreadState snfNETmgr::SYNC_Send_Samples("Sending Samples"); // Sending message samples.
const ThreadState snfNETmgr::SYNC_Send_End_Of_Report("Sending End"); // Sending end of client data.
const ThreadState snfNETmgr::SYNC_Read_Server_Response("Reading Server"); // Reading server data.
const ThreadState snfNETmgr::SYNC_Close_Connection("Closing Connection"); // Closing connection.
const ThreadState snfNETmgr::SYNC_Parse_GBUdb_Reflections("Parsing GBUdb"); // Parsing GBUdb reflections.
const ThreadState snfNETmgr::SYNC_Log_Event("Logging SYNC"); // Logging SYNC event.

snfNETmgr::snfNETmgr() : // Starting up the NETmgr
Thread(snfNETmgr::Type, "NET Manager"), // Network manager and Name.
SYNCTimer(30000), // Sync every 30 secs by default.
SyncSecsOverride(-1), // Override is -1 by default.
myLOGmgr(NULL),
isTimeToStop(false),
isConfigured(false) { // On construction, NETmgr
run(); // runs its thread.
}

snfNETmgr::~snfNETmgr() { // On destruction, NETmgr must
stop(); // stop its thread (if not already)
myLOGmgr = NULL; // Clear out the LOGmgr hookup
isConfigured = false; // and the configured flag.
}

void snfNETmgr::stop() { // The stop method...
if(!isTimeToStop) { // only does its work once:
isTimeToStop = true; // tells its thread to stop
join(); // and waits for it to shut down.
}
}

void snfNETmgr::myTask() { // Here's the thread task.
Sleeper WaitASecond(1000); // Heartbeat timer.
while(false == isTimeToStop) { // Until it's time to stop,
CurrentThreadState(Sleeping); // post our status,
WaitASecond(); // pause for a second,
if(isConfigured) { // then poll our tasks.

// Do stuff here that requires configuration data.

if(SYNCTimer.isExpired()) { sync(); SYNCTimer.restart(); } // If it's time to sync - do it :-)

}
}
}

void snfNETmgr::linkLOGmgr(snfLOGmgr& L) { // Set the LOGmgr.
myLOGmgr = &L;
}

void snfNETmgr::linkGBUdbmgr(snfGBUdbmgr& G) { // Set the GBUdbmgr.
myGBUdbmgr = &G;
}

// In theory, configure will get called each time the rulebase manager loads
// a new configuration / rulebase. The configure() method updates the bits of
// NETmgr that run background tasks. Live-Data tasks pass their grab()bed
// CFGData object in order to maintain self-consistency.

void snfNETmgr::configure(snfCFGData& CFGData) { // Update the configuration.
ScopeMutex CFGDataExchange(ConfigMutex); // Lock the config data during updates.

// Update the internal config data from CFGData while we are locked.
// Internal functions which depend on this data will lock the object,
// grab the bits they depend upon for that pass, and then unlock.

RulebaseFilePath = CFGData.RuleFilePath; // Where we can find our rulebase?
SyncHostName = CFGData.network_sync_host; // Where do we connect to sync?
SyncHostPort = CFGData.network_sync_port; // What port do we use to sync?

HandshakeFilePath = CFGData.paths_workspace_path + ".handshake"; // Where we store our handshake.
UpdateReadyFilePath = CFGData.paths_workspace_path + "UpdateReady.txt"; // Where we put update trigger files.

const int SecsAsms = 1000; // Multiplier - seconds to milliseconds.

SyncSecsConfigured = CFGData.network_sync_secs; // Capture the configured sync time.

if(0 > SyncSecsOverride) { // If the sync timer isn't in override,
if(SYNCTimer.getDuration() != (SyncSecsConfigured * SecsAsms)) { // And the config time is different than
SYNCTimer.setDuration(SyncSecsConfigured * SecsAsms); // the timer's current setting then set
} // the timer to the new value.
} // If we are in override, timer is set.

License = CFGData.node_licenseid; // Capture our node id (license id).
SecurityKey = CFGData.SecurityKey; // Capture our security key.
evolvePad(CFGData.SecurityKey); // Seed our Pad generator with it.

// Safety check before turning this on ;-)

if(
NULL != myLOGmgr &&
NULL != myGBUdbmgr
) { // If we are properly linked then
isConfigured = true; // at this point we are configured!
}
}

void snfNETmgr::sendSample( // Send a sampled message...
snfCFGData& CFGData, // Use this configuration,
snfScanData& ScanData, // Include this scan data,
const unsigned char* MessageBuffer, // This is the message itself
int MessageLength // and it is this size.
) {
string TimeStamp; (*myLOGmgr).Timestamp(TimeStamp); // Grab a timestamp.
ostringstream XML; // Make formatting easier with this.

//-- <sample...>

XML << "<sample node=\'" << CFGData.node_licenseid << "\' "
<< "time=\'" << TimeStamp << "\' "
<< "result=\'" << ScanData.CompositeFinalResult << "\'>" << endl;

//-- <ip...>

XML << "<ip range=\'";
string IPRange;
switch(ScanData.SourceIPRange()) {
case Unknown: { IPRange = "Unknown"; break; } // Unknown - not defined.
case White: { IPRange = "White"; break; } // This is a good guy.
case Normal: { IPRange = "Normal"; break; } // Benefit of the doubt.
case New: { IPRange = "New"; break; } // It is new to us.
case Caution: { IPRange = "Caution"; break; } // This is suspicious.
case Black: { IPRange = "Black"; break; } // This is bad.
case Truncate: { IPRange = "Truncate"; break; } // Don't even bother looking.
}

SocketAddress IP;
IP.setAddress(ScanData.SourceIPRecord().IP);

XML << IPRange << "\' ip=\'" << (string) IP4Address(IP.getAddress()) << "\' t=\'";

string IPType;
switch(ScanData.SourceIPRecord().GBUdbData.Flag()) {
case Good: { IPType = "Good"; break; }
case Bad: { IPType = "Bad"; break; }
case Ugly: { IPType = "Ugly"; break; }
case Ignore: { IPType = "Ignore"; break; }
}

XML << IPType << "\' b=\'" << ScanData.SourceIPRecord().GBUdbData.Bad()
<< "\' g=\'" << ScanData.SourceIPRecord().GBUdbData.Good()
<< "\'/>" << endl;

//-- <match...> as many as needed

if(0 < ScanData.MatchRecords.size()) { // If we have match records - emit them.
list<snf_match>::iterator iM; // Grab an iterator.
for( // Emit each snf_match entry.
iM = ScanData.MatchRecords.begin();
iM != ScanData.MatchRecords.end();
iM++) {
XML << "<match r=\'" << (*iM).ruleid << "\' "
<< "g=\'" << (*iM).symbol << "\' "
<< "i=\'" << (*iM).index << "\' "
<< "e=\'" << (*iM).endex << "\' "
<< "f=\'" << (*iM).flag << "\'/>";
}
}

//-- <msg...>

XML << "<msg size=\'" << ScanData.ScanSize << "'>" << endl; // Starting with the msg element.
to_base64 EncodedMessageData(
reinterpret_cast<const char*>(MessageBuffer), MessageLength); // Encode the message to base64.

const int SampleLineLength = 64; // 64 bytes per line is good.
for(int i = 0; i < MessageLength;) { // Now we break it into lines
for(int l = 0; l < SampleLineLength && i < MessageLength; l++, i++) { // that are a reasonable length.
XML << EncodedMessageData.at(i); // Emit one character at a time...
} // At the end of a reasonable
XML << endl; // length we terminate the line.
}
XML << "</msg>" << endl; // End of the <msg> element.

//-- done with the sample!

XML << "</sample>" << endl;

// Last thing we do is post the formatted string to the buffer.
const int SampleSafetyLimit = 100000; // 100 Kbyte limit on samples.
ScopeMutex DoNotDisturb(myMutex); // Don't bug me man I'm busy.
if(SampleSafetyLimit < SamplesBuffer.length()) // If the samples buffer is full
SamplesBuffer.clear(); // clear it before adding more.
SamplesBuffer.append(XML.str()); // Append the XML to the buffer.
}
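
// For reference, a sketch of the XML appended to SamplesBuffer above (all
// attribute values below are hypothetical placeholders):
//
// <sample node='LICENSE' time='20090101120000' result='63'>
// <ip range='Caution' ip='192.0.2.1' t='Ugly' b='12' g='3'/>
// <match r='1234567' g='63' i='100' e='120' f='1'/>
// <msg size='2048'>
// ...base64 encoded message data, 64 characters per line...
// </msg>
// </sample>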

string snfNETmgr::getSamples() { // Synchronized way to get Samples.
ScopeMutex DoNotDisturb(myMutex); // Lock the mutex to protect our work.
string SamplesBatch = SamplesBuffer; // Copy the samples to a new string.
SamplesBuffer.clear(); // Clear the samples buffer.
return SamplesBatch; // Return a batch of Samples.
}

void snfNETmgr::sendReport(const string& S) { // How to send a status report.
const int ReportSafetyLimit = 100000; // 100 Kbytes limit on reports.
ScopeMutex DoNotDisturb(myMutex); // Lock the mutex for a moment.
if(ReportSafetyLimit < ReportsBuffer.length()) // If the reports buffer is full
ReportsBuffer.clear(); // clear it before adding more.
ReportsBuffer.append(S); // Append the report.
}

string snfNETmgr::getReports() { // Synchronized way to get Reports.
ScopeMutex DoNotDisturb(myMutex); // Lock the mutex to protect our work.
string ReportsBatch = ReportsBuffer; // Copy the reports to a new string.
ReportsBuffer.clear(); // Clear the reports buffer.
return ReportsBatch; // Return a batch of Reports.
}

string& snfNETmgr::RulebaseUTC(string& t) { // Gets local rulebase file UTC.
struct stat RulebaseStat; // First we need a stat buffer.
if(0 != stat(RulebaseFilePath.c_str(), &RulebaseStat)) { // If we can't get the stat we
t.append("000000000000"); return t; // will return 000000000000 to
} // make sure we should get the file.
struct tm RulebaseTime; // Allocate a time structure.
RulebaseTime = *(gmtime(&RulebaseStat.st_mtime)); // Copy the file time to it as UTC.

char TimestampBfr[20]; // Timestamp buffer.

sprintf(TimestampBfr,"%04d%02d%02d%02d%02d%02d\0", // Format yyyymmddhhmmss
RulebaseTime.tm_year+1900,
RulebaseTime.tm_mon+1,
RulebaseTime.tm_mday,
RulebaseTime.tm_hour,
RulebaseTime.tm_min,
RulebaseTime.tm_sec
);

t.append(TimestampBfr); // Append the timestamp to t
return t; // and return it to the caller.
}

unsigned long snfNETmgr::ResolveHostIPFromName(const string& N) { // Host name resolution tool.
ScopeMutex OneAtATimePlease(ResolverMutex); // Resolve only one at a time.
unsigned long IP = inet_addr(N.c_str()); // See if it's an IP.
if (INADDR_NONE == IP) { // If it's not an IP resolve it.
hostent* H = gethostbyname(N.c_str()); // Resolve the host.
if (NULL == H) { // If we didn't get a resolution
return INADDR_NONE; // return no address.
} // If we did resolve the address
IP = *((unsigned long*)H->h_addr_list[0]); // get the primary entry.
}
return ntohl(IP); // Return what we got (host order)
}

// The Evolving One Time Pad engine is just slightly better than calling
// rand() with the system time as a seed. However, it does have the advantage
// that in order to guess its initial state an attacker would need to already
// know the license id and authentication. It also has the advantage that it
// adds small amounts of entropy over time and never really forgets them. For
// example, the exact time between calls to evolvePad depends on how long it
// takes to sync, which depends on how much data there is to report, which in
// turn depends on the number and size of messages scanned, etc... and
// this is also impacted a bit by network performance issues during the sync.
// Sensitivity to this entropy has millisecond resolution. This is a cross-
// platform solution that depends only on our own code ;-)

void snfNETmgr::evolvePad(string Entropy) { // Add entropy and evolve.
ScopeMutex OneAtATimePlease(PadMutex); // Protect the one time pad.
myLOGmgr->Timestamp(Entropy); // Time matters ;-)
int x; // We want to capture this.
for(int a = 0; a < Entropy.length(); a++) { // Add the entropy to our generator.
x = PadGenerator.Encrypt(Entropy.at(a));
}
msclock rt = myLOGmgr->RunningTime(); // Get the elapsed running time so far.
unsigned char* rtb = reinterpret_cast<unsigned char*>(&rt); // Convert that long long into bytes.
for(int a = 0; a < sizeof(msclock); a++) { // Encrypt those bytes one by one
PadGenerator.Encrypt(rtb[a]); // to add more entropy.
}
}

// To get a pad of any length you like, use the OneTimePad()
// Note that we don't assign a value to x before using it! If we get lucky,
// we will get some random value from ram as additional entropy ;-) If we end
// up starting with zero, that's ok too.

PadBuffer snfNETmgr::OneTimePad(int Len) { // Get Len bytes of one time pad.
PadBuffer B; // Start with a buffer.
B.reserve(Len); // Reserve Len bytes.
unsigned char x; // Get an unsigned char, unknown value.
for(int a = 0; a < Len; a++) { // Create Len bytes of pad by evolving
B.push_back(x = PadGenerator.Encrypt(x)); // x through itself and copying the
} // data into the buffer.
return B; // Return the result.
}
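
// A minimal sketch of the same evolving-pad pattern (illustration only, not
// part of the original API): fold a seed string into a MANGLER, then let it
// chase its tail to emit pad bytes. The name exampleMakePad() is hypothetical.

PadBuffer exampleMakePad(MANGLER& G, const string& Seed, int Len) {
for(size_t a = 0; a < Seed.length(); a++) { // Fold the seed into the
G.Encrypt(Seed.at(a)); // generator's state.
}
PadBuffer B; // Then evolve the generator
unsigned char x = 0; // through itself to produce
for(int i = 0; i < Len; i++) { // Len pseudo-random bytes.
B.push_back(x = G.Encrypt(x));
}
return B; // Return the sketch pad.
}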

// Handshake tries to return the current stored handshake. If it can't then it
// returns a new handshake based on data from the pad generator.

PadBuffer snfNETmgr::Handshake() { // What is the current handshake?
if(CurrentHandshake.size() != SNFHandshakeSize) { // If we don't have one make one!
CurrentHandshake = OneTimePad(SNFHandshakeSize); // Set up a default handshake to use
try { // if we can't remember the real one.
ifstream HSF(HandshakeFilePath.c_str(), ios::binary); // Open the handshake file.
char* bfr = reinterpret_cast<char*>(&CurrentHandshake[0]); // Manufacture a proper pointer.
HSF.read(bfr, SNFHandshakeSize); // Read the data (overwrite the HSB).
HSF.close(); // Close the file.
} catch(...) { } // Ignore any errors.
}
return CurrentHandshake; // Return the buffer.
}

PadBuffer& snfNETmgr::Handshake(PadBuffer& NewHandshake) { // Store a new handshake.
CurrentHandshake = NewHandshake; // Grab the new handshake
try { // then try to store it...
ofstream HSF(HandshakeFilePath.c_str(), ios::binary | ios::trunc); // Open the handshake file.
char* bfr = reinterpret_cast<char*>(&NewHandshake[0]); // Access the raw buffer.
HSF.write(bfr, NewHandshake.size()); // Replace the old handshake
HSF.close(); // close the file.
} catch(...) {} // Ignore errors.
return NewHandshake; // Return what we were given.
}

void snfNETmgr::postUpdateTrigger(string& updateUTC) { // Post an update trigger file.
try { // Safely post an update trigger.
ofstream HSF(UpdateReadyFilePath.c_str(), ios::binary | ios::trunc); // Open/create the trigger file.
char* bfr = reinterpret_cast<char*>(&updateUTC[0]); // Access the raw UTC buffer.
HSF.write(bfr, updateUTC.size()); // Write the update timestamp.
HSF.close(); // close the file.
} catch(...) {} // Ignore errors.
}

// Utility to read a line from a non-blocking TCPHost & check the timeout.

const int MaxReadLineLength = 1024; // How long a line can be.
string readLineTimeout(TCPHost& S, Timeout& T) { // Read a line from S until T.
Sleeper WaitForMoreData(50); // How long to wait when no data.
string LineBuffer = ""; // Buffer for the line.
while( // Keep going as long as:
false == T.isExpired() && // our timeout has not expired AND
MaxReadLineLength > LineBuffer.length() // we haven't reached our limit.
) {
char c = 0; // One byte at a time
if(1 == S.receive(&c, sizeof(c))) { // Read from the TCPHost.
LineBuffer.push_back(c); // Push the byte onto the string.
if('\n' == c) break; // If it was a newline we're done!
} else { // If we didn't get any data
WaitForMoreData(); // pause before our next run.
}
}
return LineBuffer; // Always return our buffer.
}

// Utility to write data to a non-blocking TCPHost & check the timeout.

// Some networks can only handle small packets and fragmentation can be a
// problem. Also, on Win* especially, sending small chunks is _MUCH_ more
// reliable than trying to send large buffers all at once. SO - here we break
// down our sending operations into medium sized chunks of data. The underlying
// OS can reorganize these chunks as needed for the outgoing stream. If the OS
// needs us to slow down (doesn't send full chunks) then we introduce a small
// delay between chunks to give the channel more time.

const int MaxSendChunkSize = 512; // Size of one chunk in a write.
void sendDataTimeout(TCPHost& S, Timeout& T, char* Bfr, int Len) { // Send and keep track of time.
Sleeper WaitForMoreRoom(15); // Wait to send more data.
int Remaining = Len; // This is how much we have left.
while( // For as long as:
false == T.isExpired() && // We still have time left AND
0 < Remaining // We still have data left
) {
int ThisChunkSize = Remaining; // Hope to send it all in one chunk
if(MaxSendChunkSize < ThisChunkSize) ThisChunkSize = MaxSendChunkSize; // but break it down as needed.
int SentThisTime = S.transmit(Bfr, ThisChunkSize); // Send the data. How much went?
Remaining -= SentThisTime; // Calculate how much is left.
Bfr += SentThisTime; // Move our pointer (old school!)
if(ThisChunkSize > SentThisTime) WaitForMoreRoom(); // If some of this chunk didn't go
} // then pause before the next chunk.
}

void sendDataTimeout(TCPHost& S, Timeout& T, string& D) { // Send a string and keep track
sendDataTimeout(S, T, const_cast<char*>(D.c_str()), D.length()); // of time. (Polymorphism is fun)
}
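
// A minimal usage sketch (illustration only, not part of the original API):
// a caller pairs one Timeout with a whole exchange so every chunked write and
// line read draws down the same budget. exampleTimedExchange() is hypothetical.

void exampleTimedExchange(TCPHost& Server, string& Request) { // Send then read on one budget.
Timeout Budget(30000); // Thirty seconds for the exchange.
sendDataTimeout(Server, Budget, Request); // Chunked send, mindful of the budget.
string Reply = readLineTimeout(Server, Budget); // Read one reply line on the same budget.
if(Budget.isExpired()) Reply.clear(); // An expired budget means the reply
} // may be incomplete -- discard it.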

void snfNETmgr::sync() { // Synchronize with central command.

// Keep these things in scope. This is how we roll.

string HostName;
int HostPort;
string Secret;
string Node;

// Grab our configuration data (marching orders).

if(!isConfigured) return; // If we're not configured, don't!
else {
ScopeMutex GettingConfig(ConfigMutex); // Temporarily lock our config.
HostName = SyncHostName; // We will connect to this host.
HostPort = SyncHostPort; // We will connect to this port.
Secret = SecurityKey; // Get the security key.
Node = License; // Get the Node ID.
}

try { // Lots can go wrong so catch it :-)

// 20080326 _M Blocking sockets tend to lock up so I've refactored this
// code to use non-blocking sockets. This is actually part of the previous
// refactor (TCPWatchdog see below) since without the watchdog there is no
// way to get out of a blocking socket if it's dead.

// 20080325 _M TCPWatchdog is a brute. It doesn't pay attention to thread
// states. A weird bug showed up where the SYNC session seemed to hang and
// the TCPWatchdog was left alive. In the process of hunting down this bug
// I decided to remove the TCPWatchdog and put appropriate timeout checking
// in each of the comms loops instead. So, from now on:
// if(SessionDog.isExpired()) throw SyncFailed("Out Of Time");

const int SyncSessionTimeout = 2 * SYNCTimer.getDuration(); // Timeout is twice poll time.
Timeout SessionDog(SyncSessionTimeout); // Give this long for a session.

// Connect to the sync host.

CurrentThreadState(SYNC_Connect);

SocketAddress SyncHostAddress; // We'll need an address.
SyncHostAddress.setPort(HostPort); // Set the port.
SyncHostAddress.setAddress(ResolveHostIPFromName(HostName)); // Resolve and set the IP.
TCPHost SyncServer(SyncHostAddress); // Set up a host connection.
SyncServer.makeNonBlocking(); // Make the connection non-blocking.

PollTimer WaitForOpen(10, 340); // Expand 10ms to 340ms between tries.
while(!SessionDog.isExpired()) { // Wait & Watch for a good connection.
try { SyncServer.open(); } // Try opening the connection.
catch(exception& e) { // If we get an exception then
string ConnectFailMessage = "snfNETmgr::sync().open() "; // format a useful message about
ConnectFailMessage.append(e.what()); // the error and then throw
throw SyncFailed(ConnectFailMessage); // a SyncFailed exception.
}
if(SyncServer.isOpen()) break; // When successful, let's Go!
else WaitForOpen.pause(); // When not yet successful, pause
} // then try again if we have time.

if(!SyncServer.isOpen()) throw SyncFailed("Connect Timed Out"); // Check our connection.

if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Start communicating.

string LineBuffer = ""; // Input Line Buffer.

// Read challenge

CurrentThreadState(SYNC_Read_Challenge);

LineBuffer = readLineTimeout(SyncServer, SessionDog); // Read the challenge line.
snf_sync Challenge(LineBuffer.c_str(), LineBuffer.length()); // Interpret what we read.
if( // Check that it's good...
Challenge.bad() || // A complete packet was read
0 >= Challenge.snf_sync_challenge_txt.length() // and the challenge is present.
) throw SyncFailed("sync() Challenge.bad()"); // If not then throw.

if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Write response

CurrentThreadState(SYNC_Compute_Response);

from_base64 DecodedChallenge(Challenge.snf_sync_challenge_txt); // Decode the challenge.

//--- Prepare the secret.

MANGLER ResponseGenerator; // Grab a mangler.
for(int i = 0; i < Secret.length(); i++) // Fill it with the
ResponseGenerator.Encrypt(Secret.at(i)); // security key.

const int ManglerKeyExpansionCount = 1024; // Loop this many to randomize.
for(int x = 0, i = 0; i < ManglerKeyExpansionCount; i++) // For the required number of loops,
x = ResponseGenerator.Encrypt(x); // have Mangler chase its tail.

//--- Absorb the challenge.

for(int i = 0; i < DecodedChallenge.size(); i++) // Evolve through the challenge.
ResponseGenerator.Encrypt(DecodedChallenge.at(i));

/*** We now have half of the key for this session ***/

//--- Encrypt our Pad.

PadBuffer NewPad = OneTimePad(); // Grab a new Pad (default size).

base64buffer ResponseBin; // With the key now established,
for(int i = 0; i < NewPad.size(); i++) // encrypt the one time pad for
ResponseBin.push_back( // transfer.
ResponseGenerator.Encrypt(NewPad[i]));

//--- Encrypt our Handshake.

PadBuffer CurrentHandshake = Handshake(); // Recall the secret handshake.
for(int i = 0; i < CurrentHandshake.size(); i++) // Encrypt that into the stream.
ResponseBin.push_back(
ResponseGenerator.Encrypt(CurrentHandshake[i]));

//--- Encrypt our Signature.

for(int x = 0, i = 0; i < SNFSignatureSize; i++) // Generate a hash by having Mangler
ResponseBin.push_back( // chase its tail for the appropriate
x = ResponseGenerator.Encrypt(x)); // number of bytes.

//--- Encode our response as base64 and send it.

to_base64 ResponseTxt(ResponseBin); // Encode the cyphertext as base64.
string ResponseTxtString; // Create a handy string and place
ResponseTxtString.assign(ResponseTxt.begin(), ResponseTxt.end()); // the base 64 text into it.

string ResponseMsg; // Build an appropriate response
ResponseMsg.append("<snf><sync><response nodeid=\'"); // identifying this node
ResponseMsg.append(Node); // with the license id
ResponseMsg.append("\' text=\'"); // and providing an appropriately
ResponseMsg.append(ResponseTxtString); // mangled response string
ResponseMsg.append("\'/></sync></snf>\n"); // for authentication.

CurrentThreadState(SYNC_Send_Response);

sendDataTimeout(SyncServer, SessionDog, ResponseMsg); // Send the response.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Read rulebase info or error

CurrentThreadState(SYNC_Read_Availabilty);

LineBuffer = readLineTimeout(SyncServer, SessionDog); // Read the rulebase status line.
snf_sync RulebaseResponse(LineBuffer.c_str(), LineBuffer.length()); // Interpret what we read.
if( // Check that it's good...
RulebaseResponse.bad() // A complete packet was read.
) throw SyncFailed("sync() Response.bad()"); // If not then throw.

if(0 < RulebaseResponse.snf_sync_error_message.length()) { // If the response was an error
PadBuffer NewNullHandshake; // then we will assume we are out
NewNullHandshake.assign(SNFHandshakeSize, 0); // of sync with the server so we
Handshake(NewNullHandshake); // will set the NULL handshake and
throw SyncFailed("sync() Response error message"); // fail this sync attempt.
}
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Update Handshake

for(int x = 0, i = 0; i < ManglerKeyExpansionCount; i++) // For the required number of loops,
x = ResponseGenerator.Encrypt(x); // have Mangler chase its tail.

PadBuffer NewHandshake; // Grab a new handshake buffer.
for(int x = 0, i = 0; i < SNFHandshakeSize; i++) // Create the new handshake as a
NewHandshake.push_back( // mangler hash of the current
x = ResponseGenerator.Encrypt(x)); // key state (proper length of course).

Handshake(NewHandshake); // Save our new handshake to disk.

// Interpret Rulebase Response

myLOGmgr->updateAvailableUTC(RulebaseResponse.snf_sync_rulebase_utc); // Store the latest update UTC.
if(myLOGmgr->isUpdateAvailable()) { // If a new update is ready then
postUpdateTrigger(RulebaseResponse.snf_sync_rulebase_utc); // create an update trigger file.
}

// Write our Client reports (multi-line)

CurrentThreadState(SYNC_Send_GBUdb_Alerts);

string ClientReport;
ClientReport.append("<snf><sync><client>\n");
sendDataTimeout(SyncServer, SessionDog, ClientReport);
ClientReport = "";

if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Insert our GBUdb Alerts.

list<GBUdbAlert> Alerts; // Make a list of GBUdb Alerts.
myGBUdbmgr->GetAlertsForSync(Alerts); // Get them from our GBUdb.
list<GBUdbAlert>::iterator iA;
for(iA = Alerts.begin(); iA != Alerts.end(); iA++) { // Convert each alert in our list
ClientReport.append((*iA).toXML()); // into XML, follow it up
ClientReport.append("\n"); // with a new line, and send it
}
sendDataTimeout(SyncServer, SessionDog, ClientReport); // Send the Client report data.
ClientReport = ""; // Clear the buffer.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Send Status Reports - one line at a time.

CurrentThreadState(SYNC_Send_Status_Reports);

/**
*** Instead of splitting up the reports by line we will try sending them
*** all at once using the new sendDataTimeout() function.
***
if(0 < ReportsBuffer.length()) { // If we have reports - send them.
string DataToSend = getReports(); // Grab a copy and clear the buffer.
int Cursor = 0; // We need a cursor and a length
int Length = 0; // to help us feed this line by line.
while(Cursor < DataToSend.length()) { // While we have more data...
Length = DataToSend.find_first_of('\n', Cursor); // Find the end of the first line.
if(string::npos == Length) break; // If we can't then we're done.
Length = (Length + 1) - Cursor; // If we can, convert that to length.
SyncServer.transmit( // Get and send the line using the
DataToSend.substr(Cursor, Length).c_str(), // substring function.
Length
);
Cursor = Cursor + Length; // Move the cursor for the next line.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.
}
}

**/

if(0 < ReportsBuffer.length()) { // If we have reports to send
string DataToSend = getReports(); // get (and clear) the reports and
sendDataTimeout(SyncServer, SessionDog, DataToSend); // send them (mindful of timeout).
}
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.


// Send Samples - one line at a time.

CurrentThreadState(SYNC_Send_Samples);

/***

if(0 < SamplesBuffer.length()) {
string DataToSend = getSamples();
int Cursor = 0; // We need a cursor and a length
int Length = 0; // to help us feed this line by line.
while(Cursor < DataToSend.length()) { // While we have more data...
Length = DataToSend.find_first_of('\n', Cursor); // Find the end of the first line.
if(string::npos == Length) break; // If we can't then we're done.
Length = (Length + 1) - Cursor; // If we can, convert that to length.
SyncServer.transmit( // Get and send the line using the
DataToSend.substr(Cursor, Length).c_str(), // substring function.
Length
);
Cursor = Cursor + Length; // Move the cursor for the next line.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.
}
}

***/

if(0 < SamplesBuffer.length()) { // If we have samples to send
string DataToSend = getSamples(); // get (and clear) the samples and
sendDataTimeout(SyncServer, SessionDog, DataToSend); // send them (mindful of timeout).
}
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Terminate the client messages.

CurrentThreadState(SYNC_Send_End_Of_Report);

ClientReport.append("</client></sync></snf>\n");

sendDataTimeout(SyncServer, SessionDog, ClientReport); // Send the Client report.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.

// Read the Server response (multi-line)

CurrentThreadState(SYNC_Read_Server_Response);

string ServerResponse;
string ResponseLine;
while(string::npos == ResponseLine.find("</snf>\n")) { // Until we find the ending...
ResponseLine = readLineTimeout(SyncServer, SessionDog); // Read a line.
if(0 >= ResponseLine.length()) { // If we get an empty line
throw SyncFailed("sync() server response empty line"); // then it's an error.
}
ServerResponse.append(ResponseLine); // Append the line.
if(SessionDog.isExpired()) throw SyncFailed("Out Of Time"); // Check our session time.
}

snf_sync ServerMessages(
ServerResponse.c_str(), ServerResponse.length()); // Interpret what we read.
if( // Check that it's good...
ServerMessages.bad() // A complete packet was read.
) throw SyncFailed("sync() ServerMessages.bad()"); // If not then throw.

// At this point we should have a good Server response.

CurrentThreadState(SYNC_Close_Connection);

SyncServer.close(); // Close the connection.
evolvePad(Challenge.snf_sync_challenge_txt); // Use this event for more entropy.

// Import any GBUdb reflections.

CurrentThreadState(SYNC_Parse_GBUdb_Reflections);

if(0 < ServerMessages.ServerGBUAlertHandler.AlertList.size()) { // If we have received reflections
myGBUdbmgr->ProcessReflections( // then process them through our
ServerMessages.ServerGBUAlertHandler.AlertList // GBUdb.
);
}

/*** On Sync Override set sync timer to override time. If no override
**** then be sure to reset the timer to the current CFG value if it
**** is not already there. Also, if sync override is not engaged then
**** be sure the override flag is set to -1 indicating it is off.
**** Configure() code assumes we are handling the override sync timer
**** functions this way.
***/

// Assign the SyncSecsOverride with the value we retrieved. It will
// either be a seconds value, or a -1 indicating it was absent from
// the server message.

SyncSecsOverride = ServerMessages.snf_sync_server_resync_secs; // What was the SyncOverride?
const int SecsAsms = 1000; // Multiplier - seconds to milliseconds.

if(0 > SyncSecsOverride) { // If the sync timer IS NOT in override,
if(SYNCTimer.getDuration() != (SyncSecsConfigured * SecsAsms)) { // And the config time is different than
SYNCTimer.setDuration(SyncSecsConfigured * SecsAsms); // the timer's current setting then set
} // the timer to the new value.
} else { // If the sync timer IS in override now,
if(SYNCTimer.getDuration() != (SyncSecsOverride * SecsAsms)) { // and the override is different than the
SYNCTimer.setDuration(SyncSecsOverride * SecsAsms); // current setting then override the setting
} // with the new value.
}

// All done

CurrentThreadState(SYNC_Log_Event);

(*myLOGmgr).RecordSyncEvent(); // Finished that -- so log the event.

}
catch (exception& e) { // SYNC Failed and we know more.
const int snf_UNKNOWN_ERROR = 99; // Report an error (unknown code)
string ERROR_SYNC_FAILEDmsg = CurrentThreadState().Name; // Format a useful state message.
ERROR_SYNC_FAILEDmsg.append(": ");
ERROR_SYNC_FAILEDmsg.append(e.what());
(*myLOGmgr).logThisError( // Log the error (if possible)
"SNF_NETWORK", snf_UNKNOWN_ERROR, ERROR_SYNC_FAILEDmsg
);
}
catch (...) { // SYNC Failed if we're here.
const int snf_UNKNOWN_ERROR = 99; // Report an error (unknown code)
string ERROR_SYNC_FAILEDmsg = CurrentThreadState().Name; // Format a useful state message.
ERROR_SYNC_FAILEDmsg.append(": Panic!");
(*myLOGmgr).logThisError( // Log the error (if possible)
"SNF_NETWORK", snf_UNKNOWN_ERROR, ERROR_SYNC_FAILEDmsg
);
}
}

+ 138
- 0
snfNETmgr.hpp Прегледај датотеку

@@ -0,0 +1,138 @@
// snfNETmgr.hpp
//
// (C) Copyright 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// SNF network node manager.

// 20080312 _M Refactored exceptions to std::runtime_exception

#ifndef snfNETmgr_included
#define snfNETmgr_included

#include <stdexcept>
#include <vector>
#include "networking.hpp"
#include "timing.hpp"
#include "threading.hpp"
#include "snfCFGmgr.hpp"
#include "snfLOGmgr.hpp"
#include "snfGBUdbmgr.hpp"
#include "mangler.hpp"

class snfScanData; // Declare snfScanData;
class snfLOGmgr; // Declare snfLOGmgr;
class snfGBUdbmgr; // Declare snfGBUdbmgr;

using namespace std;

typedef vector<unsigned char> PadBuffer; // Holds one time pads etc.
const int SNFHandshakeSize = 8; // Size of an SNF Handshake.
const int SNFChallengeSize = 32; // Size of an SNF Challenge.
const int SNFPadSize = 16; // Size of an SNF One Time Pad.
const int SNFSignatureSize = SNFHandshakeSize; // Size of an SNF Signature.

class snfNETmgr : public Thread { // The network process manager.
private:

Mutex myMutex; // Object is busy mutex.
Mutex ResolverMutex; // Mutex to protect lookups.
Mutex ConfigMutex; // Configuration change/use mutex.
Mutex PadMutex; // Pad use/evolution mutex.

snfLOGmgr* myLOGmgr; // Log manager to use.
snfGBUdbmgr* myGBUdbmgr; // GBUdb manager to use.

volatile bool isTimeToStop; // Time to shutdown flag.
volatile bool isConfigured; // True once ready to run.

Timeout SYNCTimer; // SYNC timer.

void evolvePad(string Entropy = ""); // Add entropy and evolve the pad.
MANGLER PadGenerator; // Random pad source.
PadBuffer OneTimePad(int Len = SNFPadSize); // Provides Len bytes of one time pad.

// Configuration data

string License; // Node (license) Id?
string SecurityKey; // Security key for this rulebase?
string RulebaseFilePath; // Where we can find our rulebase?
string HandshakeFilePath; // Where do we keep our handshake?
string UpdateReadyFilePath; // Where do I put update trigger files?
string SyncHostName; // Where do we connect to sync?
int SyncHostPort; // What port do we use to sync?
int SyncSecsOverride; // How many secs between syncs (override)?
int SyncSecsConfigured; // How many secs to sync (nominally)?

PadBuffer Handshake(); // What is the current handshake?
PadBuffer& Handshake(PadBuffer& NewHandshake); // Store a new handshake.
PadBuffer CurrentHandshake; // Where we keep our current handshake.

void postUpdateTrigger(string& updateUTC); // Post an update trigger file.

string SamplesBuffer; // Message Samples Appended Together.
string getSamples(); // Synchronized way to get Samples.
string ReportsBuffer; // Status Reports Appended Together.
string getReports(); // Synchronized way to get Reports.

public:

snfNETmgr(); // Construct and start.
~snfNETmgr(); // Shutdown and destruct.

void stop(); // How to stop the thread.
void myTask(); // Define the thread task.

void linkLOGmgr(snfLOGmgr& L); // Set the LOGmgr.
void linkGBUdbmgr(snfGBUdbmgr& G); // Set the GBUdbmgr.
void configure(snfCFGData& CFGData); // Update the configuration.

class SyncFailed : public runtime_error { // Thrown if sync doesn't work.
public: SyncFailed(const string& w):runtime_error(w) {}
};

// Operations

// Why have configure AND pass CFGData in action calls?
// The configure() method updates background task configuration items.
// The CFGData passed on action calls identifies the configuration in use for
// that particular operation -- it might be different from the current CFG
// if the CFG has been updated recently (reload).

void sendSample( // Send a sampled message...
snfCFGData& CFGData, // Use this configuration,
snfScanData& ScanData, // Include this scan data,
const unsigned char* MessageBuffer, // This is the message itself
int MessageLength // and it is this size.
);

void sendReport(const string& StatusReportText); // Send a status report...

void sync(); // Do the whole "sync" thing.

// Utility Functions

unsigned long ResolveHostIPFromName(const string& N); // Find the IP.
string& RulebaseUTC(string& t); // Gets local rulebase file UTC.

const static ThreadType Type; // The thread's type.

const static ThreadState Sleeping; // Taking a break.
const static ThreadState SYNC_Connect; // Connecting to SYNC server.
const static ThreadState SYNC_Read_Challenge; // Reading challenge.
const static ThreadState SYNC_Compute_Response; // Computing crypto response.
const static ThreadState SYNC_Send_Response; // Sending crypto response.
const static ThreadState SYNC_Read_Availabilty; // Reading rulebase status.
const static ThreadState SYNC_Send_GBUdb_Alerts; // Sending GBUdb alerts.
const static ThreadState SYNC_Send_Status_Reports; // Sending status reports.
const static ThreadState SYNC_Send_Samples; // Sending message samples.
const static ThreadState SYNC_Send_End_Of_Report; // Sending end of client data.
const static ThreadState SYNC_Read_Server_Response; // Reading server data.
const static ThreadState SYNC_Close_Connection; // Closing connection.
const static ThreadState SYNC_Parse_GBUdb_Reflections; // Parsing GBUdb reflections.
const static ThreadState SYNC_Log_Event; // Logging SYNC event.

};

#endif


+ 787
- 0
snfXCImgr.cpp Прегледај датотеку

@@ -0,0 +1,787 @@
// snfXCImgr.cpp
// Copyright (C) 2007 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// See snfXCImgr.hpp for details.

#include "SNFMulti.hpp"
#include "snfXCImgr.hpp"

using namespace std;

// snfXCIServerCommandHandler Virtual Base Class Default Processor.

const string XCIServerCommandDefaultResponse =
"<snf><xci><server><response message=\'Not Implemented\' code=\'-1\'/></server></xci></snf>\n";

string snfXCIServerCommandHandler::processXCIRequest(snf_xci& X) { // A Server using SNFMulti
return XCIServerCommandDefaultResponse; // can provide a useful processor.
}

// snfXCIJob encapsulates a single XCI transaction.

void snfXCIJob::clear() { // Clear the buffers.
Request.clear(); // Clear the request and
Response.clear(); // response buffers.
SetupTime = 0; // No setup time yet.
}

// snfXCIJobProcessor encapsulates the logic to respond to an XCI request.

snfXCIJobProcessor::snfXCIJobProcessor(snf_RulebaseHandler* H) : // Setup scanner.
myHome(H) { // Establish myHome from H.
myEngine = new snf_EngineHandler(); // Create an engine handler and
myEngine->open(H); // tie it in to our home rulebase.
}

snfXCIJobProcessor::~snfXCIJobProcessor() { // Tear down scanner.
if(myEngine) { // Checking first that we have one,
myEngine->close(); // close the engine and then
delete myEngine; // delete it. Set the pointer to
myEngine = 0; // NULL to enforce the point.
}
myHome = 0; // NULL out our home too.
}

//// This collection of functions handle the processing of all XCI requests.

bool snfXCIJobProcessor::isScanJob() { // True if myXCI is a scan job.
if(0 < myXCI.scanner_scan_file.length()) return true; // If we have a scan file: true!
return false; // otherwise false.
}

bool snfXCIJobProcessor::isGBUdbJob() { // True if myXCI is a GBUdb job.
if( // GBUdb jobs have either
0 < myXCI.gbudb_test_ip.length() || // an IP to test or
0 < myXCI.gbudb_set_ip.length() || // an IP to setup or
0 < myXCI.gbudb_bad_ip.length() || // a bad IP to flag or
0 < myXCI.gbudb_good_ip.length() || // a good IP to flag or
0 < myXCI.gbudb_drop_ip.length()
) return true; // If we have one of these: true!
return false; // otherwise false.
}

bool snfXCIJobProcessor::isReportJob() { // True if myXCI is a Report job.
if(0 < myXCI.report_request_status_class.length()) return true; // If we have a report status class
return false; // it's a report otherwise it's not.
}

bool snfXCIJobProcessor::isCommandJob() { // True if myXCI is a Command job.
if(0 < myXCI.xci_server_command.length()) return true; // If we have a command string: true!
return false; // otherwise false.
}

void snfXCIJobProcessor::processScan(snfXCIJob& J) { // Process a scan request.
try { // Safely perform our scan.

// Check for forced IP.

IP4Address ForcedIP = 0UL; // Don't expect a forced IP.
if(0 < myXCI.scanner_scan_ip.length()) { // If we have one then
ForcedIP = myXCI.scanner_scan_ip; // convert it from the string.
}

// Scan the message file.

int ScanResult = // Scan the file using our
myEngine->scanMessageFile( // engine. Use the file
myXCI.scanner_scan_file.c_str(), // path in the XCI request, and
J.SetupTime, // the recorded setup time. Use the
ForcedIP // forced IP if provided.
);

// Create a proper xci response.

ostringstream ResultString; // Use a stringstream to make it easier.
ResultString
<< "<snf><xci><scanner><result code=\'" // Emit the preamble.
<< ScanResult << "\'"; // Emit the scan result.

if( // Check for optional data requests.
false == myXCI.scanner_scan_xhdr &&
false == myXCI.scanner_scan_log
) { // If no optional data was requested
ResultString // then close the <request/> and
<< "/></scanner></xci></snf>" // emit the closing elements.
<< endl; // End of the line.

} else { // If optional data is requested:
ResultString << ">" << endl; // Complete the <result> open tag.

if(true == myXCI.scanner_scan_xhdr) { // Optionally include XHDR data.
ResultString // If xheaders are requested...
<< "<xhdr>" << myEngine->getXHDRs() // Emit the xhdr element & contents.
<< "</xhdr>" << endl; // End the xhdr and end of line.
}

if(true == myXCI.scanner_scan_log) { // Optionally include XMLLog data.
ResultString // If the log data is requested...
<< "<log>" << myEngine->getXMLLog() // Emit the log element & data.
<< "</log>" << endl; // End the log data and end of line.
}

ResultString << "</result></scanner></xci></snf>"; // Emit the closing elements.
}

J.Response = ResultString.str(); // Capture the formatted response.
}

// Decode the known exceptions

catch(snf_EngineHandler::AllocationError& e) {
J.Response = "<snf><xci><error message=\'AllocationError ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::BadMatrix& e) {
J.Response = "<snf><xci><error message=\'BadMatrix ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::Busy& e) {
J.Response = "<snf><xci><error message=\'Busy ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::FileError& e) {
J.Response = "<snf><xci><error message=\'FileError ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::MaxEvals& e) {
J.Response = "<snf><xci><error message=\'MaxEvals ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::Panic& e) {
J.Response = "<snf><xci><error message=\'Panic ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(snf_EngineHandler::XHDRError& e) {
J.Response = "<snf><xci><error message=\'XHDRError ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

// Decode the unknown exceptions

catch(exception& e) {
J.Response = "<snf><xci><error message=\'Exception! ";
J.Response.append(e.what());
J.Response.append("\'/></xci></snf>\n");
}

catch(...) {
J.Response = "<snf><xci><error message=\'... Thrown!\'/></xci></snf>\n";
}
}
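
// For illustration only: with no optional data requested the stream above
// yields a single line of roughly this shape, where the code value is
// whatever scanMessageFile() returned:
//
// <snf><xci><scanner><result code='0'/></scanner></xci></snf>
//
// When xhdr and/or log data are requested the <result> element is left open
// and wraps <xhdr>...</xhdr> and/or <log>...</log> before the closing
// </result></scanner></xci></snf>.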

string snfXCIJobProcessor::processGBUdb() { // Process a GBUdb request.
GBUdb& myGBUdb = myHome->MyGBUdb; // Make a convenient GBUdb handle.
IP4Address IP; // We will work with an IP.
GBUdbRecord R; // We will get a record to return.

// Test an IP - return its current data.

if(0 < myXCI.gbudb_test_ip.length()) { // IF: Test an IP
IP = myXCI.gbudb_test_ip; // Convert the IP.
} else

// Set or update an IP's data.

if(0 < myXCI.gbudb_set_ip.length()) { // IF: Set an IP's data.
IP = myXCI.gbudb_set_ip; // Convert the IP.
if( // Check for a compound update:
0 <= myXCI.gbudb_set_bad_count || // If we are changing the bad
0 <= myXCI.gbudb_set_good_count // or good count then this is
) { // a compound update (read then write).
R = myGBUdb.getRecord(IP); // Get the record (or a safe blank).
if(0 <= myXCI.gbudb_set_bad_count) // If we have a bad count to set
R.Bad(myXCI.gbudb_set_bad_count); // then set the bad count.
if(0 <= myXCI.gbudb_set_good_count) // If we have a good count to set
R.Good(myXCI.gbudb_set_good_count); // then set the good count.
if(0 < myXCI.gbudb_set_type.length()) { // If type, set type...
switch(myXCI.gbudb_set_type.at(0)) { // Determine the type based on the
case 'g': case 'G': { R.Flag(Good); break; } // first character of the name and
case 'b': case 'B': { R.Flag(Bad); break; } // set the appropriate flag.
case 'u': case 'U': { R.Flag(Ugly); break; }
case 'i': case 'I': { R.Flag(Ignore); break; }
}
}
myGBUdb.setRecord(IP, R); // Save the data.

} else // This might be a simple flag change.
if(0 < myXCI.gbudb_set_type.length()) { // If type, set type...
switch(myXCI.gbudb_set_type.at(0)) { // Determine the type based on the
case 'g': case 'G': { R = myGBUdb.setGood(IP); break; } // first character of the name and
case 'b': case 'B': { R = myGBUdb.setBad(IP); break; } // set the appropriate flag. Simple
case 'u': case 'U': { R = myGBUdb.setUgly(IP); break; } // flag changes are atomic so there is
case 'i': case 'I': { R = myGBUdb.setIgnore(IP); break; } // no need to "save" later.
}
} else { // Empty set command?
return XCIBadSetResponse; // That's bad. Use test!
}
} else

// Add a bad event to an IP's data.

if(0 < myXCI.gbudb_bad_ip.length()) { // IF: Add a bad mark for this IP
IP = myXCI.gbudb_bad_ip; // Convert the IP.
R = myGBUdb.addBad(IP); // Add a bad mark.
} else

// Add a good event to an IP's data.

if(0 < myXCI.gbudb_good_ip.length()) { // IF: Add a good mark for this IP
IP = myXCI.gbudb_good_ip; // Convert the IP.
R = myGBUdb.addGood(IP); // Add a good mark.
} else

// Drop an IP from the database.

if(0 < myXCI.gbudb_drop_ip.length()) { // IF: Drop an IP's data.
IP = myXCI.gbudb_drop_ip; // Convert the IP.
myGBUdb.dropRecord(IP); // Forget about it.
}

// Return the final state of the IP's data.

IPTestRecord IPState(IP);
myHome->performIPTest(IPState);

ostringstream Response; // Use a stringstream for our output.
Response
<< "<snf><xci><gbudb><result " // Get the response started.
<< "ip=\'" << (string) IP // Emit the ip.
<< "\' type=\'" // Emit the type.
<< ((Good == IPState.G.Flag()) ? "good" :
((Bad == IPState.G.Flag()) ? "bad" :
((Ugly == IPState.G.Flag()) ? "ugly" :
((Ignore == IPState.G.Flag()) ? "ignore" : "error"))))
<< "\' p=\'" << IPState.G.Probability() // Emit the probability.
<< "\' c=\'" << IPState.G.Confidence() // Emit the confidence.
<< "\' b=\'" << IPState.G.Bad() // Emit the bad count.
<< "\' g=\'" << IPState.G.Good() // Emit the good count.
<< "\' range=\'"
<< ((Unknown == IPState.R) ? "unknown" :
((White == IPState.R) ? "white" :
((Normal == IPState.R) ? "normal" :
((New == IPState.R) ? "new" :
((Caution == IPState.R) ? "caution" :
((Black == IPState.R) ? "black" :
((Truncate == IPState.R) ? "truncate" : "error")))))))
<< "\' code=\'" << IPState.Code
<< "\'"
<< "/></gbudb></xci></snf>" // Finish it up.
<< endl;

return Response.str(); // Return the formatted response.
}
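
// For illustration only: a result rendered by the stream above takes roughly
// this single-line shape. Every value depends on the record and the
// configured ranges; 192.0.2.1 is just a documentation address:
//
// <snf><xci><gbudb><result ip='192.0.2.1' type='ugly' p='0.5' c='0.1' b='12' g='12' range='caution' code='0'/></gbudb></xci></snf>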

string snfXCIJobProcessor::processStatusReport() { // Process a report request.
string ReportToSend; // Keep this in scope.

if(0 == myXCI.report_request_status_class.find("hour")) { // Please send the hour report.
ReportToSend = myHome->MyLOGmgr.getStatusHourReport();
} else

if(0 == myXCI.report_request_status_class.find("minute")) { // Please send the minute report.
ReportToSend = myHome->MyLOGmgr.getStatusMinuteReport();
} else { // Please send the second report.
ReportToSend = myHome->MyLOGmgr.getStatusSecondReport();
}

string Response = "<snf><xci><report><response>"; // Construct the response using the
Response.append(ReportToSend); // snf/xci template and the selected
Response.append("</response></report></xci></snf>"); // status report text.

return Response; // Return the response.
}

void snfXCIJobProcessor::process(snfXCIJob& J) { // Process a Job.

// Parse the XCI request and check for an error.

myXCI.read(J.Request); // Parse the request.
if(myXCI.bad()) { // If it's bad then
J.Response = XCIErrorResponse; // respond with an error.
myHome->logThisError("XCI",-1,"Bad Request"); // Log the error.
return; // Done.
} else

// Process scan requests.

if(isScanJob()) { // If this is a Scan request
processScan(J); // respond with the result.
return; // Done.
} else

// Process gbudb requests.

if(isGBUdbJob()) { // If this is a GBUdb request
J.Response = processGBUdb(); // respond with the result.
return; // Done.
} else

// Process report requests.

if(isReportJob()) { // If this is a Status report request
J.Response = processStatusReport(); // respond with the desired report.
return; // Done.
} else

// Process server commands.

if(isCommandJob()) { // If this is a server command
J.Response = myHome->processXCIServerCommandRequest(myXCI); // pass it up and return the
return; // result. Done.
} else

// If we get to this point we don't understand the well formed request.

J.Response = XCIErrorResponse; // Don't understand?
myHome->logThisError("XCI",-2,"Unrecognized Request"); // Log the error. Respond with
return; // the standard error response.
}

// ChannelJob encapsulates a Client Job while in the queue and how long it has
// been in the system (since created).

ChannelJob::ChannelJob() : myClient(0) {} // Empty is the null client.

ChannelJob::ChannelJob(TCPClient* C) : // We are created like this.
myClient(C) { // We capture the client and
} // our timer starts automatically.

msclock ChannelJob::Age() { // How old is this job?
return Lifetime.getElapsedTime(); // Return the elapsed time in ms.
}

TCPClient* ChannelJob::Client() { // What client does it hold?
return myClient; // Return the Client pointer.
}

// snfXCITCPChannel encapsulates the logic to queue and handle TCPClients for
// the XCI interface. The queued TCPClients each represent a single request.
// Each request is handled in turn by reading the request into an snfXCIJob,
// handing that snfXCIJob to an snfXCIJobProcessor, transmitting the result
// back to the TCPClient, closing the connection, and recycling the snfXCIJob
// object for the next round.

// snfXCITCPChannel shuts down when given a NULL TCPClient; This allows any
// jobs in queue to be handled before the thread stops. To shut down a channel
// { C->submit(NULL); C->join(); delete C; C = NULL;}

void snfXCITCPChannel::give(ChannelJob& J) { // Give a job to the queue.
ScopeMutex OneAtATimePlease(QueueMutex); // Protected with a mutex...
JobQueue.push(J); // Push the job in.
LatestSize = JobQueue.size(); // Set the blinking light.
QueueGateway.produce(); // Add the item to our gateway.
}

ChannelJob snfXCITCPChannel::take() { // Take a job from the queue.
QueueGateway.consume(); // Hold on until there is work.
ScopeMutex OneAtATimePlease(QueueMutex); // Queue Data Protected with a mutex.
ChannelJob J = JobQueue.front(); // Grab the next job in the queue.
JobQueue.pop(); // Pop that job out of the queue.
LatestSize = JobQueue.size(); // Set the blinking light.
return J; // Return the Job.
}

const int RWTimeLimit = 30000; // RWTimeLimit in ms. 30 seconds.
const string endSNF = "</snf>"; // snf_xci snf element terminator.
const int RWPollMin = 15; // Minimum time between polls.
const int RWPollMax = 75; // Maximum time between polls.
const int MaxQueueLength = 32; // Most waiting in any queue.
const int MaxTCPQueueLength = 4 * MaxQueueLength; // Most connections waiting.

void snfXCITCPChannel::readRequest(TCPClient* Client) { // Read Job.Request from Client.
Timeout ReadTimeLimit(RWTimeLimit); // We have time limits.
PollTimer ReadThrottle(RWPollMin, RWPollMax); // Throttle with a spiral delay.
while(
false == ReadTimeLimit.isExpired() && // Read stuff until we're out of time
string::npos == Job.Request.find(endSNF,0) // or we have a complete request.
) {
memset(LineBuffer, 0, sizeof(LineBuffer)); // Clear the buffer.
int bytes = Client->delimited_receive( // Read up to all but one byte
LineBuffer, sizeof(LineBuffer)-1, '\n'); // of the buffer up to the first \n.
if(0 < bytes) { // If we got some bytes
Job.Request.append(LineBuffer); // Append the data we got and
ReadThrottle.reset(); // reset the throttle.
} else { // If we didn't get any bytes then
ReadThrottle.pause(); // wait a little bit more each round.
}
} // When we're done we will return.
}

void snfXCITCPChannel::writeResponse(TCPClient* Client) { // Write Job.Response to Client.
Timeout WriteTimeLimit(RWTimeLimit); // We have a time limit.
PollTimer WriteThrottle(RWPollMin, RWPollMax); // Throttle with a spiral delay.
for( // For all the bytes in the response:
int Length = Job.Response.length(), BytesThisTime = 0, Bytes = 0; // Bytes to send, this time and sent.
Bytes < Length && // Keep going if we've got more to
false == WriteTimeLimit.isExpired(); // send and we still have time.
) {
BytesThisTime = Client->transmit( // Transmit some bytes.
&Job.Response[Bytes], Job.Response.length()-Bytes); // from where we are, what is left.
if(0 < BytesThisTime) { // If we sent bytes
Bytes += BytesThisTime; // then keep track of how many
WriteThrottle.reset(); // and reset our throttle to min.
} else { // If we didn't then pause a bit
WriteThrottle.pause(); // and let our delay grow.
}
}
}

const int XCI_Reading = 0; // XCI Mode Flags.
const int XCI_Processing = 1;
const int XCI_Writing = 2;

void snfXCITCPChannel::myTask() { // Thread's main loop.
bool WeAreAlive = true; // It's not over 'til it's over.
while(WeAreAlive) { // While we are alive:
CurrentThreadState(XCI_Wait); // Mark our state.
ChannelJob J = take(); // Pull a Client Job from the queue.
if(0 == J.Client()) { // If the job is empty we're done.
CurrentThreadState(XCI_Shutdown); // Mark our state.
WeAreAlive = false; // Turn off the alive flag and
break; // break out of the loop.

} else { // When we have a job to do:
int XCIMode = XCI_Reading; // Default so the catch below always sees a valid mode.
try {
CurrentThreadState(XCI_Read);
XCIMode = XCI_Reading; // Now we are reading.
readRequest(J.Client()); // Read the client job.

CurrentThreadState(XCI_Process);
XCIMode = XCI_Processing; // Now we are processing.
Job.SetupTime = J.Age(); // Capture the read and queue time.
Processor.process(Job); // Pass the XCIJob to our processor.

CurrentThreadState(XCI_Write);
XCIMode = XCI_Writing; // Now we are writing.
writeResponse(J.Client()); // Write the response.
}

// Log any exceptions that were thrown.

catch(...) {
switch(XCIMode) {
case XCI_Reading: {
myHome->logThisError("XCI",-5,"SocketReadError");
break;
}
case XCI_Processing: {
myHome->logThisError("XCI",-6,"ProcessError");
break;
}
case XCI_Writing: {
myHome->logThisError("XCI",-7,"SocketWriteError");
break;
}
}
}
}

// At the end of every job we clean up no matter what.

if(0 != J.Client()) { // If we have a client
CurrentThreadState(XCI_Close);
J.Client()->close(); // Close the client.
delete J.Client(); // Delete the client.
}

CurrentThreadState(XCI_Clear);
Job.clear(); // Clear the job buffer.
} // Go again.
}

const ThreadType snfXCITCPChannel::Type("snfXCITCPChannel"); // The thread's type.

//// XCI Thread States

const ThreadState snfXCITCPChannel::XCI_Wait("Waiting For Take()");
const ThreadState snfXCITCPChannel::XCI_Read("Reading Request");
const ThreadState snfXCITCPChannel::XCI_Process("Processing Job");
const ThreadState snfXCITCPChannel::XCI_Write("Writing Results");
const ThreadState snfXCITCPChannel::XCI_Close("Closing Connection");
const ThreadState snfXCITCPChannel::XCI_Clear("Clearing Workspace");
const ThreadState snfXCITCPChannel::XCI_Shutdown("Shutting Down");

snfXCITCPChannel::snfXCITCPChannel(snf_RulebaseHandler* H, string N) : // Create these with a home rulebase.
Thread(snfXCITCPChannel::Type, N), // XCI TCP Channel Type & name.
myHome(H), // We know our home.
Processor(H), // Our processor has a rulebase.
LatestSize(0) { // Our job queue size is zero.
run(); // We start our thread.
}

snfXCITCPChannel::~snfXCITCPChannel() { // Destroy them very carefully.
ChannelJob EndJob; // On the way down feed ourselves
give(EndJob); // an empty job - that will end our
join(); // thread once other jobs are done.
myHome = 0; // Once joined our home is gone.
} // We're done.

int snfXCITCPChannel::Size() { // Keep track of how full they are.
return LatestSize; // Flash the blinking light.
}

void snfXCITCPChannel::submit(TCPClient* C) { // This is how we submit jobs.
ChannelJob J(C); // Create a Job for this client.
give(J); // Give it (copy) to the queue.
}

// snfXCImgr encapsulates a service engine that takes XCI requests via TCP,
// performs the required actions, and returns an XCI response. It also checks
// to see if the configuration for the XCI interface has changed.

void snfXCImgr::checkCFG() { // Checks the configuration.
CurrentThreadState(XCI_CheckConfig); // Update our status.
int NEW_XCI_Port; // Prepare for a change in port.

// Quickly as we can, grab a config packet, capture the XCI parts, and
// then let it go.

if(myHome->isReady()) { // If we know our home then
snfCFGPacket MyCFGPacket(myHome); // Grab a configuration packet.
if(MyCFGPacket.bad()) { // If it's not valid then
return; // wait (skip this) till next time.
} else { // If we've got a good config then
CFG_XCI_ON = MyCFGPacket.Config()->XCI_OnOff; // Is XCI turned on?
NEW_XCI_Port = MyCFGPacket.Config()->XCI_Port; // Which port do we listen on?
} // If our rulebase manager was
} else return; // not ready (skip this) for now.

if(CFG_XCI_ON) { // If the XCI is configured up:

if(NEW_XCI_Port != CFG_XCI_PORT) { // Check for a port change. If the
CFG_XCI_PORT = NEW_XCI_Port; // port changed then check for a live
if(Listener) { // listener. For a live port change
shutdown_Listener(); // shut down the current listener and
myHome->logThisInfo("XCI", 0, "ListenerDown:PortChanged"); // log the activity.
startup_Listener(); // Restart the listener with the new
myHome->logThisInfo("XCI", 0, "ListenerUp:PortChanged"); // port and log the event.
}
}

startup_XCI(); // Make sure the XCI is up.

} else { // If the XCI is configured down
shutdown_XCI(); // then make sure it is down.
}
}

snfXCITCPChannel* LowestQueue(snfXCITCPChannel* A, snfXCITCPChannel* B) { // Pick the lowest queue of two.
return ((A->Size() < B->Size()) ? A : B); // Pick one and return it.
}

snfXCITCPChannel* snfXCImgr::BestAvailableChannel() { // Selects XCI channel w/ lowest queue.
return LowestQueue( // Pick the lowest of the lowest.
LowestQueue(C0, C1),
LowestQueue(C2, C3)
);
}

void snfXCImgr::startup_Listener() { // Listener startup function.
if(0 == Listener) { // If we need a new listener:
Listener = new TCPListener(CFG_XCI_PORT); // Create a new listener.
Listener->MaxPending = MaxTCPQueueLength; // We may get a lot of hits ;-)
Listener->open(); // Open it for business.
Listener->makeNonBlocking(); // Make it non-blocking.
}
}

void snfXCImgr::shutdown_Listener() { // Listener shutdown function.
if(Listener) { // Only act if there is a listener:
Listener->close(); // The listener gets closed,
delete Listener; // then deleted, then the
Listener = 0; // Listener pointer is zeroed.
}
}

void snfXCImgr::startup_XCI() { // XCI startup function.
if(true == XCI_UP) return; // If we're already up we're done.
ScopeMutex IGotIt(ChannelMutex); // Serialize state control for safety.
if(myHome) { // We need to know our home.
if(CFG_XCI_ON) { // If XCI is configured on, startup!
C0 = new snfXCITCPChannel(myHome, "C0"); // Launch our 4 processing channels.
C1 = new snfXCITCPChannel(myHome, "C1");
C2 = new snfXCITCPChannel(myHome, "C2");
C3 = new snfXCITCPChannel(myHome, "C3");
startup_Listener(); // Start up our listener.
myHome->logThisInfo("XCI", 0, "Startup"); // Log the startup.
XCI_UP = true; // Set the flag. We're up!
}
}
}

void snfXCImgr::shutdown_XCI() { // XCI shutdown function.
if(false == XCI_UP) return; // If we're already down we're done.
ScopeMutex IGotIt(ChannelMutex); // Serialize state control for safety.
shutdown_Listener(); // If up, take down & 0 the Listener.
if(C0) { delete C0; C0 = 0; } // If up, take C0 down and NULL it.
if(C1) { delete C1; C1 = 0; } // If up, take C1 down and NULL it.
if(C2) { delete C2; C2 = 0; } // If up, take C2 down and NULL it.
if(C3) { delete C3; C3 = 0; } // If up, take C3 down and NULL it.

myHome->logThisInfo("XCI", 0, "Shutdown"); // Log the shutdown.
XCI_UP = false; // Set the flag. We're down!
}

int snfXCImgr::pollLoopCount() { // Retrieve & reset Loop Count.
int x = diagLoopCount;
diagLoopCount = 0;
return x;
}

int snfXCImgr::pollClientCount() { // Retrieve & reset Client Count.
int x = diagClientCount;
diagClientCount = 0;
return x;
}

const ThreadState snfXCImgr::XCI_InitialConfig("Initial Config"); // Getting initial configuration.
const ThreadState snfXCImgr::XCI_InitialStartup("Initial Startup"); // Performing first startup.
const ThreadState snfXCImgr::XCI_CheckConfig("Checking Config"); // Checking configuration.
const ThreadState snfXCImgr::XCI_PollingListener("Polling Listener"); // Polling Listener for jobs.
const ThreadState snfXCImgr::XCI_SubmittingJob("Submitting Job"); // Submitting a new job.
const ThreadState snfXCImgr::XCI_ListenerDown("Listener Down!"); // Listener is down.
const ThreadState snfXCImgr::XCI_Stopping("Exited Polling Loop"); // XCImgr Exiting Big Loop

void snfXCImgr::myTask() { // Main thread task.
PollTimer PollingThrottle(RWPollMin, RWPollMax); // Set up a dynamic delay.
Timeout WaitForCFG(1000); // CFG Check every second or so.

// Wait for our initial configuration.
CurrentThreadState(XCI_InitialConfig); // Update our status.

Sleeper WaitATic(1000); // One second sleeper.
while(false == CFG_XCI_ON) { // Before we've been turned on
if(TimeToStop) return; // loop unless it's time to stop.
checkCFG(); WaitForCFG.restart(); // Check our configuration
WaitATic(); // every second or so.
}

// Once our configuration is good and we are turned on we get here.

try { // Safely accept/process requests.

CurrentThreadState(XCI_InitialStartup); // Update our status.

startup_XCI(); // We're on, so turn on!

while(false == TimeToStop) { // While it is not time to stop:

// Occasionally we check to see what our configuration says. If
// the XCI is configured up, or down, or if the port changes then
// the checkCFG() function handles the changes. After that all we
// need to do here is check for a listener -- if we're up we will
// have one and if not then we won't. Without a listener we will
// slow down and keep checking for a configuration change.

if(WaitForCFG.isExpired()) { checkCFG(); WaitForCFG.restart(); } // Check the CFG periodically.

// Get a new client if we have room in the queue
// and the listener is live.

int JobsThisRound = 0; // Keep track of each batch.
if(Listener) { // Check for a good listener.
CurrentThreadState(XCI_PollingListener); // Update our status.
TCPClient* NewClient; // This will be our client.
do { // Fast as we can - grab the work:
++diagLoopCount; // Count Polling Loops.
NewClient = 0; // Clear our client pointer.
snfXCITCPChannel* Channel = BestAvailableChannel(); // Pick a channel to use then
if(MaxQueueLength > Channel->Size()) { // If we have room in the queue
NewClient = Listener->acceptClient(); // get a new client.
if(NewClient) { // If we got one:
CurrentThreadState(XCI_SubmittingJob); // Update our status.
++diagClientCount; // Count Clients.
++JobsThisRound; // Count jobs accepted this round so the throttle below resets.
NewClient->makeNonBlocking(); // Make the client non-blocking.
Channel->submit(NewClient); // Submit the new client.
}
}
} while( // Keep getting work in this tight
(0 != NewClient)&& // loop until we run out of work
(MaxTCPQueueLength > diagClientCount) // or we've pulled a full queue.
);
} else {
CurrentThreadState(XCI_ListenerDown); // Update our status.
} // Throttle our loop to keep it real:
if(0 == JobsThisRound) PollingThrottle.pause(); // If we got nothing then slow down.
else PollingThrottle.reset(); // If we got some, keep getting it!
} // When we're done with the big loop:
CurrentThreadState(XCI_Stopping); // Update our status.
shutdown_XCI(); // Shutdown if we're not already.
} // End of the active section.

catch(exception& e) { // If we get a knowable exception
myHome->logThisError("XCI", -9, e.what()); // then we report it in detail,
try { shutdown_XCI(); } catch(...) {} // shutdown if we're not already,
WaitATic(); // wait a tic and try again.
}

catch(...) { // If we have an unhandled exception
myHome->logThisError("XCI", -10, "Panic!"); // Panic and reset. Notify the log.
try { shutdown_XCI(); } catch(...) {} // Shutdown if we're not already.
WaitATic(); // Pause to let things settle.
} // Let's try this again.
}


const ThreadType snfXCImgr::Type("snfXCIManager"); // The thread's type.
const int XCI_Default_Port = 9001; // Listener Default port = 9001.

snfXCImgr::snfXCImgr() : // Construct with no home.
Thread(snfXCImgr::Type, "XCI Manager"), // XCI Manager type and Name.
CFG_XCI_ON(false), // Everything starts off,
CFG_XCI_PORT(XCI_Default_Port), // default, and
myHome(0), // nulled.
XCI_UP(false),
C0(0), C1(0), C2(0), C3(0),
Listener(0), diagLoopCount(0), diagClientCount(0),
TimeToStop(true) {
}

snfXCImgr::~snfXCImgr() { // Stop when we are destroyed.
stop(); // Like I said, stop().
}

void snfXCImgr::linkHome(snf_RulebaseHandler* Home) { // Link to Home and set up shop.
if(0 != Home && 0 == myHome) { // If we are getting our home
myHome = Home; // then capture it,
myHome->use(); // update its use count,
TimeToStop = false; // clear the time to stop bit,
run(); // run our thread.
}
}

int snfXCImgr::TotalQueue() { // Return the total work queue size.
ScopeMutex IGotIt(ChannelMutex); // Serialize state control for safety.
return (
((0 == C0) ? 0 : C0->Size()) +
((0 == C1) ? 0 : C1->Size()) +
((0 == C2) ? 0 : C2->Size()) +
((0 == C3) ? 0 : C3->Size())
);
}

void snfXCImgr::stop() { // Called to shut down.
if(false == TimeToStop) { // If we are not stopped then
TimeToStop = true; // it is time to stop.
join(); // Wait for our main thread first,
shutdown_XCI(); // then shut down the XCI.
myHome->unuse(); // Let go of the rulebase manager.
myHome = 0; // Null it out for safety.
}
}


+ 199
- 0
snfXCImgr.hpp View file

@@ -0,0 +1,199 @@
// snfXCImgr.hpp
// Copyright (C) 2007 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
// XML Command Interface manager.
// This module operates a TCP server to accept requests for scans, GBUdb
// operations, etc. on behalf of an snf_EngineHandler.

#ifndef included_snfXCImgr_hpp
#define included_snfXCImgr_hpp

#include <string>
#include <queue>
#include "timing.hpp"
#include "threading.hpp"
#include "networking.hpp"
#include "snf_xci.hpp"

using namespace std;

// We need to know these exist ;-)

class snf_RulebaseHandler; // These exist.
class snf_EngineHandler; // These exist.

// Handy references and "standards"

static const string XCIErrorResponse = // Unrecognized request error.
"<snf><xci><error message=\'What was that?\'/></xci></snf>\n";

static const string XCIBadSetResponse = // Empty GBUdb set command error.
"<snf><xci><error message=\'No changes in set. Use test!\'/></xci></snf>\n";

// snfXCIServerCommandHandler Base Class for Server Command Processing.

class snfXCIServerCommandHandler { // Server Command Handler Base Class.
public:
virtual string processXCIRequest(snf_xci& X); // Server provides a useful processor.
};

// snfXCIJob encapsulates a single XCI transaction.

class snfXCIJob { // Job Packet.
public:
string Request; // XCI formatted request.
string Response; // XCI formatted response.
int SetupTime; // Setup time so far in ms.
void clear(); // Clear the buffers.
};

// snfXCIJobProcessor encapsulates the logic to respond to an XCI request.

class snfXCIJobProcessor { // XCI job processor.
private:
snf_xci myXCI; // XCI interpreter.
snf_RulebaseHandler* myHome; // Rulebase to use.
snf_EngineHandler* myEngine; // Scanner (set up internally).

bool isScanJob(); // True if myXCI is a scan job.
bool isGBUdbJob(); // True if myXCI is a GBUdb job.
bool isReportJob(); // True if myXCI is a Report job.
bool isCommandJob(); // True if myXCI is a Command job.
void processScan(snfXCIJob& J); // Process a scan request.
string processGBUdb(); // Process a GBUdb request.
string processStatusReport(); // Process a report request.

public:
snfXCIJobProcessor(snf_RulebaseHandler* H); // Setup scanner.
~snfXCIJobProcessor(); // Tear down scanner.
void process(snfXCIJob& J); // Process a Job.
};

// ChannelJob encapsulates a Client Job while in the queue and how long it has
// been in the system (since created).

class ChannelJob { // Wrapper for job queue.
private:
TCPClient* myClient; // We have a TCPClient.
Timer Lifetime; // We have a timer.

public:
ChannelJob(); // We can be blank but usually
ChannelJob(TCPClient* C); // we are created like this.
msclock Age(); // How old is this job?
TCPClient* Client(); // What client does it hold?
};

// snfXCITCPChannel encapsulates the logic to queue and handle TCPClients for
// the XCI interface. The queued TCPClients each represent a single request.
// Each request is handled in turn by reading the request into an snfXCIJob,
// handing that snfXCIJob to an snfXCIJobProcessor, transmitting the result
// back to the TCPClient, closing the connection, and recycling the snfXCIJob
// object for the next round.

// snfXCITCPChannel shuts down when given a NULL TCPClient; This allows any
// jobs in queue to be handled before the thread stops. To shut down a channel
// { C->submit(NULL); C->join(); delete C; C = NULL;}

const int LineBufferSize = 256; // Line buffer size.

class snfXCITCPChannel : private Thread { // TCPClient processor & queue.
private:

snf_RulebaseHandler* myHome; // Rulebase handler.

snfXCIJobProcessor Processor; // XCI processor.
snfXCIJob Job; // XCI Job buffer.

volatile int LatestSize; // Queue Size Blinking Light.
Mutex QueueMutex; // Serializes queue changes.
ProductionGateway QueueGateway; // Keeps track of give and take.
queue<ChannelJob> JobQueue; // Queue of clients.
void give(ChannelJob& J); // give a client to the queue.
ChannelJob take(); // take a client from the queue.

char LineBuffer[LineBufferSize]; // Read Line Buffer.
void readRequest(TCPClient* Client); // Read Job.Request from Client.
void writeResponse(TCPClient* Client); // Write Job.Response to Client.

void myTask(); // Thread's main loop.

public:

snfXCITCPChannel(snf_RulebaseHandler* H, string N); // Create these with a home rulebase.
~snfXCITCPChannel(); // Destroy them very carefully.
int Size(); // Keep track of how full they are.
void submit(TCPClient* C); // This is how we submit jobs.

const static ThreadType Type; // The thread's type.

const static ThreadState XCI_Wait;
const static ThreadState XCI_Read;
const static ThreadState XCI_Process;
const static ThreadState XCI_Write;
const static ThreadState XCI_Close;
const static ThreadState XCI_Clear;
const static ThreadState XCI_Shutdown;

//const static ThreadState ThreadInitialized; // Constructed successfully.
};

// snfXCImgr encapsulates a service engine that takes XCI requests via TCP,
// performs the required actions, and returns an XCI response. It also checks
// to see if the configuration for the XCI interface has changed.

class snfXCImgr : private Thread { // XCI manager.
private:

Mutex ChannelMutex; // Safety Channel Up/Down events.

bool CFG_XCI_ON; // Is XCI turned on?
int CFG_XCI_PORT; // Which port do we listen on?
void checkCFG(); // Checks the configuration.

snf_RulebaseHandler* myHome; // Rulebase handler to service.
snfXCITCPChannel* C0; // XCI channel 0
snfXCITCPChannel* C1; // XCI channel 1
snfXCITCPChannel* C2; // XCI channel 2
snfXCITCPChannel* C3; // XCI channel 3
snfXCITCPChannel* BestAvailableChannel(); // Selects XCI channel w/ lowest queue.

TCPListener* Listener; // XCI Listener.

bool XCI_UP; // True if XCI is alive.
void startup_Listener(); // Listener startup function.
void shutdown_Listener(); // Listener shutdown function.
void startup_XCI(); // XCI startup function.
void shutdown_XCI(); // XCI shutdown function.

bool TimeToStop; // True when shutting down.
void myTask(); // Main thread task.

volatile int diagLoopCount;
volatile int diagClientCount;

public:

snfXCImgr(); // Construct with no home.
~snfXCImgr(); // Destroy to shut down.

void linkHome(snf_RulebaseHandler* Home); // Link to Home and set up shop.
int TotalQueue(); // Return the total work queue size.
void stop(); // Called to shut down.

int pollLoopCount(); // Get diagnostic loop count.
int pollClientCount(); // Get diagnostic client count.

const static ThreadType Type; // The thread's type.

const static ThreadState XCI_InitialConfig; // Getting initial configuration.
const static ThreadState XCI_InitialStartup; // Performing first startup.
const static ThreadState XCI_CheckConfig; // Checking configuration.
const static ThreadState XCI_PollingListener; // Polling Listener for jobs.
const static ThreadState XCI_SubmittingJob; // Submitting a new job.
const static ThreadState XCI_ListenerDown; // Listener is down.
const static ThreadState XCI_Stopping; // XCImgr Exiting Big Loop

};
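
// Illustrative usage sketch, assuming a host that already owns an open
// snf_RulebaseHandler (the name Rulebase below is a placeholder):
//
// snfXCImgr XCI; // Construct the manager (thread idle).
// XCI.linkHome(&Rulebase); // Link to the rulebase and start the thread.
// ... // XCI requests are served while configured on.
// int Backlog = XCI.TotalQueue(); // Optionally watch the total work backlog.
// XCI.stop(); // Shut down before the rulebase goes away.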

#endif

+ 233
- 0
snf_HeaderFinder.cpp View file

@@ -0,0 +1,233 @@
// snf_HeaderFinder.cpp
// Copyright (C) 2007 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// See snf_HeaderFinder.hpp for details

#include "snf_HeaderFinder.hpp"

#include "snfLOGmgr.hpp"
#include "snfCFGmgr.hpp"

const int NumberOfByteValues = 256; // Number of possible byte values.

HeaderFinder::HeaderFinder( // To construct one of these:
snfScanData* EngineScanData, // -- Scanner control data ptr.
const HeaderDirectiveSet& Patterns, // -- this is the set of patterns.
const unsigned char* MessageBuffer, // -- this is the message buffer.
const int MessageLength // -- this is the length of the buffer.
) :
ScanData(EngineScanData), // Grab the scan control block.
HeaderDirectives(Patterns), // Grab the Directives and
Bfr(MessageBuffer), // the message buffer.
Len(MessageLength),
ImpossibleBytes(NumberOfByteValues, false), // Clear the impossible bytes cache.
Directives(0) { // Zero the composite result.
UnfoldHeaders(); // Unfold the headers.
}

void HeaderFinder::CheckContent(string& Header, const HeaderFinderPattern& P) { // Check for a match in the header.
if(string::npos != Header.find(P.Contains, P.Header.length())) { // If we find the required contents:

/*** if/else ladder - too complex for switch ***/

if(
HeaderDirectiveBypass == P.Directive || // If this is a bypass directive or
HeaderDirectiveWhite == P.Directive // a white header directive:
) {
Directives |= P.Directive; // Add the flags to our output.
} else

if(HeaderDirectiveDrillDown == P.Directive) { // If this is a DrillDown rule
ScanData->drillPastOrdinal(P.Ordinal); // mark the IP DrillDown flag.
Directives |= P.Directive; // Add the flags to our output.
} else

if(HeaderDirectiveContext == P.Directive) { // If this is a context activation
ActivatedContexts.insert(P.Context); // header then activate the context.
Directives |= P.Directive; // Add the flags to our output.
} else

if( // Are we forcing the message source?
HeaderDirectiveSource == P.Directive && // If we matched a source directive and
false == ScanData->FoundSourceIP() && // the source is not already set and
ActivatedContexts.end() != ActivatedContexts.find(P.Context) // and the source context is active then
) { // we set the source from this header.
// Extract the IP from the header.

const string digits = "0123456789"; // These are valid digits.
int IPStart = Header.find_first_of(digits, P.Header.length()); // Find the first digit in the header.
if(string::npos == IPStart) return; // If we don't find it we're done.
const string ipchars = ".0123456789"; // These are valid IP characters.
int IPEnd = Header.find_first_not_of(ipchars, IPStart); // Find the end of the IP.
if(string::npos == IPEnd) IPEnd = Header.length(); // Correct for end of string cases.
ScanData->HeaderDirectiveSourceIP( // Extract the IP from the header and
Header.substr(IPStart, (IPEnd - IPStart)) // expose it to the calling scanner.
);
Directives |= P.Directive; // Add the flags to our output.
}
}
}

void HeaderFinder::MatchHeaders(string& Header) { // Check that the header matches.
if(0 >= Header.length()) return; // If there's nothing to look at, done!
HeaderFinderPattern Key; // We will need a handy key.
Key.Header.push_back(Header.at(0)); // Set up a minimal header string.
HeaderDirectiveIterator iK = HeaderDirectives.lower_bound(Key); // Locate the lower bound.

// At this point we have found a reasonable starting place for the
// header directives that might match this header. We will scan through
// them looking for a match. Since all matches should be grouped together
// in the set we will set a flag so that on the first non-match after that
// we can stop looking.

int CurrentOrdinal = 0; // Keep the current ordinal in scope.
bool FoundFirstMatch = false; // Have we found our first match?
for(;iK != HeaderDirectives.end();iK++) { // Scan through the directives.
const HeaderFinderPattern& P = (*iK); // Make a handy handle.
if(0 == Header.compare(0, P.Header.length(), P.Header)) { // Check for a matching header.
if(false == FoundFirstMatch) { // If this is our first match
FoundFirstMatch = true; // then set our first match flag
CurrentOrdinal = Ordinals[P.Header]; // and get the Ordinal. Then increment
Ordinals[P.Header] = CurrentOrdinal + 1; // the Ordinal for next time.
}
if(CurrentOrdinal == P.Ordinal) { // If the Ordinal matches our Directive
CheckContent(Header, P); // then check the content of the header.
} else
if(CurrentOrdinal < P.Ordinal) { // If we're into Directives bigger than
return; // our Ordinal then we're done.
}
} else { // If the header doesn't match and we
if(FoundFirstMatch) return; // were matching before then we're done.
if(Header.at(0)!=P.Header.at(0)) return; // If the first bytes don't match we're done!
}
} // Move on to the next directive.
}

bool HeaderFinder::ByteIsImpossible(unsigned char b) { // Is b not first byte of any pattern?
if(ImpossibleBytes[b]) return true; // Don't look if we already know.
HeaderFinderPattern Key; // We will need a handy key.
Key.Header.push_back(b); // Set up a minimal header string.
HeaderDirectiveIterator iK = HeaderDirectives.lower_bound(Key); // Locate the lower bound.
if(iK == HeaderDirectives.end()) return (ImpossibleBytes[b] = true); // If we find nothing or the first byte
if((*iK).Header.at(0) != b) return (ImpossibleBytes[b] = true); // doesn't match then it's impossible.
return false; // Otherwise we might find it ;-)
}

bool TrimToNextHeader(int& Pos, const unsigned char* Bfr, const int Len) { // Move Pos & check for EOH.
for(;(Pos < (Len-2));Pos++) { // Scan through the Bfr (stay in range).
switch(Bfr[Pos]) { // React to the byte at hand:
case '\t':
case '\r':
case ' ': { // Ordinary spaces and \r we skip.
break;
}
case '\n': { // On Newlines we check to see if
if( // this is the end of the headers.
('\r' == Bfr[Pos+1] && '\n' == Bfr[Pos+2]) || // Either \n\r\n or
('\n' == Bfr[Pos+1] ) // \n\n means EOH.
) {
return false; // If EOH, no more headers, send false.
}
break; // If not EOH then keep going.
}
default: { // Any other byte and we are done.
return true; // We have another header, send true.
}
}
} // If we run out of bytes then we
return false; // are also out of headers, send false.
}

void eatThisHeader(int& Pos, const unsigned char* Bfr, const int Len) { // Eat up to the next header.
for(;(Pos < (Len-1));Pos++) { // Scan through this header.
if('\n' == Bfr[Pos]) { // When we get to a new line check
if(' ' == Bfr[Pos+1] || '\t' == Bfr[Pos+1]) continue; // for and skip any folding. Anything
return; // other than folding and we're done.
}
}
}

void eatOrdinarySpace(int& Pos, const unsigned char* Bfr, const int Len) { // Eat all spaces (dedup, unfold, etc)
for(;Pos < Len;Pos++) { // Scan through the buffer.
switch(Bfr[Pos]) { // React to each byte.
case ' ': // Simply skip all ordinary spaces
case '\t': { // or tabs.
break;
}
default: { // At the first other byte
return; // we are done.
}
}
}
}

void captureThisHeader( // Capture the header and move pos.
string& Output, // Here is the output string.
int& Pos, // Here is the current position.
const unsigned char* Bfr, // Here is the buffer pointer.
const int Len // Here is the length of the buffer.
) {
Output.clear(); // Clear the output.
for(;(Pos < (Len-1)); Pos++) { // Scan through the header.
switch(Bfr[Pos]) { // React to each byte.
case '\r': { // If we find a <cr> ignore it.
break;
}
case '\n': { // If we find a <nl> check for folding.
if(' ' == Bfr[Pos+1] || '\t' == Bfr[Pos+1]) { // If we find folding then
++Pos; // move to the space
eatOrdinarySpace(Pos, Bfr, Len); // and gobble it up.
Output.push_back(' '); // output a single ordinary space
--Pos; // and drop back one for the loop's ++.
} else { // If the <nl> wasn't part of a fold
return; // then we are done with this header.
}
break; // Skip the rest of the switch.
}
case '\t': // When we come across a tab or
case ' ': { // a space then we will eat them
eatOrdinarySpace(Pos, Bfr, Len); // and any extras so they are converted
Output.push_back(' '); // into a single ordinary space.
--Pos; // Drop back one for the loop's ++.
break;
}
default: { // For all ordinary bytes we simply
Output.push_back(Bfr[Pos]); // add the byte to the string.
break;
}
}
}
}

void HeaderFinder::UnfoldHeaders() { // Unfold and check headers.
if(0 >= HeaderDirectives.size()) return; // Skip this if we have no patterns.
if(0 >= Len) return; // Skip if we have no message.
string TestHeader; // The header under test.

int Position = 0; // Position in Bfr.
for(;;) { // Scan through all of the headers.

// Skip any leading or leftover whitespace. Be sure to exit when we
// reach a blank new line. The capture routine later on will not eat
// the white space - that way we can check for the EOH in this one spot.

if(false == TrimToNextHeader(Position, Bfr, Len)) return; // If no more headers then we're done.

// Skip Impossible Headers -- no such first character.

if(ByteIsImpossible(Bfr[Position])) { // If we have no patterns for this
eatThisHeader(Position, Bfr, Len); // header then skip it and continue on
continue; // to the next one.
}

// Capture and unfold the header to test.

captureThisHeader(TestHeader, Position, Bfr, Len); // Unfold the header into TestHeader.

// Test the header.

MatchHeaders(TestHeader); // Match and activate header directives.
}
}


+ 96
- 0
snf_HeaderFinder.hpp View file

@@ -0,0 +1,96 @@
// snf_HeaderFinder.hpp
// Copyright (C) 2007 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// SNF Header Finder used for identifying headers in a message. A header match
// is defined by the name of the header, its ordinal, and some string that is
// contained in that header. If the pattern is matched then one or more bits
// are set in a 32 bit status flag. Usually, one bit at a time. Other matchers
// that intend to set the same bits as are already set are turned off to save
// cycles.
//
// The initial implementation of this engine is for turning off GBUdb learning
// when one of the defined headers is matched. Other uses are likely to be
// developed. This engine will have to evolve as that occurs.
//
// The evaluation of the status flag is defined by the application.

#ifndef snf_HeaderFinder_included
#define snf_HeaderFinder_included

#include <string>
#include <set>
#include <map>
#include <vector>

using namespace std;

struct HeaderFinderPattern { // Input pattern for header finder.
string Header; // Header name to match.
int Ordinal; // Which instance to match.
int Context; // Context link (for pairing patterns).
string Contains; // What to find in the header.
unsigned long int Directive; // What directive to present.

HeaderFinderPattern(): // When constructing a finder pattern
Header(""),Ordinal(0),Context(0),Contains(""),Directive(0){} // initialize it like this.

HeaderFinderPattern(const HeaderFinderPattern& P); // Copy constructor.

void clear(); // Do this to make fresh and clean.

HeaderFinderPattern& operator=(const HeaderFinderPattern& R); // Assignment operator.
const bool operator<(const HeaderFinderPattern& R) const; // Comparator for set<> living.
};

typedef set<HeaderFinderPattern> HeaderDirectiveSet; // Convenient set typedef.
typedef set<HeaderFinderPattern>::iterator HeaderDirectiveIterator; // Convenient iterator typedef.

typedef map<const string, int> NameOrdinalMap; // Header Ordinal Count Map.

// Upon construction the HeaderFinder scans the headers for matching directives
// and leaves the composite results ready for inspection via the () operator.
// UnfoldHeaders() strips and unfolds the headers then passes them to
// MatchHeaders() which tracks the ordinals for matching directives and passes
// those headers to CheckContent() to see if the required patterns are found.
// CheckContent() then updates the Directives if the appropriate content is
// found.

class snfScanData; // Yes, this does exist.

class HeaderFinder { // Header Finder Object.
private:

const HeaderDirectiveSet& HeaderDirectives; // Handle for the directives/patterns.
unsigned long int Directives; // Composite result given this message.

set<int> ActivatedContexts; // Set of activated contexts.

const unsigned char* Bfr; // Message buffer.
const int Len; // Message length.

snfScanData* ScanData; // Scanner control data.

vector<bool> ImpossibleBytes; // Cache of known impossible bytes.
NameOrdinalMap Ordinals; // Map of current header ordinals.

void CheckContent(string& Header, const HeaderFinderPattern& P); // Check for a match in the header.
void MatchHeaders(string& Header); // Check that the header matches.
bool ByteIsImpossible(unsigned char b); // Is b not first byte of any pattern?
void UnfoldHeaders(); // Unfold and check headers.

public:
HeaderFinder( // The constructor reads the message.
snfScanData* EngineScanData, // -- Scanner control data ptr.
const HeaderDirectiveSet& Patterns, // -- this is the set of patterns.
const unsigned char* MessageBuffer, // -- this is the message buffer.
const int MessageLength // -- this is the length of the buffer.
);

const unsigned long int operator()() const; // How to read the composite directives.
string EstablishedSourceIP; // Source IP from directive if any.
};
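
// Illustrative usage sketch, assuming a caller that already has its scan
// control block, the configured directive set, and the raw message bytes
// (the four argument names below are placeholders):
//
// HeaderFinder Finder(ScanData, Directives, MessageBuffer, MessageLength);
// unsigned long int Flags = Finder(); // Read the composite directives.
// if(Flags & HeaderDirectiveBypass) { /* application defined reaction */ }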

#include "snf_HeaderFinder.inline.hpp"

#endif

+ 50
- 0
snf_HeaderFinder.inline.hpp View file

@@ -0,0 +1,50 @@
// snf_HeaderFinder.inline.hpp
// Copyright (C) 2007 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
// Inline methods.

inline const bool HeaderFinderPattern::operator<(const HeaderFinderPattern& R) const { // Comparator for set<> living.
if(Header < R.Header) { // If the Header name is < then true!
return true;
} else
if(Header == R.Header) { // If the Header name is == then
if(Ordinal < R.Ordinal) { // check the Ordinal. If it's < then
return true; // true!
} else
if(Ordinal == R.Ordinal) { // If the Ordinal == then
if(Contains < R.Contains) { // check the Contains. If it is < then
return true; // true!
}
}
}
return false; // In all other cases this is not < R
}

inline HeaderFinderPattern::HeaderFinderPattern(const HeaderFinderPattern& P) { // Copy constructor.
Header = P.Header;
Ordinal = P.Ordinal;
Context = P.Context;
Directive = P.Directive;
Contains = P.Contains;
}

inline void HeaderFinderPattern::clear() { // Do this to make fresh and clean.
Header.clear();
Ordinal = Context = Directive = 0;
Contains.clear();
}

inline HeaderFinderPattern&
HeaderFinderPattern::operator=(const HeaderFinderPattern& R) { // Assignment operator.
Header = R.Header;
Ordinal = R.Ordinal;
Context = R.Context;
Directive = R.Directive;
Contains = R.Contains;
return *this;
}


inline const unsigned long int HeaderFinder::operator()() const { // Return the Directives.
return Directives;
}

+ 791
- 0
snf_engine.cpp View file

@@ -0,0 +1,791 @@
// snf_engine.cpp
//
// (C) 1985-2004 MicroNeil Research Corporation
// (C) 2005-2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// Derived from original work on cellular automation for complex pattern
// reflex engine 1985 Pete McNeil (Madscientist)
//
// Derived from rapid scripting engine (token matrix) implementation 1987
//

// 20040419 _M Adding Verify() method. Beginning with version 2-3 of Message Sniffer
// we are embedding a Mangler digest of the rulebase file. The Verify() method reconstructs
// the digest and compares it. This ensures that no part of the rulebase file can be
// corrupted without the snf2check utility detecting the problem. Prior to this version
// it was possible to have undetected corruption in the middle of the rulebase file. The
// Mangler digest will prevent that.

// 20030130 _M Added testing section in TokenMatrix to throw an exception if the file
// is too small to be a valid matrix. The value is calculated based on the idea that a
// valid matrix will have been encrypted in two segments so the file must be at least
// as large as these two segments. This is intended to solve the zero-length-rulebase
// bug where an access violation would occur if the file was of zero length.

// 20021030 _M Creation of snf_engine module by dragging the sniffer pattern matching engine out
// of the sniffer.cpp file.

#include <unistd.h>
#include <cstdio>
#include <cctype>
#include <ctime>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>
#include "mangler.hpp"
#include "snf_engine.hpp"

using namespace std;

///////////////////////////////////////////////////////////////////////////////////////////
// BEGIN IMPLEMENTATIONS //////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////////////////////////////////////

// Token Matrix Implementations ///////////////////////////////////////////////////////////

// TokenMatrix::Load(filename)

void TokenMatrix::Load(string& FileName) { // Initialize using a string for file name.
Load(FileName.c_str()); // Convert the string to a null terminated
} // char* and call the function below.

void TokenMatrix::Load(const char* FileName) { // Initializes the token matrix by file name.

ifstream MatrixFile(FileName,ios::binary); // Open the file.
if(MatrixFile == NULL || MatrixFile.bad()) // If anything is wrong with the file
throw BadFile("TokenMatrix::Load()(MatrixFile==NULL || MatrixFile.bad())"); // then throw a bad file exception.

Load(MatrixFile); // Load the matrix from the file.
MatrixFile.close(); // Be nice and clean up our file.
}

// TokenMatrix::Load(stream)

void TokenMatrix::Load(ifstream& F) { // Initializes the token matrix from a file.

MatrixSize = 0; // Clear out the old Matrix Size and array.
if(Matrix) delete[] Matrix; // that is, if there is an array (new[] needs delete[]).

F.seekg(0,ios::end); // Find the end of the file.
MatrixSize = F.tellg() / sizeof(Token); // Calculate how many tokens.
F.seekg(0); // Go back to the beginning.

if(MatrixSize < MinimumValidMatrix) // If the matrix file is too small then
throw BadMatrix("TokenMatrix::Load() (MatrixSize < MinimumValidMatrix)"); // we must reject it.

Matrix = new Token[MatrixSize]; // Allocate an array of tokens.

if(Matrix == NULL) // Check for an allocation error.
throw BadAllocation("TokenMatrix::Load() Matrix == NULL)"); // and throw an exception if it happens.

F.read( // Now read the file into the allocated
reinterpret_cast<char*>(Matrix), // matrix by recasting it as a character
(MatrixSize * sizeof(Token))); // buffer of the correct size.

if(F.bad()) // If there were any problems reading the
throw BadMatrix("TokenMatrix::Load() (F.bad())"); // matrix then report the bad matrix.
}

// TokenMatrix::Validate(key)

void TokenMatrix::Validate(string& SecurityKey) { // Decrypts and validates the matrix.

MANGLER ValidationChecker; // Create a mangler engine for validation.

// In order to do the validation we must look at the token matrix as a sequence of bytes.
// We will be decrypting the first and last SecuritySegmentSize bytes of this sequence and then
// detecting whether the appropriate security key has been properly encrypted at the end.
// If we find everything as it should be then we can be sure that the two segments have
// not been tampered with and that we have the correct security key.

unsigned char* TokensAsBytes = reinterpret_cast<unsigned char*>(Matrix);
int BytesInTokenMatrix = (MatrixSize * sizeof(Token));

// Now that we have all of that stuff let's initialize our ValidationChecker.

// Note that the length of our security key is always 24 bytes. The license
// id is 8 bytes, the authentication code is 16 bytes. We don't bother to check
// here because if it's wrong then nothing will decrypt and we'll have essentially
// the same result. Note also that on the end of the rule file we pad this
// encrypted security id with nulls so that we can create a string from it easily
// and so that we have precisely 32 bytes which is the same size as 4 tokens.
//
// Note: The 32 byte value is in SecurityKeyBufferSize. This means that we can
// accept security keys up to 31 bytes in length. We need the ending null to
// assure our null terminated string is as expected. The security key block must
// match up with the edges of tokens in the matrix so we pad the end with nulls
// when encoding the security key in the encoded file.

int SecurityKeyLength = SecurityKey.length(); // For the length of our key
for(int a=0;a<SecurityKeyLength;a++) // feed each byte through the
ValidationChecker.Encrypt(SecurityKey.at(a)); // mangler to evolve the key
// state.

// Now we're ready to decrypt the matrix... We start with the first segment.

for(int a=0;a<SecuritySegmentSize;a++) // For the length of the segment
TokensAsBytes[a] = // replace each byte with the
ValidationChecker.Decrypt(TokensAsBytes[a]); // decrypted byte.

// Next we decrypt the last security segment...

for(int a= BytesInTokenMatrix - SecuritySegmentSize; a<BytesInTokenMatrix; a++)
TokensAsBytes[a] =
ValidationChecker.Decrypt(TokensAsBytes[a]);

// Now that we've done this we should find that our SecurityKey is at the end
// of the loaded token matrix... Let's look and find out shall we?!!!

unsigned char* SecurityCheckKey = // Reference the check
& TokensAsBytes[BytesInTokenMatrix-SecurityKeyBufferSize]; // space in the matrix.

SecurityCheckKey[SecurityKeyBufferSize-1] = 0; // Add a safety null just in case.

string SecurityCheck((char*)SecurityCheckKey); // Make a string.

// By now we should have a SecurityCheck string to compare to our SecurityKey.
// If they match then we know everything worked out and that our token matrix has
// been decrypted properly. This is also a good indication that our token matrix
// is not incomplete since if it were the decryption wouldn't work. Sadly, we
// don't have the computing cycles to decrypt the entire file - so we won't be
// doing that until we can load it in a server/daemon and then reuse it over and
// over... Once that happens we will be able to detect tampering also.

if(SecurityKey != SecurityCheck) // If the security keys don't match
throw BadMatrix("TokenMatrix::Validate() (SecurityKey != SecurityCheck)"); // then we have an invalid matrix.
}

// TokenMatrix::Verify(key)

void TokenMatrix::Verify(string& SecurityKey) { // Builds and verifies a file digest.

MANGLER DigestChecker; // Create a mangler for the digest.

// Gain access to our token matrix as bytes.

unsigned char* TokensAsBytes = reinterpret_cast<unsigned char*>(Matrix);
int BytesInTokenMatrix = (MatrixSize * sizeof(Token));

// Initialize our digest engine with the security key.

int SecurityKeyLength = SecurityKey.length(); // For the length of our key
for(int a=0;a<SecurityKeyLength;a++) // feed each byte through the
DigestChecker.Encrypt(SecurityKey.at(a)); // mangler to evolve the key
// state.
// Build the digest.

int IndexOfDigest = // Find the index of the digest by
BytesInTokenMatrix - // starting at the end of the matrix,
SecurityKeyBufferSize - // backing up past the security key,
RulebaseDigestSize; // then past the digest.
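// Tail layout implied by this arithmetic (illustrative):
// [ ...digested bytes... ][ RulebaseDigestSize digest ][ SecurityKeyBufferSize key block ]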

int a=0; // Keep track of where we are.
for(;a<IndexOfDigest;a++) // Loop through up to the digest and
DigestChecker.Encrypt(TokensAsBytes[a]); // pump the file through the mangler.

// Now that the digest is built we must test it.
// The original was emitted by encrypting 0s so if we do the same thing we will match.

for(int b=0;b<RulebaseDigestSize;b++) // Loop through the digest and compare
if(DigestChecker.Encrypt(0)!=TokensAsBytes[a+b]) // our digest to the stored digest. If
throw BadMatrix("TokenMatrix::Verify() Bad Digest"); // any byte doesn't match it's bad!

// If we made it through all of that then we're valid :-)

}

void TokenMatrix::FlipEndian() { // Converts big/little endian tokens.
assert(sizeof(unsigned int)==4); // Check our assumptions.
unsigned int* UInts = reinterpret_cast<unsigned int*>(Matrix); // Grab the matrix as uints.
int Length = ((MatrixSize * sizeof(Token)) / sizeof(unsigned int)); // Calculate its size.
for(int i = 0; i < Length; i++) { // Loop through the array of u ints
unsigned int x = UInts[i]; // and re-order the bytes in each
x = ((x & 0xff000000) >> 24) | // one to swap from big/little endian
((x & 0x00ff0000) >> 8) | // to little/big endian.
((x & 0x0000ff00) << 8) |
((x & 0x000000ff) << 24);
UInts[i] = x; // Put the flipped int back.
}
}
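
// A minimal illustration (not part of the original source and not used by the engine):
// the same 32 bit byte swap FlipEndian() applies to every word of the matrix, shown on
// one sample value. SwapWord32 and FlipEndianExample are hypothetical helper names.

static unsigned int SwapWord32(unsigned int x) { // Swap the four bytes of one word.
    return ((x & 0xff000000) >> 24) |
           ((x & 0x00ff0000) >> 8) |
           ((x & 0x0000ff00) << 8) |
           ((x & 0x000000ff) << 24);
}

static void FlipEndianExample() { // 0x11223344 becomes 0x44332211.
    assert(SwapWord32(0x11223344u) == 0x44332211u);
}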

// Evaluator Implementations //////////////////////////////////////////////////////////////

// 20030216 _M Optimization conversions

inline int Evaluator::i_lower() { return myEvaluationMatrix->i_lower; }
inline bool Evaluator::i_isDigit() { return myEvaluationMatrix->i_isDigit; }
inline bool Evaluator::i_isSpace() { return myEvaluationMatrix->i_isSpace; }
inline bool Evaluator::i_isAlpha() { return myEvaluationMatrix->i_isAlpha; }


// Evaluator::Evaluator(position,evalmatrix) Constructor

Evaluator::Evaluator(int s, EvaluationMatrix* m) { // Constructor...

myEvaluationMatrix = m; // Capture the matrix I live in.
Matrix = myEvaluationMatrix->getTokens(); // Capture the token matrix I walk in.
MatrixSize = myEvaluationMatrix->getMatrixSize(); // And get its size.
PositionLimit = MatrixSize - 256; // Calculate the safety limit.

StreamStartPosition = s; // Always record our starting point.
NextEvaluator = NULL; // Always start off with no extensions.
CurrentPosition = 0; // Always start at the root of the matrix;
WildRunLength = 0; // No run length when new.

Condition = DOING_OK; // Start off being ok.
}

// Evaluator::EvaluateThis()

Evaluator::States Evaluator::EvaluateThis(unsigned short int i) { // Follow this byte.

Condition = FALLEN_OFF; // Start off guessing we'll fall off.

// First upgrade will be to DOING_OK, after that we launch buddies.

// In order to handle wildcard characters, this evaluation function must actually
// compare the character to a number of possibilities in most-specific to least-
// specific order to see if any match. In order to support overlapping rule sets,
// if more than one wildcard matches at this node, an additional evaluator will be
// placed in line already _AT THIS PATH POINT_ so that both possibilities will be
// explored. New evaluators are always added at the TOP of the list so we are always
// guaranteed not to overdrive an evaluator and end up in a recursive race condition.

// 20030216 _M Optimizations. In order to reduce the number of instructions per byte
// the parent Evaluation Matrix will now translate the byte i into boolean flags
// indicating if they are digits, white, letters, etc... and converting to lower
// case etc... This conversion is then done only once so that thereafter only a simple
// comparison need be made. This should eliminate many function calls and a collection
// of numeric comparisons.
//
// I am also moving the simple comparisons to the front of each logical section so
// that failures there can short-circuit subsequent logic to view the state of the
// matrix regarding that character. The matrix lookup is likely to be more expensive
// than a single binary comparison.

// For safety, we check our evaluation position here - If xNoCase is out of range
// then we will return OUT_OF_RANGE to indicate the problem rather than accessing
// data beyond our token matrix's limits.

/*** 20070606 _M Reduced the strength of this check from 3 comparisons to 1.
**** CurrentPosition is now an unsigned int so it cannot be negative. The limit
**** is now calculated once in the constructor as PositionLimit.

if(
CurrentPosition < 0 || // Position should never be < 0
xPrecise >= MatrixSize || // nor xPrecise over the top.
xNoCase >= MatrixSize // nor NoCase over the top.
) // If either occur we have a
return Condition = OUT_OF_RANGE; // bad matrix.
***/

if(CurrentPosition >= PositionLimit) return Condition = OUT_OF_RANGE;

// All of the positions calculated below are guaranteed to be within the ranges checked
// above so we're safe if we get to this point.

// So, at this point it's safe to check and see if I'm terminated. Note that if I
// am at a termination point, my path has terminated and I have a symbol so I don't
// need to resolve any more characters - even the current one.

if(Matrix[CurrentPosition].isTermination()) return Condition = TERMINATED;

// NOTE: The above is written for sudden-death termination. Eventually we will want
// to support deep - filters which will show every rule match and this will need to
// be rewritten.

// Evaluation order, most-to-least specific:

int xPrecise = CurrentPosition + i; // Match Precise Character
int xNoCase = CurrentPosition + i_lower(); // Match Case insensitive

// Of course I may need to resolve some of the following
// wildcard characters.

int xLetter = CurrentPosition + WILD_LETTER; // Match Any letter.
int xDigit = CurrentPosition + WILD_DIGIT; // Match Any digit.
int xNonWhite = CurrentPosition + WILD_NONWHITE; // Match Any non-whitespace.
int xWhiteSpace = CurrentPosition + WILD_WHITESPACE; // Match Any whitespace.
int xAnyInline = CurrentPosition + WILD_INLINE; // Match Any byte but new line.
int xAnything = CurrentPosition + WILD_ANYTHING; // Match Any character at all.
int xRunGateway = CurrentPosition + RUN_GATEWAY; // Match the run-loop gateway.

// Try to match the precise character.

if(Matrix[xPrecise].Character() == i) { // If we've matched our path
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xPrecise +
Matrix[xPrecise].Vector; // Move myself along this path.
}

// Try to match the case insensitive character.

if(i_lower()!=i && Matrix[xNoCase].Character()==i_lower()){

// If we've matched our path
// with a compromised case then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xNoCase +
Matrix[xNoCase].Vector; // Move myself along this path.
}
// If we have more than one match then
else { // let's try to make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xNoCase].Vector+xNoCase);

}
}

// Start looking at wildcards... Here's where we must limit run length.

if(Condition == DOING_OK) // If we matched above we'll
WildRunLength = 0; // reset our wild run count.
// If not then we need to keep
else { // track of our run length.

++WildRunLength; // Count up the run length.
if(WildRunLength >= MaxWildRunLength) // If we exceed the max then
return Condition = FALLEN_OFF; // we've fallen off the path
} // and we do it immediately.

// WILD_LETTER
// If that didn't do it for us...
// Try to match any letter character.

// The way this next one works (and the rest of the wildcards) is we look into
// the token matrix to see if the wildcard is part of the current path... If it
// is then we compare the incoming character to that wildcard evaluation function
// and if it is true, then we've got a match.

if(i_isAlpha() && Matrix[xLetter].Character()==WILD_LETTER){

// If we've matched our path
// with any letter then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xLetter +
Matrix[xLetter].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xLetter].Vector+xLetter);

}
}

// WILD_DIGIT
// If that didn't do it for us...
// Try to match any digit character.

if(i_isDigit() && Matrix[xDigit].Character()==WILD_DIGIT){

// If we've matched our path
// with any digit then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xDigit +
Matrix[xDigit].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xDigit].Vector+xDigit);

}
}

// WILD_NONWHITE
// If that didn't do it for us...
// Try to match any non-whitespace character.

if(!i_isSpace() && Matrix[xNonWhite].Character()==WILD_NONWHITE){

// If we've matched our path
// with any non-whitespace then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xNonWhite +
Matrix[xNonWhite].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xNonWhite].Vector+xNonWhite);

}
}

// WILD_WHITESPACE
// If that didn't do it for us...
// Try to match any whitespace character.

if(i_isSpace() && Matrix[xWhiteSpace].Character()==WILD_WHITESPACE){

// If we've matched our path
// with any whitespace then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xWhiteSpace +
Matrix[xWhiteSpace].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xWhiteSpace].Vector+xWhiteSpace);

}
}

// WILD_INLINE
// If that didn't do it for us...
// Try to match any character EXCEPT a new line.

if(i != '\n' && Matrix[xAnyInline].Character()==WILD_INLINE){

// If we've matched our path
// with any byte but \n then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xAnyInline +
Matrix[xAnyInline].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xAnyInline].Vector+xAnyInline);

}
}

// WILD_ANYTHING
// If that didn't do it for us...
// Try to match any character.

if(Matrix[xAnything].Character()==WILD_ANYTHING){

// If we've matched our path
// with any character then
if(Condition==FALLEN_OFF) { // check: if no matches yet,
Condition = DOING_OK; // upgrade to doing ok.
CurrentPosition = xAnything +
Matrix[xAnything].Vector; // Move myself along this path.
}

else { // Otherwise make a buddy...

// If there's no duplicate buddy like this already, then we'll create one.
// To create a buddy, add an evaluator at the top of the list (behind us) and
// set its position as if it had been here all along and had matched the current
// character. Next time we evaluate it will be just like all the others.

myEvaluationMatrix->
AddEvaluator(StreamStartPosition,Matrix[xAnything].Vector+xAnything);

}
}

// 20021112 _M
// Beginning with version 2 of Message Sniffer we've implemented a new construct
// for run-loops that prevents any interference between rules where run-loops might
// appear in locations coinciding with standard match bytes. The new methodology
// uses a special run-loop-gateway character to isolate any run loops from standard
// nodes in the matrix. Whenever a run-loop gateway is present at a node a buddy is
// inserted AFTER the current evaluator so that it will evaluate the current character
// from the position of the run-loop gateway. This allows run loops to occupy the same
// positional space as standard matches while maintaining isolation between their paths
// in the matrix.

// We don't want to launch any run loop buddies unless we matched this far. If we did
// match up to this point and the next character in a pattern includes a run loop then
// we will find a gateway byte at this point representing the path to any run loops.

// If we made it this far launch a buddy for any run-loop gateway that's present.
// Of course, the buddy must be evaluated after this evaluator during this pass because
// he will have shown up late... That is, we don't detect a run gateway until we're
// sitting on a new node looking for a result... The very result we may be looking for
// could be behind the gateway - so we launch the buddy behind us and he will be able
// to match anything in this pass that we missed when looking for a non-run match.

if(Matrix[xRunGateway].Character() == RUN_GATEWAY)
myEvaluationMatrix->
InsEvaluator(StreamStartPosition,Matrix[xRunGateway].Vector+xRunGateway);

// At this point, we've tried all of our rules, and created any buddies we needed.
// If we got a match, we terminated long ago. If we didn't, then we either stayed
// on the path or we fell off. Either way, the flag is in Condition so we can send
// it on.

return Condition;

}

///////////////////////////////////////////////////////////////////////////////////////////
// EvaluationMatrix Implementations ///////////////////////////////////////////////////////

// EvaluationMatrix::AddMatchRecord(int sp, int ep, int sym)

// Most of this functionality is about deep scans - which have been put on hold for now
// due to the complexity and the scope of the current application. For now, although
// we will use this reporting mechanism, it will generally record only one event.

MatchRecord* EvaluationMatrix::AddMatchRecord(int sp, int ep, int sym) {

// 20030216 _M Added range check code to watch for corruption. Some systems have
// reported matches with zero length indicating an undetected corruption. This
// range check will detect and report it.

if(sp==ep) // Check that we're in range - no zero
throw OutOfRange("sp==ep"); // length pattern matches allowed!

MatchRecord* NewMatchRecord = // Then, create the new result object
new MatchRecord(sp,ep,sym); // by passing it the important parts.

if(NewMatchRecord==NULL) // Check for a bad allocation and throw
throw BadAllocation("NewMatchRecord==NULL"); // an exception if that happens.

if(ResultList == NULL) { // If this is our first result we simply
ResultList = NewMatchRecord; // add the result to our list, and of course
LastResultInList = NewMatchRecord; // it is the end of the list as well.
} else { // If we already have some results, then
LastResultInList->NextMatchRecord = // we add the new record to the result list
NewMatchRecord; // and record that the new record is now the
LastResultInList = NewMatchRecord; // last result in the list.
}

return NewMatchRecord; // Return our new match record.
}


// EvaluationMatrix::AddEvaluator()

// 20021112 _M
// This function has been modified to include a check for duplicates as well as setting
// the mount point for the new evaluator. This eliminates a good deal of code elsewhere
// and encapsulates the complete operation. If a duplicate evaluator is found then the
// function returns NULL indicating that nothing was done. In practice, no check is made
// since any serious error conditions cause errors to be thrown from within this function
// call. These notes apply to some extent to InsEvaluator which is copied from this function
// and which has the only difference of putting the new evaluator after the current one
// in the chain in order to support branch-out operations for loop sequences in the matrix.

Evaluator* EvaluationMatrix::AddEvaluator(int s, int m) { // Adds a new evaluator at top.

if(!isNoDuplicate(m)) return NULL; // If there is a duplicate do nothing.

if(CountOfEvaluators >= MAX_EVALS) // If we've exceeded our population size
throw MaxEvalsExceeded("Add:CountOfEvaluators >= MAX_EVALS"); // then throw an exception.

Evaluator* NewEvaluator = SourceEvaluator(s,this); // Make up a new evaluator.

if(NewEvaluator == NULL) // Check for a bad allocation and throw
throw BadAllocation("Add:NewEvaluator == NULL"); // an exception if it happens.

NewEvaluator->NextEvaluator = EvaluatorList; // Point the new evaluator to the list.
EvaluatorList = NewEvaluator; // Then point the list head to
// the new evaluator.

NewEvaluator->CurrentPosition = m; // Establish the mount point.

++CountOfEvaluators; // Add one to our evaluator count.
if(CountOfEvaluators > MaximumCountOfEvaluators) // If the count is the biggest we
MaximumCountOfEvaluators = CountOfEvaluators; // have seen then keep track of it.

return NewEvaluator; // Return the new evaluator.
}

// EvaluationMatrix::InsEvaluator()

Evaluator* EvaluationMatrix::InsEvaluator(int s, int m) { // Inserts a new evaluator.

if(!isNoDuplicate(m)) return NULL; // If there is a duplicate do nothing.

if(CountOfEvaluators >= MAX_EVALS) // If we've exceeded our population size
throw MaxEvalsExceeded("Ins:CountOfEvaluators >= MAX_EVALS"); // then throw an exception.

Evaluator* NewEvaluator = SourceEvaluator(s,this); // Make up a new evaluator.

if(NewEvaluator == NULL) // Check for a bad allocation and throw
throw BadAllocation("Ins:NewEvaluator == NULL"); // an exception if it happens.

NewEvaluator->NextEvaluator = // Point the new evaluator where the
CurrentEvaluator->NextEvaluator; // current evaluator points... then point
CurrentEvaluator->NextEvaluator = // the current evaluator to this one. This
NewEvaluator; // accomplishes the insert operation.

NewEvaluator->CurrentPosition = m; // Establish the mount point.

++CountOfEvaluators; // Add one to our evaluator count.
if(CountOfEvaluators > MaximumCountOfEvaluators) // If the count is the biggest we
MaximumCountOfEvaluators = CountOfEvaluators; // have seen then keep track of it.

return NewEvaluator; // Return the new evaluator.
}

// EvaluationMatrix::DropEvaluator()

void EvaluationMatrix::DropEvaluator() { // Drops the current evaluator from the matrix.

Evaluator* WhereTo = CurrentEvaluator->NextEvaluator; // Where do we go from here?

// First step is to heal the list as if the current evaluator were not present.
// If there is no previous evaluator - meaning this should be the first one in the
// list - then we point the list head to the next evaluator on the list (WhereTo)

if(PreviousEvaluator != NULL) // If we have a Previous then
PreviousEvaluator->NextEvaluator = WhereTo; // its next becomes our next.
else // If we don't then our next
EvaluatorList = WhereTo; // is the first in the list.

// Now that our list is properly healed, it's time to drop the dead evaluator and
// get on with our lives...

CurrentEvaluator->NextEvaluator = NULL; // Disconnect from any list.
CacheEvaluator(CurrentEvaluator); // Drop the current eval.

CurrentEvaluator = WhereTo; // Move on.

--CountOfEvaluators; // Reduce our evaluator count.

}


// EvaluationMatrix::EvaluateThis()
//
// This function returns the number of matches that were found. It is possible for more
// than one evaluator to match on a single character.
//
// 0 indicates no matches were found.
// >0 indicates some matches were found.
// If there is a problem then an exception will be thrown.

int EvaluationMatrix::EvaluateThis(unsigned short int i) {

AddEvaluator(CountOfCharacters,0); // First, add a new Evaluator at the root of the
// matrix for the current position in the scan
// stream.

// The new evaluator is now at the top of our list.
// If there was a problem then an exception will have been thrown.
// If our allocation worked ok, then we'll be here and ready to start scanning
// the rule set with our current character.

PassResult = 0; // Start by assuming we won't match.
CurrentEvaluator = EvaluatorList; // Start at the top of the list.
PreviousEvaluator = NULL; // NULL means previous is the top.

// 20030216 _M
// Next do some basic conversions and evaluations so they don't need to be done
// again within the evaluators. From now on the evaluators will look here for basic
// conversions and boolean check values rather than performing the checks themselves.

i_lower = tolower(i); // Convert i to lower case.
i_isDigit = isdigit(i); // Check for a digit.
i_isSpace = isspace(i); // Check for whitespace.
i_isAlpha = isalpha(i); // Check for letters.

// Next, loop through the list and pass the incoming character to
// each evaluator. Drop those that fall off, and record those that terminate. The
// rest of them stick around to walk their paths until they meet their fate.

while(CurrentEvaluator != NULL) { // While there are more evaluators...
// go through the list and evaluate
switch(CurrentEvaluator->EvaluateThis(i)) { // the current character against each.

case Evaluator::FALLEN_OFF: { // If we've fallen off the path
DropEvaluator(); // drop the current evaluator and
break; // move on with our lives.
}

case Evaluator::DOING_OK: { // If we're still going then...
PreviousEvaluator = CurrentEvaluator; // keep track of where we've been and
CurrentEvaluator = // move forward to the next evaluator
CurrentEvaluator->NextEvaluator; // in the list.
break;
}

case Evaluator::TERMINATED: { // If we've terminated a path...
++PassResult; // Record our PassResult.

// Create a new match result using the data in the current evaluator.
// If there is a problem adding the match an exception will be thrown.

AddMatchRecord(
CurrentEvaluator->StreamStartPosition,
CountOfCharacters - 1,
myTokenMatrix->Symbol(CurrentEvaluator->CurrentPosition)
);

// From Version 2 onward we're always doing deep scans...
// Having successfully recorded the result of this critter we can kill them off.

DropEvaluator(); // He's dead.
break; // Now let's keep looking.
}

case Evaluator::OUT_OF_RANGE: { // This result is really bad and
throw OutOfRange("case Evaluator::OUT_OF_RANGE:"); // probably means we have a bad matrix.
break;

// The reason we don't throw OutOfRange from within the evaluator is that we
// may want to take some other action in the future... So, we allow the evaluator
// to tell us we sent it out of range and then we decide what to do about it.

}
}
}

// At the end of this function our PassResult is either an error (which is
// reported immediately), or it is a match condition. We start out by assuming
// there will be no match. If we find one, then we reset that result... so at
// this point, all we need do is report our findings.

++CountOfCharacters; // Add one to our Character Count statistic.

// Note that from this point on, the index in the stream is one less than the
// CountOfCharacters... for example, if I've evaluated (am evaluating) one character
// then its index is 0. This will be important when we create any match records.

return PassResult; // When we're finished, return the last known result.
}

+ 546
- 0
snf_engine.hpp

@@ -0,0 +1,546 @@
// snf_engine.hpp
//
// (C) 1985-2004 MicroNeil Research Corporation
// (C) 2005-2009 ARM Research Labs, LLC.
//
// Derived from original work on cellular automation for complex pattern
// reflex engine 1985 Pete McNeil (Madscientist)
//
// Derived from rapid scripting engine (token matrix) implementation 1987
//

// This is the header file for the sniffer pattern matching engine.

// 20080305 _M - Added FlipEndian() function to convert rulebases from their
// native little-endian format to big-endian format for CPUs that need it. See
// additional work in SNFMulti to call the FlipEndian() function AFTER the
// rulebase has been authenticated but before it is put into use.

// 20070606 _M - Refactored exceptions to use base std::exception and improved
// the evaluator code to reduce the strength of safety testing from 3 compares
// per byte to 1.

// 20060531 _M - Added evaluator caching to save a few cycles by not allocating
// new memory and performing a complete initialization of an evaluator if there
// is already one handy from a previous use.

// 20021030 _M - Created.

#ifndef _MN_SNF_ENGINE
#define _MN_SNF_ENGINE

#include <cassert>
#include <stdexcept>
#include <unistd.h>
#include <cstdio>
#include <cctype>
#include <ctime>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <string>
#include <exception>
#include "mangler.hpp"
//#include "../nvwa-0.6/nvwa/debug_new.h"

using namespace std;

// 20030929 _M SYMBOL_RANGE moved to snf_engine.hpp as part of augmenting the
// capability of a match record. Match records now can decode themselves.

const int SYMBOL_RANGE = 256; // Symbol result coding modulator.

// Let's create our utility classes and structures.

// The Token class.
// This class represents the structure of a token. The rule file is, in fact,
// a token matrix. Tokens within the matrix allow the sniffer to navigate through
// a state change matrix attempting to locate special positions that indicate the
// termination of a path, or more specifically, the recognition of a string that
// has been evaluated along that path.
//
// IT IS IMPORTANT TO NOTE THAT, AS WRITTEN, THESE PROGRAMS ASSUME A 32 BIT INTEL
// ENVIRONMENT SO THAT THE TOKEN MATRIX CAN BE LOADED IN A SINGLE PASS USING A
// BINARY INPUT STREAM.

////////////////////////////////////////////////////////////////////////////////////////
// Token Declaration ///////////////////////////////////////////////////////////////////

class Token { // Token class for defining and interpreting nodes within the matrix.

public: // Beginning of Public stuff.

int Check; // The first int is a check character.
int Vector; // The second int is a vector.

// isUnused() Returns true if the token is in an unused state.

int isUnused() {
return (Check==-1 && Vector==0) ? true : false;
}

// isTermination() Returns true if the token is in a termination state.

int isTermination() {
if(Check==0 && Vector > 0)
return true;
else
return false;
}

// Symbol() Returns the symbol value for the token.

int Symbol() { return Vector; }

// Character() Returns the check character for this token.

int Character() { return Check; }

// End of Public stuff.
// Note that no constructor is needed because the default constructor will do nicely.

};
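
// For example (hypothetical values): a token with Check == -1 and Vector == 0 is an
// unused slot, while a token with Check == 0 and Vector == 12345 is a termination
// point that reports symbol 12345 for the path that reached it.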

////////////////////////////////////////////////////////////////////////////////////////
// Token Matrix Declaration ////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////
//
// The Token Matrix loads, verifies, and maintains an array of tokens for the evaluators
// to live in. This class provides safe access to the token matrix.
//
////////////////////////////////////////////////////////////////////////////////////////

class TokenMatrix {

private:

Token* Matrix; // Where we hold the token matrix.
int MatrixSize; // What size is the matrix.

public:

// Exceptions...

class BadAllocation : public runtime_error { // Exception for a bad memory allocation.
public: BadAllocation(const string& w):runtime_error(w) {}
};
class BadMatrix : public runtime_error { // Exception for invalid matrix loads.
public: BadMatrix(const string& w):runtime_error(w) {}
};
class BadFile : public runtime_error { // Exception for missing rulebase files.
public: BadFile(const string& w):runtime_error(w) {}
};
class OutOfRange : public runtime_error { // Exception for indexes out of range.
public: OutOfRange(const string& w):runtime_error(w) {}
};

// Standards...

static const int SecuritySegmentSize = 1024; // File Authentication Segment
static const int SecurityKeyBufferSize = 32; // Security Key Pad Block Size
static const int RulebaseDigestSize = 64; // Number of bytes in digest.

static const int MinimumValidMatrix = // Establish the smallest valid
SecuritySegmentSize * 2 / SecurityKeyBufferSize; // matrix size

// The first interface component checks the range and gives up the token.

Token at(int x) { // Get the token at x
if(x<0 || x>=MatrixSize) // Check to see if we're in bounds.
throw OutOfRange("(x<0 || x>=MatrixSize)"); // If we're not then throw an exception.
return Matrix[x]; // If we are then give it to them.
}

// The second interface component delivers the Matrix if it's valid so that other
// code can manipulate it more efficiently (without constantly checking bounds).

Token* getMatrix() { // Return the matrix.
if(MatrixSize==0 || Matrix==NULL) // If the matrix isn't ready then
throw BadMatrix("(MatrixSize==0 || Matrix==NULL)"); // throw an exception. If it is
return Matrix; // ready then send it out.
}

// For simplicity we simply extend the underlying Token functions by taking a
// position reference, checking its range, and returning the result.

int isUnused(int x) { // Extend Token.isUnused()
return at(x).isUnused();
}

int isTermination(int x) { // Extend Token.isTermination()
return at(x).isTermination();
}

int Symbol(int x) { // Extend Token.Symbol()
return at(x).Symbol();
}

int Character(int x) { // Extend Token.Character()
return at(x).Character();
}

// Utility functions...

int Size() { return MatrixSize; } // Returns the size of the matrix.

void Load(const char* FileName); // Loads the matrix from a file name.

void Load(string& FileName); // Loads the matrix from a file name string.

void Load(ifstream& F); // Loads the token matrix from the file.

void Validate(string& SecurityKey); // Validates the matrix with a key string.

void Verify(string& SecurityKey); // Verifies the matrix digest.

void FlipEndian(); // Converts big/little endian tokens.

// Constructors...

TokenMatrix() :
MatrixSize(0),
Matrix(NULL) { }

TokenMatrix(ifstream& F) :
MatrixSize(0),
Matrix(NULL) {
Load(F);
}

~TokenMatrix() { // The Destructor...
MatrixSize = 0; // Set the size to zero.
if(Matrix) { delete [] Matrix; Matrix = NULL; } // If we have a matrix, remove it.
}

};

/////////////////////////////////////////////////////////////////////////////////////////
// End Token Work ///////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////

// Having defined the token matrix, I now define the Evaluator class which will
// be used to follow any matching rule threads as the program scans a file.
// A new evaluator is started at each position in the input stream making all
// of the rules in the token matrix global.

// The following two values are returned by the Evaluator at every step.

const int WILD_WHITESPACE = 1; // Token code for whitespace wildcards.
const int WILD_DIGIT = 2; // Token code for digit wildcards.
const int WILD_LETTER = 3; // Token code for letter wildcards.
const int WILD_NONWHITE = 4; // Token code for non-whitespace wildcards.
const int WILD_ANYTHING = 5; // Token code for any character.
const int WILD_INLINE = 6; // Token code for any character except new line.

const int RUN_GATEWAY = 8; // Token code for run-loop gateways.

// Here are some tuning parameters

const int MaxWildRunLength = 4096; // Maximum span of "any number" wildcards.
const int MAX_EVALS = 2048; // Maximum number of evaluators.

//////////////////////////////////////////////////////////////////////////////////////////
// Evaluators and the Evaluation Matrix
//////////////////////////////////////////////////////////////////////////////////////////

class EvaluationMatrix; // We've got to pre-declare this for some compilers.

class Evaluator { // Evaluator class for following threads through the matrix.

private:

EvaluationMatrix* myEvaluationMatrix; // The evaluation matrix I live in.
Token* Matrix; // The raw token matrix I walk in.
int MatrixSize; // Size of raw token matrix.

// 20070606 _M Optimized Evaluator code by reducing the strength of the
// safety check from 3 comparisons to 1.

unsigned int PositionLimit; // Largest CurrentPosition.

// 20030216 _M Optimization conversions

inline int i_lower(); // { return myEvaluationMatrix->i_lower; }
inline bool i_isDigit(); // { return myEvaluationMatrix->i_isDigit; }
inline bool i_isSpace(); // { return myEvaluationMatrix->i_isSpace; }
inline bool i_isAlpha(); // { return myEvaluationMatrix->i_isAlpha; }

public:

// Standard Values...

enum States { // These are the possible conditions.
OUT_OF_RANGE, // We're outside the matrix - very bad.
FALLEN_OFF, // We've fallen off the path and are lost.
DOING_OK, // We're doing ok and following along.
TERMINATED // We've reached the end of our path.
};

// Attributes...

States Condition; // What state am I in? How's my health?

Evaluator* NextEvaluator; // Linked List Pointer.
int StreamStartPosition; // Indexes the position where we started.
unsigned int CurrentPosition; // Indexes the node we are surfing.

int WildRunLength; // Wildcard run length so far.

// EvaluateThis() assumes it is being given the next character along the
// path of a thread in the token matrix. It follows that thread and evaluates
// its condition.

States EvaluateThis(unsigned short int i); // Follow the next byte.

// isNoDuplicate() is used to keep us from allocating identical evaluators. This is
// key to creating buddies when working with wildcards. It prevents us from recursively
// proliferating evaluators at each new character when running in a wildcard loop.

int isNoDuplicate(int Position) { // Returns false if there is a duplicate.
if(CurrentPosition == Position) // Obviously, if I match, then there's a dup.
return false;
// If I don't match and I'm the last one then
if(NextEvaluator==NULL) // it must be true there are no dups. If there
return true; // are more to ask then I'll let them answer.
else
return NextEvaluator->isNoDuplicate(Position);
}

Evaluator(int s, EvaluationMatrix* m); // Constructor...

~Evaluator(){
if(NextEvaluator!=NULL){ // If there's more to this list then
delete NextEvaluator; // delete it.
}
NextEvaluator = NULL; // Always null on exit.
}

};

// A MatchRecord is created each time a new rule match occurs. These records form a
// linked list within the Evaluation Matrix that can be spit out after the process is
// over for reporting purposes.

class MatchRecord {
public:
int MatchStartPosition; // Where in the data stream did the match start?
int MatchEndPosition; // Where in the data stream did the match end?
int MatchSymbol; // What symbol was attached to the match rule?

inline int RuleId(){return (MatchSymbol/SYMBOL_RANGE);} // Decode RuleID
inline int RuleGroup(){return (MatchSymbol%SYMBOL_RANGE);} // Decode GroupID

MatchRecord* NextMatchRecord;

MatchRecord(int sp, int ep, int sym) { // When constructing a MatchRecord,
MatchStartPosition = sp; // you must provide all of its data.
MatchEndPosition = ep;
MatchSymbol = sym;
// Since match records are always added to
NextMatchRecord = NULL; // the end our next pointer is always NULL.
}

~MatchRecord(){
if(NextMatchRecord != NULL) // If there's more list, then delete it.
delete NextMatchRecord;
NextMatchRecord = NULL; // Clean up our pointer before leaving.
}
};
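
// For example (hypothetical value): a MatchSymbol of 12345 decodes as
// RuleId() = 12345 / 256 = 48 and RuleGroup() = 12345 % 256 = 57.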

// Now that we've created our utility classes, we'll create another class (with an instance)
// that builds a matrix to evaluate all incoming characters, manage the list, and keeps
// statistics and results from the execution process.

class EvaluationMatrix {

private:

TokenMatrix* myTokenMatrix; // Token Matrix that I evaluate with.

Evaluator* EvaluatorList; // Linked list of Evaluators.

Evaluator* CurrentEvaluator; // Current Evaluator (when checking)
Evaluator* PreviousEvaluator; // Previous Evaluator (when checking)

// Evaluator Caching Mechanism.

Evaluator* EvaluatorCache; // List of cached, ready evaluators.
Evaluator* SourceEvaluator(int s, EvaluationMatrix* m); // Get a cached or new evaluator.
void CacheEvaluator(Evaluator* e); // Cache a used evaluator.

int CountOfEvaluators; // Current count of evaluators.

int PassResult; // Result of the latest evaluation pass.

MatchRecord* LastResultInList; // Keeps track of the end of the result list.

MatchRecord* AddMatchRecord(int sp, int ep, int sym); // Add a match result.

// DropEvaluator() is called by the EvaluateThis() method whenever an evaluator
// reports the FALLEN_OFF result. The EvaluateThis() method keeps two values up
// to date - one is the current evaluator (which will be dropped) and the other is
// the previous evaluator (which will be updated to heal the list).

// When we've finished this function, the CurrentEvaluator will be on the next
// evaluator node if it exists. Therefore, the caller should skip it's normal
// list itteration code when this function has been called.

void DropEvaluator();

public:

// Exception classes...

class BadAllocation : public runtime_error { // Allocation failed exception.
public: BadAllocation(const string& w):runtime_error(w) {}
};
class MaxEvalsExceeded : public runtime_error { // Too many evaluators exception.
public: MaxEvalsExceeded(const string& w):runtime_error(w) {}
};
class OutOfRange : public runtime_error { // Out of range exception.
public: OutOfRange(const string& w):runtime_error(w) {}
};

// Attributes...

int CountOfCharacters; // How many characters have been evaluated.
int MaximumCountOfEvaluators; // Largest matrix size reached.

MatchRecord* ResultList; // List of match results.

int DeepSwitch; // true if we're doing a deep scan.

// 20030216 _M High Level Conversion Optimizers...

int i_lower; // Lower case version of byte under test.
bool i_isDigit; // true if i is a digit.
bool i_isSpace; // true if i is whitespace.
bool i_isAlpha; // true if i is alpha.

// AddEvaluator() is made public because the Evaluator object must have access
// to it in order to handle the creation of buddies as it evaluates its rules.
// Similarly the getTokens is public because evaluators must use this when they
// initialize. In a later version we will clean this up so that all of this stuff
// can be handled somewhat more privately.

Token* getTokens() { // Deliver the raw token matrix
return myTokenMatrix->getMatrix(); // for use when creating evaluators.
}

int getMatrixSize() { // Deliver the raw matrix size
return myTokenMatrix->Size(); // for use when creating evaluators.
}

Evaluator* AddEvaluator(int s, int m); // Adds a new evaluator to the top.

Evaluator* InsEvaluator(int s, int m); // Inserts a new evaluator after the
// current evaluator. (Only called by
// an existing evaluator in process...)


// isNoDuplicate(int p) checks for duplicate evaluators

int isNoDuplicate(int p) { // If there's no list there can be no
if(EvaluatorList == NULL) // duplicates so we're true. If there is
return true; // a list then we'll let the list answer.
else
return EvaluatorList->isNoDuplicate(p);
}

// EvaluateThis() Moves each evaluator with the current character and creates a new
// evaluator for the current spot in the input file to make all rules global.

int EvaluateThis(unsigned short int i);

EvaluationMatrix(TokenMatrix* m) { // Constructor w/ pointer to Token Matrix...

myTokenMatrix = m; // Grab my TokenMatrix.

EvaluatorList = NULL; // Start off with no evaluators.
EvaluatorCache = NULL; // Start off with no evaluator cache.

CurrentEvaluator = NULL; // NULL means starting at the top.
PreviousEvaluator = NULL; // NULL means previous is the top.

ResultList = NULL; // Start off with no results in our list.
LastResultInList = NULL;

CountOfCharacters = 0; // The count of characters will be zero and
MaximumCountOfEvaluators = 0; // the maximum Evaluator count will be zero
CountOfEvaluators = 0; // and the current count will also be zero.

PassResult = 0; // Initialize expecting no matches.

}

~EvaluationMatrix(){ // Destructor to clean up memory allocations.

myTokenMatrix = NULL; // Stop pointing at the TokenMatrix

// These lists know how to delete themselves.
// 20060531_M Fixed possible crash by checking for NULL before
// deleting these lists. Also added cleanup for the EvaluatorCache.

if(NULL!=EvaluatorCache) {
delete EvaluatorCache; // Delete the evaluator cache.
EvaluatorCache = NULL; // Then clear its pointer.
}

if(NULL!=EvaluatorList) {
delete EvaluatorList; // Delete the evaluator list.
EvaluatorList = NULL; // Then clear its pointer.
}

if(NULL!=ResultList) {
delete ResultList; // Delete the result list.
ResultList = NULL; // Then clear its pointer.
}
}

};

// 20060531_M Implementation of the evaluator cache is all inline.
// In place of new Evaluator() we now can use SourceEvaluator()
// In place of delete Evaluator() we now can use CacheEvaluator()
// The effect is to store previously allocated evaluators in the EvaluatorCache
// list so that they can be reused. This avoids the frequent use of
// new and delete and allows us to skip a few extra cycles for initialization
// because much of the constructor work for a new evaluator is already done
// in any cached evaluator.
//
// In practice, at least one evaluator is likely to be created and destroyed
// for each byte that is scanned. This new mechanism significantly reduces the
// number of cycles that would normally be associated with those operations by
// eliminating them most of the time. Instead of returning used memory to the
// heap during delete, the evaluator is simply added to the cache list. Instead
// of allocating new space from the heap and initializing the object, a cached
// evaluator is simply moved from the cache into production. Moving into and
// out of the cache is roughly as simple as changing a couple of pointers.

// In place of new Evaluator, we do this...

inline Evaluator* EvaluationMatrix::SourceEvaluator(int s, EvaluationMatrix* m) { // Get a cached or new evaluator.
if(NULL==EvaluatorCache) return new Evaluator(s,m); // If we have no cache, use new!
Evaluator* reuse = EvaluatorCache; // Otherwise grab a reusable one.
EvaluatorCache = reuse->NextEvaluator; // Collapse the cache by one.
reuse->NextEvaluator = NULL; // Clean it up a bit.
reuse->StreamStartPosition = s; // Record our starting point.
reuse->CurrentPosition = 0; // Reset the Current Position.
reuse->WildRunLength = 0; // Reset the run length.
reuse->Condition = Evaluator::DOING_OK; // Reset the condition.
return reuse; // Return the reusable unit.
}

// In place of delete Evaluator, we do this...

inline void EvaluationMatrix::CacheEvaluator(Evaluator* e) { // Cache a used evaluator.
e->NextEvaluator = EvaluatorCache; // Link the used evaluator
EvaluatorCache = e; // into the cache;
}

// In the above, the first evaluator added will get NULL as its NextEvaluator.
// When that first evaluator is used, the NULL pointer will return to the root
// of the EvaluatorCache list. In this regard the cache acts like a stack.

#endif


+ 21
- 0
snf_match.h

@@ -0,0 +1,21 @@
/* snf_match.h
**
** (C) Copyright 2006 ARM Research Labs, LLC.
**
** 20060121_M
**
** The Engine provides detailed match results using this structure.
*/

#ifndef _ARM_snf_match
#define _ARM_snf_match

struct snf_match {
char flag;
int symbol;
int ruleid;
int index;
int endex;
};
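
/* A minimal sketch (not part of the original header): one plausible way the engine's
** MatchRecord data could be copied into this structure. The meaning of 'flag' and the
** ruleid split are assumptions based on SYMBOL_RANGE (256) in snf_engine.hpp; they are
** not defined here, and make_snf_match_sketch is a hypothetical helper name.
*/

static struct snf_match make_snf_match_sketch(int symbol, int start, int end) {
    struct snf_match m;
    m.flag   = 'm';              /* Assumed: marks a populated match entry.      */
    m.symbol = symbol;           /* Encoded rule symbol reported by the engine.  */
    m.ruleid = symbol / 256;     /* Assumed split: symbol / SYMBOL_RANGE.        */
    m.index  = start;            /* Assumed: match start offset in the stream.   */
    m.endex  = end;              /* Assumed: match end offset in the stream.     */
    return m;
}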

#endif

+ 146
- 0
snf_sync.cpp

@@ -0,0 +1,146 @@
// snf_sync.cpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
// See snf_sync.hpp for details.

#include "snf_sync.hpp"

void snf_sync::construct() { // Encapsulate initial construction.
ClientGBUAlertInitializer.link(ClientGBUAlertHandler); // Link the alert configurators.
ServerGBUAlertInitializer.link(ServerGBUAlertHandler);
SNFWasParsed.setup(ReadWasGood); // Link our configurator to that flag.
SetupReader(); // Configure our reader.
reset(); // and initialize our data.
}

snf_sync::snf_sync() : // Constructing a blank snf_sync.
Reader("snf"), // The Reader looks for "snf"
ReadWasGood(false) { // There has been no good read yet.
construct(); // Internal wiring & initialization.
}

snf_sync::snf_sync(const char* bfr, int len) : // Constructing with a full buffer.
Reader("snf"), // Start with our blank construction.
ReadWasGood(false) {
construct(); // Internal wiring & initialization.
ConfigurationData Data(bfr, len); // Then build ConfigurationData from
Reader.interpret(Data); // the buffer and interpret it.
}

snf_sync::snf_sync(string& input) : // Constructing with a string.
Reader("snf"), // Start with our blank construction.
ReadWasGood(false) {
construct(); // Internal wiring & initialization.
ConfigurationData Data(input.c_str(), input.length()); // Then build ConfigurationData from
Reader.interpret(Data); // the string and interpret it.
}

void snf_sync::SetupReader() { // Configure the reader to recognize
Reader // the snf_sync protocol.
.atEndCall(SNFWasParsed) // Set flag to true when successful.
.Element("<!-- SNFWasParsed -->", ReadWasGood, false).End() // Trick using impossible element name.
.Element("sync")
.Element("challenge")
.Attribute("text", snf_sync_challenge_txt, "")
.End("challenge")
.Element("response")
.Attribute("nodeid", snf_sync_response_nodeid, "")
.Attribute("text", snf_sync_response_text, "")
.End("response")
.Element("error")
.Attribute("message", snf_sync_error_message, "")
.Attribute("code", snf_sync_error_code, 0)
.End("error")
.Element("rulebase")
.Attribute("utc", snf_sync_rulebase_utc, "")
.End("rulebase")
.Element("client")
.atStartCall(ClientGBUAlertInitializer)
.Element("gbu")
.atEndCall(ClientGBUAlertHandler)
.Attribute("time", ClientGBUAlertHandler.Alert_time, "")
.Attribute("ip", ClientGBUAlertHandler.Alert_ip, "")
.Attribute("t", ClientGBUAlertHandler.Alert_t, "Ignore")
.Attribute("b", ClientGBUAlertHandler.Alert_b, 0)
.Attribute("g", ClientGBUAlertHandler.Alert_g, 0)
.End("gbu")
.End("client")
.Element("server")
.atStartCall(ServerGBUAlertInitializer)
.Element("gbu")
.atEndCall(ServerGBUAlertHandler)
.Attribute("time", ServerGBUAlertHandler.Alert_time, "")
.Attribute("ip", ServerGBUAlertHandler.Alert_ip, "")
.Attribute("t", ServerGBUAlertHandler.Alert_t, "Ignore")
.Attribute("b", ServerGBUAlertHandler.Alert_b, 0)
.Attribute("g", ServerGBUAlertHandler.Alert_g, 0)
.End("gbu")
.Element("resync")
.Attribute("secs", snf_sync_server_resync_secs, -1)
.End("resync")
.End("server")
.End("sync")
.End("snf");
}

void snf_sync::reset() { // Reset the reader for new data.
ReadWasGood = false; // There has been no read yet.
Reader.initialize(); // Initialize to the defaults.
};

bool snf_sync::read(const char* bfr, int len) { // To read from a buffer we
ConfigurationData Data(bfr, len); // construct ConfigurationData from
Reader.interpret(Data); // the buffer and interpret it.
return good(); // Return true if it looked good.
}

bool snf_sync::read(string& input) { // To read from a string we
return read(input.c_str(), input.length()); // get the string's buffer and hand off
} // to our buffer read()

bool snf_sync::good() { // True if the Reader finished the
return (true == ReadWasGood); // snf element successfully.
}

bool snf_sync::bad() { // True if the Reader did not finish
return (false == ReadWasGood); // the snf element successfully.
}

void GBUAlertHandler::operator()(
ConfigurationElement& E, ConfigurationData& D) { // Add an alert.
GBUdbAlert NewAlert; // Create an alert object.
SocketAddress IPAddress; // Grab one of these for a converter.
IPAddress.setAddress(const_cast<char*>(Alert_ip.c_str())); // Convert the IP address to an int.
NewAlert.IP = IPAddress.getAddress(); // Put the IP into its place.
NewAlert.R.Bad(Alert_b); // Set the bad count on the record.
NewAlert.R.Good(Alert_g); // Set the good count on the record.
strncpy(NewAlert.UTC, Alert_time.c_str(), UTCBufferSize); // Copy the timestamp.
switch(Alert_t.at(0)) { // Use the first byte to set the flag.
case 'U': { // U means Ugly.
NewAlert.R.Flag(Ugly);
break;
}
case 'I': { // I means Ignore.
NewAlert.R.Flag(Ignore);
break;
}
case 'G': { // G means Good.
NewAlert.R.Flag(Good);
break;
}
case 'B': { // B means Bad.
NewAlert.R.Flag(Bad);
break;
}
}
AlertList.push_back(NewAlert); // Push back the new alert.
}

void GBUAlertHandler::reset() { // To reset the handler,
Alert_time = ""; // clear all of the input strings
Alert_ip = ""; // to the empty string and all of
Alert_t = ""; // the input counts to zero.
Alert_b = 0;
Alert_g = 0;
AlertList.clear(); // Clear out the list.
}
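
// A minimal usage sketch (not part of the original source): feeding one line of the sync
// protocol to the interpreter and reading a result from its surface. The XML snippet is
// a guess based on SetupReader() above; the authoritative examples live in snf_sync.xml,
// which is not shown here, and snf_sync_usage_sketch is a hypothetical name.

string snf_sync_usage_sketch() {
    string Line = "<snf><sync><challenge text='abc123'/></sync></snf>";
    snf_sync Sync(Line);                                 // Interpret the line.
    if(Sync.bad()) return "";                            // Nothing useful if it failed.
    return Sync.snf_sync_challenge_txt;                  // Otherwise hand back the
}                                                        // challenge text for a reply.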

+ 85
- 0
snf_sync.hpp

@@ -0,0 +1,85 @@
// snf_sync.hpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC.
// See www.armresearch.com for the copyright terms.
//
// SNF engine communications protocol interpreter.
// Communications are well formed xml snippets.
// See snf_sync.xml for examples.

#ifndef snf_sync_included
#define snf_sync_included

#include <list>
#include <cstring>
#include "GBUdb.hpp"
#include "networking.hpp"
#include "configuration.hpp"

class GBUAlertHandler : public Configurator {
public:
virtual void operator()(ConfigurationElement& E, ConfigurationData& D); // Add an alert handler :-)

void reset(); // Resets the list for a new run.

list<GBUdbAlert> AlertList; // Our list of alerts.

// Input variables.

string Alert_time; // time='YYYYMMDDhhmmss'
string Alert_ip; // ip='12.34.56.78'
string Alert_t; // t='Ugly', Good, Bad, Ignore
int Alert_b; // b='0'
int Alert_g; // g='0'
};

class GBUAlertInitializer : public Configurator {
private:
GBUAlertHandler* MyHandler; // Handler to reset.

public:
GBUAlertInitializer() { MyHandler = NULL; } // Init safely with null.
void link(GBUAlertHandler& H) { MyHandler = &H; } // Link to my handler.
virtual void operator()(ConfigurationElement& E, ConfigurationData& D) { // Add an alert handler :-)
if(NULL != MyHandler) { // If I know where it is
MyHandler->reset(); // I hit the reset button.
}
}
};

class snf_sync {
private:
ConfigurationElement Reader; // Our reader.
void SetupReader(); // Configure the reader.
ConfiguratorSetTrueOnComplete SNFWasParsed; // Configurator sets the ReadWasGood
bool ReadWasGood; // flag at the end of the snf element.
void construct(); // Encapsulate the initial construction.
void reset(); // Reset/initialize for the next read.

public:
snf_sync(); // Construct empty.
snf_sync(const char* bfr, int len); // Construct from buffer.
snf_sync(string& input); // Construct from string.
bool read(const char* bfr, int len); // Read from buffer.
bool read(string& input); // Read from string.

//// And now the interpreted results ////
bool good(); // True if read was good.
bool bad(); // True if read was not good.

string snf_sync_challenge_txt;
string snf_sync_response_nodeid;
string snf_sync_response_text;
string snf_sync_error_message;
int snf_sync_error_code;
string snf_sync_rulebase_utc;
int snf_sync_server_resync_secs;

GBUAlertHandler ClientGBUAlertHandler; // GBU Alerts received from client
GBUAlertInitializer ClientGBUAlertInitializer;

GBUAlertHandler ServerGBUAlertHandler; // GBU Alerts received from server
GBUAlertInitializer ServerGBUAlertInitializer;
};

#endif


+ 138
- 0
snf_xci.cpp

@@ -0,0 +1,138 @@
// snf_xci.cpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// SNF XML Command Interface
// See snf_xci.hpp for details / notes.

#include "snf_xci.hpp"

//// snf_xci Interpreter Object ////////////////////////////////////////////////

snf_xci::snf_xci() : // Constructing a blank snf_xci.
Reader("snf"), // The Reader looks for "snf"
ReadWasGood(false) { // There has been no good read yet.
SNFWasParsed.setup(ReadWasGood); // Link our configurator to that flag.
SetupReader(); // Configure our reader.
reset(); // and initialize our data.
}

snf_xci::snf_xci(const char* bfr, int len) : // Constructing with a full buffer.
Reader("snf"), // Start with our blank construction.
ReadWasGood(false) {
SNFWasParsed.setup(ReadWasGood);
SetupReader();
reset();
ConfigurationData Data(bfr, len); // Then build ConfigurationData from
Reader.interpret(Data); // the buffer and interpret it.
}

snf_xci::snf_xci(string& input) : // Constructing with a string.
Reader("snf"), // Start with our blank construction.
ReadWasGood(false) {
SNFWasParsed.setup(ReadWasGood);
SetupReader();
reset();
ConfigurationData Data(input.c_str(), input.length()); // Then build ConfigurationData from
Reader.interpret(Data); // the string and interpret it.
}

void snf_xci::SetupReader() { // Configure the reader to recognize
Reader // the snf_xci protocol.
.setInitOnInterpret()
.atEndCall(SNFWasParsed) // Set flag to true when successful.
.Element("<!-- SNFWasParsed -->", ReadWasGood, false).End() // Trick using impossible element name.
.Element("xci")
.Element("scanner")
.Element("scan")
.Attribute("file", scanner_scan_file,"")
.Attribute("xhdr", scanner_scan_xhdr, false)
.Attribute("log", scanner_scan_log, false)
.Attribute("ip", scanner_scan_ip, "")
.End("scan")
.Element("result")
.Attribute("code", scanner_result_code,0)
.Element("xhdr", scanner_result_xhdr, "")
.End("xhdr")
.Element("log", scanner_result_log, "")
.End("log")
.End("result")
.End("scanner")
.Element("gbudb")
.Element("set")
.Attribute("ip", gbudb_set_ip, "")
.Attribute("type", gbudb_set_type, "")
.Attribute("b", gbudb_set_bad_count, -1)
.Attribute("g", gbudb_set_good_count, -1)
.End("set")
.Element("good")
.Attribute("ip", gbudb_good_ip, "")
.End("good")
.Element("bad")
.Attribute("ip", gbudb_bad_ip, "")
.End("bad")
.Element("test")
.Attribute("ip", gbudb_test_ip, "")
.End("test")
.Element("drop")
.Attribute("ip", gbudb_drop_ip, "")
.End("drop")
.Element("result")
.Attribute("ip", gbudb_result_ip, "")
.Attribute("type", gbudb_result_type, "")
.Attribute("p", gbudb_result_probability, 0.0)
.Attribute("c", gbudb_result_confidence, 0.0)
.Attribute("b", gbudb_result_bad_count, -1)
.Attribute("g", gbudb_result_good_count, -1)
.Attribute("range", gbudb_result_range, "")
.Attribute("code", gbudb_result_code, 0)
.End("result")
.End("gbudb")
.Element("report")
.Element("request")
.Element("status")
.Attribute("class", report_request_status_class, "")
.End("status")
.End("request")
.Element("response", report_response, "")
.End("response")
.End("report")
.Element("server")
.Element("command", xci_server_command_content, "")
.Attribute("command", xci_server_command, "")
.End("command")
.Element("response")
.Attribute("message", xci_server_response, "")
.Attribute("code", xci_server_response_code, -1)
.End("response")
.End("server")
.Element("error")
.Attribute("message", xci_error_message, "")
.End("error")
.End("xci")
.End("snf");
}

void snf_xci::reset() { // Reset the reader for new data.
ReadWasGood = false; // There has been no read yet.
Reader.initialize(); // Initialize to the defaults.
};

bool snf_xci::read(const char* bfr, int len) { // To read from a buffer we
ConfigurationData Data(bfr, len); // construct ConfigurationData from
Reader.interpret(Data); // the buffer and interpret it.
return good(); // Return true if it looked good.
}

bool snf_xci::read(string& input) { // To read from a string we
return read(input.c_str(), input.length()); // get the string's buffer and hand off
} // to our buffer read()

bool snf_xci::good() { // True if the Reader finished the
return (true == ReadWasGood); // snf element successfully.
}

bool snf_xci::bad() { // True if the Reader did not finish
return (false == ReadWasGood); // the snf element successfully.
}
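
// A minimal usage sketch (not part of the original source): interpreting one XCI line and
// pulling a GBUdb result from the object's surface. The XML snippet is a guess based on
// SetupReader() above; real client/server traffic may differ, and snf_xci_usage_sketch is
// a hypothetical name used only for this example.

double snf_xci_usage_sketch() {
    string Line =
        "<snf><xci><gbudb><result ip='12.34.56.78' type='good' p='0.1' c='0.5'/>"
        "</gbudb></xci></snf>";
    snf_xci XCI(Line);                                   // Interpret the line.
    if(XCI.bad()) return 0.0;                            // Nothing useful if it failed.
    return XCI.gbudb_result_probability;                 // Otherwise hand back the
}                                                        // reported probability.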


+ 78
- 0
snf_xci.hpp

@@ -0,0 +1,78 @@
// snf_xci.hpp
// Copyright (C) 2006 - 2009 ARM Research Labs, LLC
// See www.armresearch.com for the copyright terms.
//
// SNF XML Command Interface
//
// SNF clients communicate with the SNF server using one-line xml statements.
// The server responds in kind. This module uses the configuration module to
// interpret those communications. In practice, a line will be read from a
// connected socket and then passed to an snf_xci object for interpretation.
// The snf_xci object parses the xml and presents the results on its surface
// in easily used variables.

#ifndef snf_xci_included
#define snf_xci_included

#include "configuration.hpp"

class snf_xci { // SNF XCI message interpreter.
private:
ConfigurationElement Reader; // Our reader.
void SetupReader(); // Configure the reader.
ConfiguratorSetTrueOnComplete SNFWasParsed; // Configurator sets the ReadWasGood
bool ReadWasGood; // flag at the end of the snf element.
void reset(); // Reset/initialize for the next read.

public:
snf_xci();
snf_xci(const char* bfr, int len);
snf_xci(string& input);
bool read(const char* bfr, int len);
bool read(string& input);

//// And now the interpreted results ////
bool good();
bool bad();

string scanner_scan_file;
bool scanner_scan_xhdr;
bool scanner_scan_log;
string scanner_scan_ip;
int scanner_result_code;
string scanner_result_xhdr;
string scanner_result_log;

string gbudb_set_ip;
string gbudb_set_type;
int gbudb_set_bad_count;
int gbudb_set_good_count;

string gbudb_good_ip;
string gbudb_bad_ip;
string gbudb_test_ip;
string gbudb_drop_ip;

string gbudb_result_ip;
string gbudb_result_type;
double gbudb_result_probability;
double gbudb_result_confidence;
int gbudb_result_bad_count;
int gbudb_result_good_count;
string gbudb_result_range;
int gbudb_result_code;

string report_request_status_class;
string report_response;

string xci_server_command;
string xci_server_command_content;
string xci_server_response;
int xci_server_response_code;

string xci_error_message;

};

#endif
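
As the header comment says, the expected pattern is to read one line from the connected socket and hand it to an snf_xci object. Below is a minimal sketch of that hand-off; the handleXCILine() wrapper is a hypothetical placeholder, and which of the surfaced fields matter depends on the statement that arrived.

#include "snf_xci.hpp"

void handleXCILine(const string& Line) { // Hypothetical handler: Line holds one
    snf_xci XCI; // XCI statement read from the socket.
    if(XCI.read(Line.c_str(), Line.length())) { // read() returns true when the snf
        int ResultCode = XCI.scanner_result_code; // element parsed; the interpreted
        string Headers = XCI.scanner_result_xhdr; // values then sit on the object's
        string LogText = XCI.scanner_result_log; // surface as plain member variables,
        string Error = XCI.xci_error_message; // ready to use directly.
    }
}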


+ 54
- 0
tcp_watchdog.cpp Прегледај датотеку

@@ -0,0 +1,54 @@
// tcp_watchdog.cpp
// Copyright (C) 2006 - 2009 MicroNeil Research Corporation
// See tcp_watchdog.hpp for details.

#include "tcp_watchdog.hpp"

const ThreadType TCPWatchdog::Type("TCPWatchdog"); // Thread type.

const ThreadState TCPWatchdog::Watching("Watching"); // State when waiting to fire.
const ThreadState TCPWatchdog::KilledSocket("KilledSocket"); // Killed The Socket.
const ThreadState TCPWatchdog::LiveAndLetBe("LiveAndLetBe"); // Shutdown without incident.

TCPWatchdog::TCPWatchdog(Socket& SocketToWatch, int Milliseconds) : // Construct with
MySocket(SocketToWatch), // a socket to watch,
MyTimeout(Milliseconds), // a time limit,
StillAlive(true) { // and a true alive flag.
run(); // Run the thread.
}

TCPWatchdog::~TCPWatchdog() { // When we go away, we
stop(); // need to stop.
}

void TCPWatchdog::reset() { // We can be reset by
MyTimeout.restart(); // restarting the timeout.
}

void TCPWatchdog::reset(int Milliseconds) { // We can also be reset by
MyTimeout.setDuration(Milliseconds); // setting a new timeout and
MyTimeout.restart(); // starting fresh.
}

void TCPWatchdog::stop() { // If we are stopped then
MyTimeout.restart(); // we restart the timeout for safety,
if(StillAlive) { // IF we're alive when we get here
CurrentThreadState(LiveAndLetBe); // we are "calling off the dog".
}
StillAlive = false; // falsify our alive flag, and
join(); // wait for our thread to end.
}

void TCPWatchdog::myTask() { // This is the job we do.
const int OneSecond = 1000; // One second in milliseconds.
Sleeper WaitATic(OneSecond); // Set up a one second sleeper.
while(StillAlive) { // While we are alive,
CurrentThreadState(Watching); // we are watching the clock.
WaitATic(); // Every second or so we will
if(MyTimeout.isExpired()) { // check to see if we've expired.
CurrentThreadState(KilledSocket); // If the clock expires - we kill!
StillAlive = false; // To do that, we turn ourselves
MySocket.close(); // off and close the socket.
}
}
}

+ 45
- 0
tcp_watchdog.hpp Прегледај датотеку

@@ -0,0 +1,45 @@
// tcp_watchdog.hpp
// Copyright (C) 2006 - 2009 MicroNeil Research Corporation
// Watchdog timer for TCP connections.
// Closes the connection if it times out.
// Theoretically, when a socket closes, anything blocked on that socket
// will receive an exception and will deal with that appropriately by
// stopping what it is doing... Can't work on a closed socket ;-)
// This allows blocking sockets to be used safely in that the application
// won't "hang" on a stopped / broken socket.

#ifndef tcp_watchdog_included
#define tcp_watchdog_included

#include "timing.hpp"
#include "threading.hpp"
#include "networking.hpp"

class TCPWatchdog : private Thread {
private:

Socket& MySocket; // Socket to watch.
Timeout MyTimeout; // Timeout value.

void myTask(); // Watchdog task.

volatile bool StillAlive; // True if we're watching.

public:

TCPWatchdog(Socket& SocketToWatch, int Milliseconds); // Create with a socket and a time limit.
~TCPWatchdog(); // Destroy by stopping the task.

void reset(); // Reset the watchdog - everything is ok.
void reset(int Milliseconds); // Reset the watchdog - use a new time.
void stop(); // Stop the watchdog - all done here.

const static ThreadType Type; // The thread's type.

const static ThreadState Watching; // State when waiting to fire.
const static ThreadState KilledSocket; // Killed The Socket.
const static ThreadState LiveAndLetBe; // Shutdown without incident.

};

#endif
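
A minimal usage sketch of the pattern described in the header comment above: a TCPWatchdog guards a blocking exchange so a stalled peer cannot hang the application. The serviceClient() wrapper, the readLine() helper, the empty-line convention, and the 30 second limit are hypothetical placeholders, not part of this source.

#include <string>
#include "tcp_watchdog.hpp"

using namespace std;

string readLine(Socket& Connection); // Hypothetical blocking helper that returns
                                     // one line from the peer (empty when done).

void serviceClient(Socket& Connection) { // Hypothetical wrapper for one client session.
    TCPWatchdog Dog(Connection, 30000); // Watch this socket; 30 seconds is illustrative.
    try {
        for(;;) {
            string Line = readLine(Connection); // Block waiting for the next line.
            Dog.reset(); // Any activity pushes the deadline back.
            if(0 == Line.length()) break; // Peer finished politely; we're done.
        }
    }
    catch(...) { // If the watchdog expired it closed the socket,
    } // so the blocked call throws instead of hanging.
    Dog.stop(); // Call off the dog before leaving.
}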
