hexsha
stringlengths 40
40
| size
int64 22
2.4M
| ext
stringclasses 5
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
260
| max_stars_repo_name
stringlengths 5
109
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
9
| max_stars_count
float64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
260
| max_issues_repo_name
stringlengths 5
109
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
9
| max_issues_count
float64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
260
| max_forks_repo_name
stringlengths 5
109
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
9
| max_forks_count
float64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 22
2.4M
| avg_line_length
float64 5
169k
| max_line_length
int64 5
786k
| alphanum_fraction
float64 0.06
0.95
| matches
listlengths 1
11
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c94dec3c07486181a8733f8ed430fc461a21bf5a
| 960
|
h
|
C
|
zengine/zengine/zengine_plugin_factory_manager.h
|
wubenqi/zengine
|
2c6f942c4b79a42c8b19c850579f25bb7e78e0d7
|
[
"Apache-2.0"
] | 20
|
2015-01-20T12:48:15.000Z
|
2021-07-23T09:48:41.000Z
|
zengine/zengine/zengine_plugin_factory_manager.h
|
wubenqi/zengine
|
2c6f942c4b79a42c8b19c850579f25bb7e78e0d7
|
[
"Apache-2.0"
] | 1
|
2015-03-12T01:30:47.000Z
|
2015-03-12T02:43:09.000Z
|
zengine/zengine/zengine_plugin_factory_manager.h
|
wubenqi/zengine
|
2c6f942c4b79a42c8b19c850579f25bb7e78e0d7
|
[
"Apache-2.0"
] | 9
|
2015-07-17T09:25:43.000Z
|
2020-06-17T07:37:32.000Z
|
// Copyright (C) 2012 by wubenqi
// Distributable under the terms of either the Apache License (Version 2.0) or
// the GNU Lesser General Public License, as specified in the COPYING file.
//
// By: wubenqi<[email protected]>
//
#ifndef ZENGINE_ZENGINE_PLUGIN_FACTORY_MANAGER_H_
#define ZENGINE_ZENGINE_PLUGIN_FACTORY_MANAGER_H_
#pragma once
#include <string>
#include <vector>
#include "base/memory/singleton.h"
#include "zengine/zengine_plugin.h"
namespace zengine {
class Plugin;
class PluginFactory;
// Process-wide registry of plugin factories; creates Plugin instances by
// module name and tears the factories down on shutdown.
class PluginFactoryManager {
public:
// Lazily-created global instance, backed by base's Singleton<>.
static PluginFactoryManager* GetInstance() {
return Singleton<PluginFactoryManager>::get();
}
// Populates module_factorys_ -- implementation not visible in this header.
void Initialize();
// Returns a Plugin built by the factory registered under |module_name|;
// presumably NULL when no such factory exists -- TODO confirm in the .cc.
Plugin* CreateInstance(const std::string& module_name);
// Releases registered factories; also called from the destructor.
void Shutdown();
private:
friend struct DefaultSingletonTraits<PluginFactoryManager>;
PluginFactoryManager() {}
~PluginFactoryManager() {
Shutdown();
}
// Registered factories (sic: "factorys"); cleaned up via Shutdown().
std::vector<PluginFactory*> module_factorys_;
};
}
#endif
| 20.869565
| 79
| 0.757292
|
[
"vector"
] |
c954773e86dd8de4b9a96c1b7025ac347b1af2c3
| 6,437
|
h
|
C
|
extract_misc/brainvisa_ext/RII_Struct3D-4.1.0/RicUtil/src/RicPoint.h
|
binarybottle/mindboggle_sidelined
|
1431d4877f4ceae384486fb66798bc22e6471af7
|
[
"Apache-2.0"
] | 3
|
2019-07-20T05:36:03.000Z
|
2020-12-23T07:47:43.000Z
|
extract_misc/brainvisa_ext/RII_Struct3D-4.1.0/RicUtil/src/RicPoint.h
|
binarybottle/mindboggle_sidelined
|
1431d4877f4ceae384486fb66798bc22e6471af7
|
[
"Apache-2.0"
] | 2
|
2020-11-30T10:18:42.000Z
|
2020-12-24T06:29:47.000Z
|
extract_misc/brainvisa_ext/RII_Struct3D-4.1.0/RicUtil/src/RicPoint.h
|
binarybottle/mindboggle_sidelined
|
1431d4877f4ceae384486fb66798bc22e6471af7
|
[
"Apache-2.0"
] | null | null | null |
// 3D Vector class from 3dKindoms - thanks
// Just slightly modified for RIC use
// Point, IPoint and DPoint classes are defined
#ifndef _RicPoint_h
#define _RicPoint_h
#include <math.h>
/// Single-precision XYZ vector (adapted from the 3dKingdoms class):
/// component arithmetic, dot/cross products, rotation by the 3x3 part of a
/// 4x4 matrix, magnitude/distance, and in-place normalization.
class Point
{
public:
	// Components.
	float x, y, z;

	/// Component-wise constructor.
	Point( float ax, float ay, float az ) : x( ax ), y( ay ), z( az )
	{
	}

	/// Default: the origin.
	Point( ) : x( 0 ), y( 0 ), z( 0 )
	{
	}

	/// Exact component equality (no epsilon).
	inline bool operator== ( const Point& rhs ) const
	{
		return x == rhs.x && y == rhs.y && z == rhs.z;
	}

	inline Point operator+ ( const Point& rhs ) const
	{
		return Point( x + rhs.x, y + rhs.y, z + rhs.z );
	}

	inline Point operator- ( const Point& rhs ) const
	{
		return Point( x - rhs.x, y - rhs.y, z - rhs.z );
	}

	/// Negation.
	inline Point operator- ( ) const
	{
		return Point( -x, -y, -z );
	}

	/// Scalar division; one divide, then multiply by the reciprocal.
	inline Point operator/ ( float s ) const
	{
		const float inv = 1.0f / s;
		return Point( x * inv, y * inv, z * inv );
	}

	/// Component-wise division.
	inline Point operator/ ( const Point& rhs ) const
	{
		return Point( x / rhs.x, y / rhs.y, z / rhs.z );
	}

	/// Component-wise (Hadamard) product -- NOT the dot product.
	inline Point operator* ( const Point& rhs ) const
	{
		return Point( x * rhs.x, y * rhs.y, z * rhs.z );
	}

	inline Point operator* ( float s ) const
	{
		return Point( x * s, y * s, z * s );
	}

	inline void operator+= ( const Point& rhs )
	{
		x += rhs.x;
		y += rhs.y;
		z += rhs.z;
	}

	inline void operator-= ( const Point& rhs )
	{
		x -= rhs.x;
		y -= rhs.y;
		z -= rhs.z;
	}

	/// Indexed access: 0 -> x, 1 -> y, anything else -> z.
	/// Returns a copy, so it cannot be used for assignment.
	inline float operator[] ( int i )
	{
		switch ( i )
		{
			case 0:  return x;
			case 1:  return y;
			default: return z;
		}
	}

	/// Dot (scalar) product.
	inline float Dot( const Point& other ) const
	{
		return other.x * x + other.y * y + other.z * z;
	}

	/// Right-handed cross product: *this x rhs.
	inline Point CrossProduct( const Point& rhs ) const
	{
		return Point( y * rhs.z - z * rhs.y,
		              z * rhs.x - x * rhs.z,
		              x * rhs.y - y * rhs.x );
	}

	/// Rotate by the upper-left 3x3 of a 4x4 matrix stored as 16 floats
	/// (translation entries of m are ignored).
	Point RotByMatrix( const float m[16] ) const
	{
		return Point( x * m[0] + y * m[4] + z * m[8],
		              x * m[1] + y * m[5] + z * m[9],
		              x * m[2] + y * m[6] + z * m[10] );
	}

	/// Euclidean length (needs math.h for sqrtf).
	float Magnitude( ) const
	{
		return sqrtf( x * x + y * y + z * z );
	}

	/// Euclidean distance to another point.
	float Distance( const Point& other ) const
	{
		return ( *this - other ).Magnitude();
	}

	/// Scale to unit length in place; the zero vector is left untouched.
	inline void Normalize( )
	{
		const float len2 = x * x + y * y + z * z;
		if ( len2 == 0 )
			return;
		const float scale = 1.0f / sqrtf( len2 );
		x *= scale;
		y *= scale;
		z *= scale;
	}
};
/// Double-precision XYZ vector; mirrors Point but stores doubles and uses
/// sqrt instead of sqrtf.
class DPoint
{
public:
	// Components.
	double x, y, z;

	/// Component-wise constructor.
	DPoint( double ax, double ay, double az ) : x( ax ), y( ay ), z( az )
	{
	}

	/// Default: the origin.
	DPoint( ) : x( 0 ), y( 0 ), z( 0 )
	{
	}

	/// Exact component equality (no epsilon).
	inline bool operator== ( const DPoint& rhs ) const
	{
		return x == rhs.x && y == rhs.y && z == rhs.z;
	}

	inline DPoint operator+ ( const DPoint& rhs ) const
	{
		return DPoint( x + rhs.x, y + rhs.y, z + rhs.z );
	}

	inline DPoint operator- ( const DPoint& rhs ) const
	{
		return DPoint( x - rhs.x, y - rhs.y, z - rhs.z );
	}

	/// Negation.
	inline DPoint operator- ( ) const
	{
		return DPoint( -x, -y, -z );
	}

	/// Scalar division; one divide, then multiply by the reciprocal.
	inline DPoint operator/ ( double s ) const
	{
		const double inv = 1.0 / s;
		return DPoint( x * inv, y * inv, z * inv );
	}

	/// Component-wise division.
	inline DPoint operator/ ( const DPoint& rhs ) const
	{
		return DPoint( x / rhs.x, y / rhs.y, z / rhs.z );
	}

	/// Component-wise (Hadamard) product -- NOT the dot product.
	inline DPoint operator* ( const DPoint& rhs ) const
	{
		return DPoint( x * rhs.x, y * rhs.y, z * rhs.z );
	}

	inline DPoint operator* ( double s ) const
	{
		return DPoint( x * s, y * s, z * s );
	}

	inline void operator+= ( const DPoint& rhs )
	{
		x += rhs.x;
		y += rhs.y;
		z += rhs.z;
	}

	inline void operator-= ( const DPoint& rhs )
	{
		x -= rhs.x;
		y -= rhs.y;
		z -= rhs.z;
	}

	/// Indexed access: 0 -> x, 1 -> y, anything else -> z.
	/// Returns a copy, so it cannot be used for assignment.
	inline double operator[] ( int i )
	{
		switch ( i )
		{
			case 0:  return x;
			case 1:  return y;
			default: return z;
		}
	}

	/// Dot (scalar) product.
	inline double Dot( const DPoint& other ) const
	{
		return other.x * x + other.y * y + other.z * z;
	}

	/// Right-handed cross product: *this x rhs.
	inline DPoint CrossProduct( const DPoint& rhs ) const
	{
		return DPoint( y * rhs.z - z * rhs.y,
		               z * rhs.x - x * rhs.z,
		               x * rhs.y - y * rhs.x );
	}

	/// Rotate by the upper-left 3x3 of a 4x4 matrix stored as 16 doubles
	/// (translation entries of m are ignored).
	DPoint RotByMatrix( const double m[16] ) const
	{
		return DPoint( x * m[0] + y * m[4] + z * m[8],
		               x * m[1] + y * m[5] + z * m[9],
		               x * m[2] + y * m[6] + z * m[10] );
	}

	/// Euclidean length (needs math.h for sqrt).
	double Magnitude( ) const
	{
		return sqrt( x * x + y * y + z * z );
	}

	/// Euclidean distance to another point.
	double Distance( const DPoint& other ) const
	{
		return ( *this - other ).Magnitude();
	}

	/// Scale to unit length in place; the zero vector is left untouched.
	inline void Normalize( )
	{
		const double len2 = x * x + y * y + z * z;
		if ( len2 == 0 )
			return;
		const double scale = 1.0 / sqrt( len2 );
		x *= scale;
		y *= scale;
		z *= scale;
	}
};
/// Integer XYZ vector: exact component arithmetic plus a rounded-to-nearest
/// integer magnitude/distance (returned as float, matching the original API).
class IPoint
{
public:
	// Components.
	int x, y, z;

	/// Component-wise constructor.
	IPoint( int ax, int ay, int az ) : x( ax ), y( ay ), z( az )
	{
	}

	/// Default: the origin.
	IPoint( ) : x( 0 ), y( 0 ), z( 0 )
	{
	}

	inline bool operator== ( const IPoint& rhs ) const
	{
		return x == rhs.x && y == rhs.y && z == rhs.z;
	}

	inline IPoint operator+ ( const IPoint& rhs ) const
	{
		return IPoint( x + rhs.x, y + rhs.y, z + rhs.z );
	}

	inline IPoint operator- ( const IPoint& rhs ) const
	{
		return IPoint( x - rhs.x, y - rhs.y, z - rhs.z );
	}

	/// Negation.
	inline IPoint operator- ( ) const
	{
		return IPoint( -x, -y, -z );
	}

	/// Scalar division; plain integer division, truncating toward zero.
	inline IPoint operator/ ( int s ) const
	{
		return IPoint( x / s, y / s, z / s );
	}

	/// Component-wise (integer) division.
	inline IPoint operator/ ( const IPoint& rhs ) const
	{
		return IPoint( x / rhs.x, y / rhs.y, z / rhs.z );
	}

	/// Component-wise (Hadamard) product -- NOT the dot product.
	inline IPoint operator* ( const IPoint& rhs ) const
	{
		return IPoint( x * rhs.x, y * rhs.y, z * rhs.z );
	}

	inline IPoint operator* ( int s ) const
	{
		return IPoint( x * s, y * s, z * s );
	}

	inline void operator+= ( const IPoint& rhs )
	{
		x += rhs.x;
		y += rhs.y;
		z += rhs.z;
	}

	inline void operator-= ( const IPoint& rhs )
	{
		x -= rhs.x;
		y -= rhs.y;
		z -= rhs.z;
	}

	/// Indexed access: 0 -> x, 1 -> y, anything else -> z.
	/// Returns a copy, so it cannot be used for assignment.
	inline int operator[] ( int i )
	{
		switch ( i )
		{
			case 0:  return x;
			case 1:  return y;
			default: return z;
		}
	}

	/// Length rounded to the nearest whole number, returned as float.
	/// NOTE(review): x*x + y*y + z*z is int arithmetic, so very large
	/// components can overflow before the conversion to float -- confirm
	/// expected component ranges with callers.
	float Magnitude( ) const
	{
		return (int)( 0.5 + sqrtf( x * x + y * y + z * z ) );
	}

	/// Rounded distance to another point (see Magnitude).
	float Distance( const IPoint& other ) const
	{
		return ( *this - other ).Magnitude();
	}
};
/// Narrowing conversion: DPoint (double) -> Point (float).
inline Point DP2P(DPoint dp)
{
	return Point( (float)dp.x, (float)dp.y, (float)dp.z );
}
/// Widening conversion: Point (float) -> DPoint (double).
inline DPoint P2DP(Point p)
{
	return DPoint( (double)p.x, (double)p.y, (double)p.z );
}
#endif
| 17.635616
| 76
| 0.553363
|
[
"vector",
"3d"
] |
c95794305355308e48ad29c0ba9c3124ffc7e389
| 881
|
h
|
C
|
include/modes/script.h
|
scaryrawr/tofi
|
15b3757c4d492d5bbc7f57aef94f582549d2bef3
|
[
"MIT"
] | 1
|
2020-08-03T18:57:01.000Z
|
2020-08-03T18:57:01.000Z
|
include/modes/script.h
|
scaryrawr/tofi
|
15b3757c4d492d5bbc7f57aef94f582549d2bef3
|
[
"MIT"
] | 1
|
2021-03-07T21:32:10.000Z
|
2021-03-08T13:56:10.000Z
|
include/modes/script.h
|
scaryrawr/tofi
|
15b3757c4d492d5bbc7f57aef94f582549d2bef3
|
[
"MIT"
] | null | null | null |
#pragma once
#include "../mode.h"
#include <future>
#include <string>
#include <string_view>
#include <vector>
namespace tofi
{
namespace modes
{
/**
* @brief Loads a script and will recall script with selected output until no output is returned.
*
*/
class script : public Mode
{
public:
// name: display name for this mode; script: the command to invoke.
script(std::string_view name, std::string_view script);
// Mode interface: human-readable name of this mode.
std::wstring Name() const override
{
return m_name;
}
// Mode interface: current entries; m_loader suggests they are produced
// asynchronously -- confirm in the implementation.
const Entries &Results() override;
// Mode interface: re-runs the script with the selected entry; the PostExec
// result presumably tells the launcher whether to continue -- TODO confirm
// against the Mode base class.
PostExec Execute(const Entry &result, const std::wstring &) override;
private:
std::wstring m_name; // mode display name (wide string for the UI)
std::string m_script; // script/command line to execute
std::future<Entries> m_loader; // async producer of entries
Entries m_results; // entries last handed out by Results()
};
} // namespace modes
} // namespace tofi
| 22.025
| 105
| 0.54597
|
[
"vector"
] |
c959260aafb200565e01abe10ac20a084c00d721
| 1,121
|
h
|
C
|
src/pattern.h
|
ppearson/Sniffle
|
d4c6232fb438964e9f85957c7bd1a57c2f33e0cd
|
[
"Apache-2.0"
] | 1
|
2018-03-26T07:24:22.000Z
|
2018-03-26T07:24:22.000Z
|
src/pattern.h
|
ppearson/Sniffle
|
d4c6232fb438964e9f85957c7bd1a57c2f33e0cd
|
[
"Apache-2.0"
] | null | null | null |
src/pattern.h
|
ppearson/Sniffle
|
d4c6232fb438964e9f85957c7bd1a57c2f33e0cd
|
[
"Apache-2.0"
] | null | null | null |
/*
Sniffle
Copyright 2018-2019 Peter Pearson.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
---------
*/
#ifndef PATTERN_H
#define PATTERN_H
#include <string>
#include <vector>
// Parsed representation of a user-supplied search pattern: a classification
// plus the path pieces the pattern was decomposed into.
struct PatternSearch
{
enum PatternType
{
ePatternUnknown,
ePatternSingleFile, // don't actually search, just try to open, mainly for processing known files
ePatternSimple,
ePatternWildcardDir,
ePatternError
};
// Starts out unclassified; a later parsing step presumably sets type and
// the fields below -- confirm against the code that builds these.
PatternSearch() : type(ePatternUnknown)
{
}
PatternType type;
// Literal directory prefix the search starts from.
std::string baseSearchPath;
// Wildcard component matching a directory level (wildcard-dir patterns).
std::string dirWildcardMatch;
// Path components remaining after the wildcard directory.
std::vector<std::string> dirRemainders;
// Filename (or file wildcard) to match.
std::string fileMatch;
};
#endif // PATTERN_H
| 21.557692
| 99
| 0.748439
|
[
"vector"
] |
c95970bc7b4a33d3fe7d419d1edc9e7221fd442b
| 867
|
h
|
C
|
RPerformanceTracking/Private/_RPTTrackingManager.h
|
donnie-jp/ios-perftracking
|
07e469055707b4b332770c2c008846df589ec16d
|
[
"MIT"
] | null | null | null |
RPerformanceTracking/Private/_RPTTrackingManager.h
|
donnie-jp/ios-perftracking
|
07e469055707b4b332770c2c008846df589ec16d
|
[
"MIT"
] | null | null | null |
RPerformanceTracking/Private/_RPTTrackingManager.h
|
donnie-jp/ios-perftracking
|
07e469055707b4b332770c2c008846df589ec16d
|
[
"MIT"
] | null | null | null |
#import <RPerformanceTracking/RPTDefines.h>
NS_ASSUME_NONNULL_BEGIN
@class _RPTConfiguration, _RPTRingBuffer, _RPTTracker, _RPTSender;
// Private singleton coordinating the performance-tracking pipeline:
// configuration, measurement ring buffer, tracker, and sender.
RPT_EXPORT @interface _RPTTrackingManager : NSObject
// Collaborators owned by the manager (read-only to clients).
@property (nonatomic, readonly) _RPTConfiguration *configuration;
@property (nonatomic, readonly) _RPTRingBuffer *ringBuffer;
@property (nonatomic, readonly) _RPTTracker *tracker;
@property (nonatomic, readonly) _RPTSender *sender;
// Name of the screen currently tracked; copied so later caller mutations
// do not leak in.
@property (nonatomic, copy) NSString *currentScreen;
// YES when method swizzling must not be installed -- where this gets set is
// not visible in this header; confirm in the implementation.
@property (nonatomic, readonly) BOOL disableSwizzling;
// Shared singleton accessor.
+ (instancetype)sharedInstance;
// Begins tracking the named metric.
- (void)startMetric:(NSString *)metric;
// Extends the lifetime of the metric currently in flight.
- (void)prolongMetric;
// Brackets a named measurement, optionally associated with an object.
- (void)startMeasurement:(NSString *)measurement object:(nullable NSObject *)object;
- (void)endMeasurement:(NSString *)measurement object:(nullable NSObject *)object;
@end
NS_ASSUME_NONNULL_END
| 36.125
| 84
| 0.768166
|
[
"object"
] |
c95bdf308a6beff0b16f1218eb790e13fd336b15
| 288
|
h
|
C
|
sort_isomers/sorting_algorithms.h
|
ElenaKusevska/dGr_from_opt_and_sp_results
|
c925b0c09d553a7fcbe884e070285e019cd270ed
|
[
"MIT"
] | null | null | null |
sort_isomers/sorting_algorithms.h
|
ElenaKusevska/dGr_from_opt_and_sp_results
|
c925b0c09d553a7fcbe884e070285e019cd270ed
|
[
"MIT"
] | null | null | null |
sort_isomers/sorting_algorithms.h
|
ElenaKusevska/dGr_from_opt_and_sp_results
|
c925b0c09d553a7fcbe884e070285e019cd270ed
|
[
"MIT"
] | null | null | null |
#ifndef SORTING_ALGORITHMS_H
#define SORTING_ALGORITHMS_H
#include <vector>
#include <string>
// Sorts the isomer names and, in lockstep, the parallel result arrays
// (optimization energies, free energies, frequencies, CPU times).
// Return value: presumably a status code -- confirm in the implementation.
int bubble_sort(std::vector<std::string>& isomers,
                std::vector<double>& E_opt, std::vector<double>& G_opt,
                std::vector<double>& freq, std::vector<double>& cpu_time);
#endif
| 26.181818
| 65
| 0.708333
|
[
"vector"
] |
c96470ac08f9036e2448f020abf5f874af9b36a4
| 1,420
|
h
|
C
|
include/shader.h
|
JustSlavic/gir1
|
b952bf73ffc2005d23772cb94358a178243847ac
|
[
"MIT"
] | null | null | null |
include/shader.h
|
JustSlavic/gir1
|
b952bf73ffc2005d23772cb94358a178243847ac
|
[
"MIT"
] | null | null | null |
include/shader.h
|
JustSlavic/gir1
|
b952bf73ffc2005d23772cb94358a178243847ac
|
[
"MIT"
] | null | null | null |
#ifndef GIR1_SHADER_H
#define GIR1_SHADER_H
#include <unordered_map>
#include <glm/glm.hpp>
// GLSL shader-program wrapper: holds per-stage sources, compiles/links them,
// caches uniform locations, and exposes chainable typed uniform setters.
struct Shader {
// Pipeline stages this wrapper supports.
enum class Type {
Vertex,
Fragment
};
// Cached uniform handle (the location integer the driver reports).
struct Uniform {
int location;
Uniform(int);
};
Shader() = default;
Shader(const Shader&) = delete; // owns GL object `id`; copying would double-free
Shader(Shader&&) = default;
~Shader(); // presumably deletes the GL program -- confirm in the .cpp
unsigned int id = 0; // GL program object; 0 means not yet created
std::unordered_map<Type, std::string> sources; // GLSL source text per stage
std::unordered_map<std::string, Uniform> uniform_cache; // name -> location
// Loads the source file for the given stage; returns *this for chaining.
Shader& load_shader(Type type, const char *filename);
// Compiles and links every loaded stage.
Shader& compile();
// Looks up a uniform by name (uniform_cache suggests results are cached --
// confirm in the .cpp).
Uniform get_uniform(const char *name);
// Typed setters; each comes as a by-handle and a by-name overload.
Shader& set_uniform_1i(Uniform uniform, int x);
Shader& set_uniform_1i(const char *name, int x);
Shader& set_uniform_3f(Uniform uniform, float x1, float x2, float x3);
Shader& set_uniform_3f(const char *name, float x1, float x2, float x3);
Shader& set_uniform_vec3f(Uniform uniform, const glm::vec3 &vector);
Shader& set_uniform_vec3f(const char *name, const glm::vec3 &vector);
Shader& set_uniform_4f(Uniform uniform, float x1, float x2, float x3, float x4);
Shader& set_uniform_4f(const char *name, float x1, float x2, float x3, float x4);
Shader& set_uniform_mat4f(Uniform uniform, const glm::mat4& matrix);
Shader& set_uniform_mat4f(const char *name, const glm::mat4& matrix);
// Makes this program current / restores the default program.
Shader& bind();
static void unbind();
};
#endif // GIR1_SHADER_H
| 25.818182
| 85
| 0.674648
|
[
"vector"
] |
c96c7e24671664c2349733dbf7716cc1f42480d2
| 580
|
h
|
C
|
UM Laundry/UM_LaundryController.h
|
arichiv/um.laundry
|
b67187b5130af6d044ba4be1344d4f1c5245b0ba
|
[
"MIT"
] | 1
|
2018-05-31T03:55:56.000Z
|
2018-05-31T03:55:56.000Z
|
UM Laundry/UM_LaundryController.h
|
arichiv/um.laundry
|
b67187b5130af6d044ba4be1344d4f1c5245b0ba
|
[
"MIT"
] | null | null | null |
UM Laundry/UM_LaundryController.h
|
arichiv/um.laundry
|
b67187b5130af6d044ba4be1344d4f1c5245b0ba
|
[
"MIT"
] | null | null | null |
//
// UM_LaundryController.h
// UM Laundry
//
// Created by Ari Chivukula on 8/7/11.
// Copyright (c) 2013 Ari Chivukula. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "UM_LaundryModel.h"
// Table-view controller listing laundry items (children of `father`) and
// their machine status; also serves as alert- and action-sheet delegate.
@interface UM_LaundryController : UITableViewController <UIAlertViewDelegate, UIActionSheetDelegate> {
dispatch_queue_t queue; // dispatch queue, presumably for background refreshes -- confirm in the .m
Model* model; // backing data model
Item* father; // parent item whose children are displayed
Item* selected; // row the user last selected
NSMutableArray* items; // items currently shown in the table
MachineStatus status; // last known machine status
}
// Initializer: shows the children of _father.
-(id) initWithFather:(Item*)_father;
// Segmented-control action switching the displayed group.
-(void) setGroup:(UISegmentedControl *)sender;
// Redraws the table from the current data.
-(void) updateDisplay;
// Refreshes the underlying data.
-(void) updateData;
@end
| 21.481481
| 102
| 0.718966
|
[
"model"
] |
c96e9bd6257e249e30175e343cfdc9e728931e7e
| 16,962
|
c
|
C
|
src/likelihood.c
|
cboettig/wrightscape
|
93947673f4342266acf5af667141bd466de13b3a
|
[
"CC0-1.0"
] | null | null | null |
src/likelihood.c
|
cboettig/wrightscape
|
93947673f4342266acf5af667141bd466de13b3a
|
[
"CC0-1.0"
] | null | null | null |
src/likelihood.c
|
cboettig/wrightscape
|
93947673f4342266acf5af667141bd466de13b3a
|
[
"CC0-1.0"
] | null | null | null |
/**
* @file likelihood.c
* @brief calculate the likelihood for general multitype OU processes on a phylogenetic tree
* @author Carl Boettiger <[email protected]>
* @version 0.1
* @date 2011-04-22
*
* @section LICENSE
*
* Copyright (C)
* 2011 - Carl Boettiger
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include "likelihood.h"
/** get the last common ancestor of two nodes
* This version isn't particularly efficient, but the
* calculation can in principle be done only once for
* a given tree and stored as a matrix that is passed
* to the likelihood function.
*/
int
get_lca(int i, int j, int n_nodes, const int * ancestor,
	const double * branch_length, double * sep)
{
	/* ancestor_list holds i's path to the root (zero-filled; safe because
	 * node 0 -- the root -- terminates every path).  time_to_ancestor[k]
	 * is the branch-length distance from i to ancestor_list[k].
	 * BUGFIX: time_to_ancestor is written at index k+1 below, and a path
	 * on a chain-like tree can contain all n_nodes nodes, so k+1 can reach
	 * n_nodes; the original n_nodes-sized allocation overflowed by one.
	 * Allocate one extra slot. */
	int * ancestor_list = (int *) calloc(n_nodes, sizeof(int));
	double * time_to_ancestor = (double *) calloc(n_nodes + 1, sizeof(double));
	int k = 0, s = 0;

	/* Walk i up to the root, recording each node and the cumulative
	 * distance from the original i. */
	while(1){
		ancestor_list[k] = i;
		time_to_ancestor[k+1] = time_to_ancestor[k] + branch_length[i];
		if(i==0) break;
		i = ancestor[i];
		k++;
	}
	/* Walk j toward the root until it first appears in i's path; that
	 * node is the last common ancestor. */
	while(1){
		for(k=0; k<n_nodes; k++){
			if(j == ancestor_list[k]){
				s = j;
				j = 0;
				break;
			}
		}
		if(j==0) break;
		j = ancestor[j];
	}
	/* Distance from the original i up to the LCA. */
	*sep = time_to_ancestor[k];
	free(time_to_ancestor);
	free(ancestor_list);
	return s;
}
/** Age of node i: total branch length accumulated from the root down to i.
 * Used by calc_mean and calc_var. */
double
node_age(int i, const int * ancestor, const double * branch_length)
{
	double age = 0.0;
	/* climb toward the root (ancestor < 0), summing branch lengths */
	for( ; ancestor[i] >= 0; i = ancestor[i] )
		age += branch_length[i];
	return age;
}
/**
 * @brief Calculate the log normal likelihood given the vector
 * of mean square differences and the variance matrix
 * @param n dimension
 * @param X_EX mean square differences, vector length n
 * @param V variance matrix, n by n matrix (as a 1d array); clobbered by
 *          the in-place LU decomposition
 *
 * @return Log likelihood, or GSL_NEGINF when the LU decomposition reports
 *         an error
 */
double log_normal_lik(int n, double * X_EX, double * V)
{
	gsl_matrix * V_inverse = gsl_matrix_alloc(n,n);
	gsl_permutation * p = gsl_permutation_alloc (n);
	gsl_matrix * ANS = gsl_matrix_alloc(1,n);
	gsl_matrix * ANS2 = gsl_matrix_alloc(1,1);
	double V_det, Xt_Vi_X;
	int signum;
	int status; /* nonzero when the decomposition fails */
	gsl_matrix_view V_view = gsl_matrix_view_array(V, n, n);
	gsl_matrix_view DIFF = gsl_matrix_view_array(X_EX, n, 1);
	status = gsl_linalg_LU_decomp (&V_view.matrix, p, &signum);
	if(status)
	{
		/* BUGFIX: the original returned here without releasing the four
		 * GSL allocations above, leaking on every failed decomposition. */
		gsl_matrix_free(ANS);
		gsl_matrix_free(ANS2);
		gsl_matrix_free(V_inverse);
		gsl_permutation_free(p);
		return GSL_NEGINF;
	}
	gsl_linalg_LU_invert(&V_view.matrix, p, V_inverse);
	V_det = gsl_linalg_LU_det(&V_view.matrix,signum);
	/**@f$ -2 log L = @f$
	* @f$ (X - E(X) )^T V^{-1} (X-E(X) ) + N\log(2\pi) + \log(\det V) @f$ */
	/* Consider using appropriate blas optimized multiplication,
	* not the general matrix-matrix method (for greater speed) */
	gsl_blas_dgemm (CblasTrans, CblasNoTrans,
			1.0, &DIFF.matrix, V_inverse,
			0.0, ANS);
	gsl_blas_dgemm (CblasNoTrans, CblasNoTrans,
			1.0, ANS, &DIFF.matrix,
			0.0, ANS2);
	Xt_Vi_X = gsl_matrix_get(ANS2, 0, 0);
	gsl_matrix_free(ANS);
	gsl_matrix_free(ANS2);
	gsl_matrix_free(V_inverse);
	gsl_permutation_free(p);
	return -Xt_Vi_X/2. - n*log(2*M_PI)/2. - log(V_det)/2.;
}
/** Calculate the row of the gamma matrix for tip i
* @param tips a vector giving the node number of each tip (data)
* @param n_tips the number of tips (data)
* @param alpha vector of alpha parameters for each regime (data)
* @param regimes the regime of each branch (data)
* @param branch_length vector of branch lengths of each node (data)
* @param gamma_matrix the returned matrix (well, a pointer to it)
* @details in gamma_matrix[i,j], i is the node-number of the tip
* whose history we are tracking, j is any other node. Only for j's
* in the ancestry of i will gamma[i,j] have entries. Those entries
* are simply the sum of branch lengths times alpha values of that node */
void calc_gamma_matrix(const int * tips, const int n_tips,
const double * alpha, const int * regimes,
const int * ancestor, const double * branch_length,
gsl_matrix * gamma_matrix)
{
int i, j, k, rj;
double value;
/* One pass per tip: walk from the tip up to the root, accumulating
 * alpha-weighted branch lengths along the path. */
for(k = 0; k < n_tips; k++)
{
i = tips[k]; /* tip whose history is recorded (row i of gamma) */
j = i;
value = 0;
while( ancestor[j] >= 0)
{
rj = regimes[j]; /* regime painted on the branch above node j */
value += branch_length[j] * alpha[rj];
/* NOTE: j advances BEFORE the write, so the sum accumulated through
 * node j is stored under its ancestor's column: gamma[i][anc(j)]. */
j = ancestor[j];
gsl_matrix_set(gamma_matrix, i, j, value);
}
}
}
/** Expected trait value at tip i under the multitype OU process: the root
 * value Xo damped by exp(-gamma(tip, root)), plus each ancestral branch's
 * pull toward its regime optimum theta, damped by the gamma weight
 * accumulated between the tip and that branch. */
double calc_mean(int i, double Xo, const double * alpha,
const double * theta, const int * regimes,
const int * ancestor, const double * branch_length,
const gsl_matrix * gamma_matrix)
{
int tip = i; // remember the tip value
int ri;
long double omega=0; /* accumulated optimum contributions */
while( ancestor[i] >= 0 )
{
ri = regimes[i]; /* regime of the branch above node i */
/* contribution of this branch toward theta[ri] */
omega += theta[ri] * (1 - exp(- alpha[ri] * branch_length[i])) *
exp( - gsl_matrix_get(gamma_matrix, tip, i) );
i = ancestor[i];
}
return Xo * exp( - gsl_matrix_get(gamma_matrix, tip, 0)) + omega;
}
/** Calculate the covariance between tips i & j
* @f[ e^{-\gamma_{i,k}} e^{-\gamma_{j_k}} \frac{\sigma_k^2}{2 \alpha_k}
* \left(e^{2\alpha_k T } - e^{2 \alpha_k (T - l)} \right) @f]
* where T is the age of the node and l the length of the branch under it
*/
double calc_var(
int i, int j, ///< nodes being compared
int lca, ///< last common ancestor, pass to avoid recalculating
const double * alpha, ///< value of alpha in each regime, length n_regimes
const double * sigma, ///< value of sigma in each regime
const int * regimes, ///< specification of the regimes (paintings), length n_nodes
const int * ancestor, ///< ancestor of the node, length n_nodes
const double * branch_length, ///< branch length ancestral to the node, length n_nodes
gsl_matrix * gamma_matrix
)
{
int k = lca; //if i=j, k=i=j
int rk;
long double omega=0; /* accumulated covariance contributions */
/* Only shared ancestry (the LCA and above) contributes to the
 * covariance of tips i and j, so walk from the LCA up to the root. */
while( ancestor[k] >= 0)
{
rk = regimes[k]; /* regime of the branch above node k */
/* per-branch OU variance term, damped by the gamma weights from each
 * tip down to node k (cf. the formula in the doc comment above) */
omega += gsl_pow_2( sigma[rk] ) / (2 * alpha[rk] ) *
( 1 - exp( - 2 * alpha[rk] * branch_length[k] ) ) *
exp( - gsl_matrix_get(gamma_matrix, i, k) - gsl_matrix_get(gamma_matrix, j, k));
k = ancestor[k];
}
return omega;
}
/**
 * @brief List the tip (leaf) nodes of the tree.
 *
 * @param n_nodes Total number of nodes (internal and tips)
 * @param ancestor List of ancestors; the root is node 0 (ancestor < 0)
 *
 * @return newly allocated array of the (n_nodes+1)/2 tip node ids
 *         (binary-tree assumption); the caller must free() it
 */
int * alloc_tips(int n_nodes, const int * ancestor){
	int n_tips = (n_nodes+1)/2;
	int * tips = (int *) calloc(n_tips, sizeof(int));
	int i, j, first_child, k = 0;
	for(i = 0; i < n_nodes; i++){
		/* Find i's first child, if any.  0 doubles as "no child", which
		 * matches the original logic: node 0 is the root, so it is never
		 * a legitimate child of anything. */
		first_child = 0;
		for(j = 0; j < n_nodes; j++){
			if(ancestor[j] == i){
				first_child = j;
				break;
			}
		}
		if(first_child == 0){
			/* childless => tip */
			tips[k] = i;
			k++;
		}
	}
	return tips;
}
/**
 * @brief Calculate the likelihood of a multitype OU model (callable from R)
 *
 * @param Xo Root value
 * @param alpha[] selective strength in each regime
 * @param theta[] optimum in each regime
 * @param sigma[] diversification rate in each regime
 * @param regimes[] specify the regime each node belongs to
 * @param ancestor[] specify the ancestor of each node (thus the topology)
 * @param branch_length[] length of the branch below each node
 * @param traits[] trait value observed at each node (tips only)
 * @param n_nodes total number of nodes
 * @param lca_matrix[] a n_nodes^2 matrix of least common ancestor for each pair,
 * computed by the lca_calc function once per tree for efficiency.
 * @param llik the likelihood returned by the function
 *
 * @details
 * Computes the OU expectation of every tip and the tip-by-tip covariance
 * matrix, then evaluates the multivariate-normal log likelihood.
 * (The large blocks of commented-out scratch diagnostics that used to live
 * in this function were removed; they exist as live code in unit_tests().)
 */
void calc_lik (const double *Xo, const double alpha[], const double theta[],
		const double sigma[], const int regimes[], const int ancestor[],
		const double branch_length[], const double traits[],
		int *n_nodes, int lca_matrix[], double *llik)
{
	gsl_set_error_handler_off (); /* Comment out this line to assist debugging */
	/* Declare variables */
	int i, j, ki, kj;
	int n_tips = (*n_nodes+1)/2;
	double *X_EX = (double *) malloc(n_tips * sizeof(double));
	double *V = (double *) malloc(n_tips * n_tips * sizeof(double));
	gsl_matrix * gamma_matrix = gsl_matrix_calloc(*n_nodes,*n_nodes);
	double mean;
	int lca;
	int * tips = alloc_tips(*n_nodes, ancestor);
	/* Precompute the alpha-weighted path sums shared by the mean and
	 * covariance calculations. */
	calc_gamma_matrix(tips, n_tips, alpha, regimes, ancestor,
			branch_length, gamma_matrix);
	/* Deviation of each observed tip trait from its OU expectation */
	for(i = 0; i < n_tips; i++){
		ki = tips[i];
		mean = calc_mean(ki, *Xo, alpha, theta, regimes, ancestor,
				branch_length, gamma_matrix);
		X_EX[i] = traits[ki] - mean;
	}
	/* Covariance between every pair of tips */
	for(i=0; i < n_tips; i++){
		ki = tips[i];
		for(j=0; j < n_tips; j++){
			kj = tips[j];
			/* Identify which node is last common ancestor of the tips */
			lca = lca_matrix[ki * *n_nodes + kj];
			/* get the covariance between all possible pairs of tips */
			V[n_tips*i+j] = calc_var(ki, kj, lca, alpha, sigma, regimes,
					ancestor, branch_length, gamma_matrix);
		}
	}
	*llik = log_normal_lik(n_tips, X_EX, V);
	gsl_matrix_free(gamma_matrix);
	free(X_EX);
	free(V);
	free(tips);
}
/**
 * @brief Simulate tip traits by drawing random numbers
 * from the multivariate normal implied by the OU model on the tree
 *
 * @param rng a gsl random number generator
 * @param mytree the tree and parameters; on return, mytree->traits at the
 * tip nodes holds the simulated values
 */
void simulate (const gsl_rng * rng, tree * mytree)
{
/* Allocate memory */
int i,j,ki, kj;
int n_tips = (mytree->n_nodes+1)/2;
gsl_vector * EX = gsl_vector_alloc(n_tips);
gsl_matrix * V = gsl_matrix_alloc(n_tips,n_tips);
gsl_vector * simdata = gsl_vector_alloc(n_tips);
/* NOTE(review): gamma_matrix uses gsl_matrix_alloc here while calc_lik
 * uses gsl_matrix_calloc; entries off the tip-to-root paths stay
 * uninitialized -- the code only reads on-path entries, but confirm. */
gsl_matrix * gamma_matrix = gsl_matrix_alloc(mytree->n_nodes,mytree->n_nodes);
int * tips = alloc_tips(mytree->n_nodes, mytree->ancestor);
int lca;
/* Calculate the gamma matrix */
calc_gamma_matrix(tips, n_tips, mytree->alpha, mytree->regimes,
mytree->ancestor, mytree->branch_length,
gamma_matrix);
/* Calculate means */
for(i = 0; i < n_tips; i++){
ki = tips[i];
gsl_vector_set(EX, i,
calc_mean(ki, *(mytree->Xo), mytree->alpha,
mytree->theta, mytree->regimes,
mytree->ancestor, mytree->branch_length,
gamma_matrix));
}
/* Calculate Variances */
for(i=0; i < n_tips; i++){
ki = tips[i];
for(j=0; j< n_tips; j++){
kj = tips[j];
lca = mytree->lca_matrix[ki * mytree->n_nodes + kj];
gsl_matrix_set( V, i, j,
calc_var(ki,kj,lca, mytree->alpha,
mytree->sigma, mytree->regimes,
mytree->ancestor, mytree->branch_length,
gamma_matrix));
}
}
/* Calculate simulated data as multivariate normal random numbers */
mvn(rng, EX, V, simdata);
/* Write that data to the tip states */
for(i=0; i< n_tips; i++){
ki = tips[i];
mytree->traits[ki] = gsl_vector_get(simdata,i);
}
/* Clean up */
gsl_vector_free(EX);
gsl_matrix_free(V);
gsl_vector_free(simdata);
gsl_matrix_free(gamma_matrix);
free(tips);
}
/**
 * @brief Scratch diagnostics for the likelihood machinery: prints node ages,
 * gamma-matrix entries, and analytic-vs-computed variances for a tiny
 * three-branch tree, then computes the likelihood like calc_lik.
 * The signature mirrors calc_lik so it can be swapped in for debugging.
 */
void unit_tests (const double *Xo, const double alpha[], const double theta[],
		const double sigma[], const int regimes[], const int ancestor[],
		const double branch_length[], const double traits[],
		int *n_nodes, int lca_matrix[], double *llik)
{
	gsl_set_error_handler_off (); /* Comment out this line to assist debugging */
	/* Declare variables */
	int i, j, ki, kj;
	int n_tips = (*n_nodes+1)/2;
	double *X_EX = (double *) malloc(n_tips * sizeof(double));
	double *V = (double *) malloc(n_tips * n_tips * sizeof(double));
	gsl_matrix * gamma_matrix = gsl_matrix_calloc(*n_nodes,*n_nodes);
	double mean;
	int lca;
	int * tips = alloc_tips(*n_nodes, ancestor);
	/* Calculate the gamma matrix */
	calc_gamma_matrix(tips, n_tips, alpha, regimes, ancestor,
			branch_length, gamma_matrix);
	/* Unit test -- tips have the same age */
	for(i = 0; i < n_tips; i++)
		printf("%lf\n", node_age(tips[i], ancestor, branch_length));
	/* Unit test -- gamma of root values.
	 * BUGFIX: the second printf used to sit OUTSIDE the (brace-less) loop,
	 * so it executed once with i == n_tips and read tips[i] out of bounds.
	 * Both prints now run once per tip, inside the braces. */
	for(i = 0; i < n_tips; i++){
		printf("%lf\n", gsl_matrix_get(gamma_matrix, tips[i], 0));
		printf("%lf\n", gsl_matrix_get(gamma_matrix, tips[i], ancestor[tips[i]]));
	}
	/* Unit test -- variance on a single tip with a middle node (tree = *-*-*) */
	double salpha[] = {.1};
	double ssigma[] = {2};
	int sregimes[] = {0, 0, 0, 0};
	int sancestor[] = {-1, 0, 1, 1};
	double sbranch_length[] = {0, 5, 5, 5};
	int s_tips[] = {2,3};
	gsl_matrix * sgamma_matrix = gsl_matrix_calloc(4,4);
	calc_gamma_matrix(s_tips, 2, salpha, sregimes, sancestor,
			sbranch_length, sgamma_matrix);
	printf("var: %lf\n", calc_var(2, 3, 1, salpha, ssigma, sregimes, sancestor, sbranch_length, sgamma_matrix));
	printf("analytic %lf\n", gsl_pow_2(ssigma[0])/(2*salpha[0]) * (1 - exp(-2*salpha[0]*5)) *exp(-2*salpha[0]*5) );
	printf("var: %lf\n", calc_var(2, 2, 2, salpha, ssigma, sregimes, sancestor, sbranch_length, sgamma_matrix));
	printf("analytic %lf\n", gsl_pow_2(ssigma[0])/(2*salpha[0]) * (1 - exp(-2*salpha[0]*10)));
	for(i=0;i<4;i++){
		printf("\n");
		for(j=0;j<4;j++){
			printf("%g\t ", gsl_matrix_get(sgamma_matrix, i, j));
		}
	}
	printf("\n\n");
	/* BUGFIX: the scratch gamma matrix was never released */
	gsl_matrix_free(sgamma_matrix);
	/* Calculate the mean square differences */
	for(i = 0; i < n_tips; i++){
		ki = tips[i];
		mean = calc_mean(ki, *Xo, alpha, theta, regimes, ancestor,
				branch_length, gamma_matrix);
		X_EX[i] = traits[ki] - mean;
		printf("%lf\n", mean);
	}
	/* Calculate the variances */
	for(i=0; i < n_tips; i++){
		ki = tips[i];
		for(j=0; j < n_tips; j++){
			kj = tips[j];
			/* Identify which node is last common ancestor of the tips*/
			lca = lca_matrix[ki * *n_nodes + kj];
			/* get the covariance between all possible pairs of tips */
			V[n_tips*i+j] = calc_var(ki, kj, lca, alpha, sigma, regimes,
					ancestor, branch_length, gamma_matrix);
			if(ki==kj) printf("%g, %d, %d\n", V[n_tips*i+j], ki, lca);
		}
	}
	*llik = log_normal_lik(n_tips, X_EX, V);
	gsl_matrix_free(gamma_matrix);
	free(X_EX);
	free(V);
	free(tips);
}
| 31.645522
| 111
| 0.622391
|
[
"vector",
"model"
] |
c96ee04f80c8b097751c7ac1514a1e8e25451cb1
| 2,573
|
h
|
C
|
src/p3d/algebra/Matrix3.h
|
arnaudcoj/m1s2_m3ds_tp10_obb
|
68985c433bd7e54b245a6baab259b64d91c028d2
|
[
"MIT"
] | 1
|
2019-12-10T01:59:15.000Z
|
2019-12-10T01:59:15.000Z
|
src/p3d/algebra/Matrix3.h
|
arnaudcoj/m1s2_m3ds_tp10_obb
|
68985c433bd7e54b245a6baab259b64d91c028d2
|
[
"MIT"
] | null | null | null |
src/p3d/algebra/Matrix3.h
|
arnaudcoj/m1s2_m3ds_tp10_obb
|
68985c433bd7e54b245a6baab259b64d91c028d2
|
[
"MIT"
] | 2
|
2020-02-27T18:13:54.000Z
|
2022-02-24T14:37:22.000Z
|
/*
* Matrix3.h
*
* Created on: 14 mars 2011
* @author: aubert
*/
#ifndef MATRIX3_H_
#define MATRIX3_H_
/*!
*
* @file
*
* @brief 3x3 Matrix Class
* @author F. Aubert
*
*/
namespace p3d {
class Matrix4;
class Vector3;
class Matrix3 {
  double _c[9];         ///< column-major storage: element (row i, col j) lives at _c[i + j*3]
  static float _cf[9];  ///< for casting in float (shared scratch -- see fv() caution)
public:
  /// destructor
  virtual ~Matrix3();
  /// copy
  Matrix3 &operator=(const Matrix3 &m);
  /// constructor
  Matrix3();
  /// converts a 4x4 matrix to 3x3 : it is the "top-left" sub matrix
  explicit Matrix3(const Matrix4 &m);
  /// returns the pointer to data as float (!caution : the pointer is shared between all Matrix3)
  const float *fv() const ;
  /// returns the pointer to data (column-major order)
  inline const double *dv() const {return _c;}
  /// returns the i-th element (column-major order)
  inline const double &operator()(unsigned int i) const {return _c[i];}
  /// returns the i-th element (column-major order)
  inline double &operator()(unsigned int i) {return _c[i];}
  /// transforms p by this matrix
  void transform(p3d::Vector3 *p) const;
  Matrix3 &invert(const Matrix3 &a);
  Matrix3 &invert();
  Matrix3 inverse() const;
  Matrix3 &transpose();
  void subScaleColumn(int i, int j, double s);
  void setIdentity();
  void set(const double *v);
  /// returns the i-th coefficient of this (Matrix3 is column-major)
  inline double &operator()(int i) {return _c[i];}
  /// returns the i-th coefficient of this (Matrix3 is column-major)
  inline const double &operator()(int i) const {return _c[i];}
  /// returns the i-th coefficient of this (Matrix3 is column-major)
  const double &at(int i) const {return _c[i];}
  /// returns the i-th coefficient of this (Matrix3 is column-major)
  double &at(int i) {return _c[i];}
  /// returns the element at row i and at column j
  inline double &operator()(int i,int j) {return _c[i+(j*3)];}
  /// returns the element at row i and at column j.
  /// FIX: both at(i,j) overloads used a column stride of 2 (_c[i+(j*2)]),
  /// which addresses the wrong element for every j > 0 and disagreed with
  /// operator()(int,int); the stride of a 3x3 column-major matrix is 3.
  inline const double &at(int i,int j) const {return _c[i+(j*3)];}
  /// returns the element at row i and at column j (see stride FIX above)
  inline double &at(int i,int j) {return _c[i+(j*3)];}
  /// returns the element at row i and at column j
  inline const double &operator()(int i,int j) const {return _c[i+(j*3)];}
  void swapColumn(int i, int j);
  void scaleColumn(int i, double k);
};
/// returns the transformation of p by the matrix m. Ex : p2=m*p1
Vector3 operator*(const p3d::Matrix3 &m,const p3d::Vector3 &p);
}
#endif /* MATRIX3_H_ */
| 27.666667
| 98
| 0.643218
|
[
"transform"
] |
c9714789367c2f252fb3a63b2dedaa6364715b06
| 8,594
|
h
|
C
|
public/gcsdk/sqlaccess/schemafull.h
|
DannyParker0001/Kisak-Strike
|
99ed85927336fe3aff2efd9b9382b2b32eb1d05d
|
[
"Unlicense"
] | 252
|
2020-12-16T15:34:43.000Z
|
2022-03-31T23:21:37.000Z
|
tf2_src/public/gcsdk/sqlaccess/schemafull.h
|
Counter2828/TeamFortress2
|
1b81dded673d49adebf4d0958e52236ecc28a956
|
[
"MIT"
] | 23
|
2020-12-20T18:02:54.000Z
|
2022-03-28T16:58:32.000Z
|
tf2_src/public/gcsdk/sqlaccess/schemafull.h
|
Counter2828/TeamFortress2
|
1b81dded673d49adebf4d0958e52236ecc28a956
|
[
"MIT"
] | 42
|
2020-12-19T04:32:33.000Z
|
2022-03-30T06:00:28.000Z
|
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================
#ifndef GCSCHEMAFULL_H
#define GCSCHEMAFULL_H
#ifdef _WIN32
#pragma once
#endif
namespace GCSDK
{
//-----------------------------------------------------------------------------
// SerSchemaFull
// This defines the binary serialization format for a CSchemaFull
//-----------------------------------------------------------------------------
struct SerSchemaFull_t
{
	// Bump k_ECurrentVersion whenever the serialized layout changes.
	enum EVersion
	{
		k_ECurrentVersion = 1,
	};

	int32 m_nVersion;						// version of serialization format
	int32 m_cSchema;						// # of schema we contain
};
//-----------------------------------------------------------------------------
// CFTSCatalogInfo
// information about a full text search catalog object in our schema
//-----------------------------------------------------------------------------
// Describes one full-text-search catalog: which schema catalog it belongs to,
// its name (heap-owned via strdup/free), and its file group.
class CFTSCatalogInfo
{
public:
	enum ESchemaCatalog m_eCatalog;		// catalog this FTS catalog lives in
	const char *m_pstrName;				// owned copy of the catalog name (strdup'd; freed in dtor)
	int m_nFileGroup;					// file group index for the catalog

	CFTSCatalogInfo()
		// FIX: members are now initialized in declaration order (the old list
		// was reordered, a -Wreorder hazard) and m_nFileGroup is no longer
		// left uninitialized.
		: m_eCatalog( k_ESchemaCatalogInvalid ),
		  m_pstrName( NULL ),
		  m_nFileGroup( 0 )
	{
	}

	~CFTSCatalogInfo()
	{
		free( (void*) m_pstrName);
	}

	CFTSCatalogInfo( const CFTSCatalogInfo &refOther )
	{
		m_eCatalog = refOther.m_eCatalog;
		m_nFileGroup = refOther.m_nFileGroup;
		if ( refOther.m_pstrName != NULL )
			m_pstrName = strdup( refOther.m_pstrName );
		else
			m_pstrName = NULL;
	}

	// FIX: the class had a destructor and a copy constructor but no copy
	// assignment operator (Rule of Three violation) -- the compiler-generated
	// shallow copy made two objects own the same m_pstrName, causing a
	// double free. Deep-copy assignment, safe under self-assignment.
	CFTSCatalogInfo &operator=( const CFTSCatalogInfo &refOther )
	{
		if ( this != &refOther )
		{
			m_eCatalog = refOther.m_eCatalog;
			m_nFileGroup = refOther.m_nFileGroup;
			free( (void*) m_pstrName );
			m_pstrName = ( refOther.m_pstrName != NULL ) ? strdup( refOther.m_pstrName ) : NULL;
		}
		return *this;
	}

#ifdef DBGFLAG_VALIDATE
	void Validate( CValidator &validator, const char *pchName )	// Validate our internal structures
	{
		validator.ClaimMemory( (void *) m_pstrName );
	}
#endif
};
//-----------------------------------------------------------------------------
// SchemaFull conversion instructions
// These specify various operations that can be performed when converting
// from one SchemaFull to another.
//-----------------------------------------------------------------------------
// Conversion instruction: drop the named table.
struct DeleteTable_t
{
	char m_rgchTableName[k_cSQLObjectNameMax];			// Name of the table to delete
};

// Conversion instruction: map a table from the old SchemaFull onto a table
// (by index) in the new one.
struct RenameTable_t
{
	char m_rgchTableNameOld[k_cSQLObjectNameMax];		// Rename a table with this name
	int m_iTableDst;									// to this table
};

// SQL trigger kinds: the cross product of AFTER / INSTEAD OF with
// INSERT / DELETE / UPDATE (see CTriggerInfo::GetTriggerTypeString).
enum ETriggerType
{
	k_ETriggerType_Invalid,
	k_ETriggerType_After_Insert,
	k_ETriggerType_InsteadOf_Insert,
	k_ETriggerType_After_Delete,
	k_ETriggerType_InsteadOf_Delete,
	k_ETriggerType_After_Update,
	k_ETriggerType_InsteadOf_Update,
};
class CTriggerInfo
{
public:
CTriggerInfo()
: m_eTriggerType( k_ETriggerType_Invalid ),
m_bMatched( false )
{
}
// are these equal for identity?
bool operator==( const CTriggerInfo& refOther ) const
{
if ( 0 != Q_stricmp( m_szTriggerTableName, refOther.m_szTriggerTableName ) )
return false;
if ( 0 != Q_stricmp( m_szTriggerName, refOther.m_szTriggerName ) )
return false;
// they're equal!
return true;
}
// if the identity is the same, this will tell if text or type differs
bool IsDifferent( const CTriggerInfo& refOther ) const
{
if ( m_eTriggerType != refOther.m_eTriggerType )
return false;
if ( m_strText != refOther.m_strText )
return false;
// they're equal!
return true;
}
const char* GetTriggerTypeString() const
{
const char *pstrSQL = "~~ unknown trigger type syntax error ~~";
switch ( m_eTriggerType )
{
case k_ETriggerType_After_Insert:
pstrSQL = "AFTER INSERT";
break;
case k_ETriggerType_InsteadOf_Insert:
pstrSQL = "INSTEAD OF INSERT";
break;
case k_ETriggerType_After_Delete:
pstrSQL = "AFTER DELETE";
break;
case k_ETriggerType_InsteadOf_Delete:
pstrSQL = "INSTEAD OF DELETE";
break;
case k_ETriggerType_After_Update:
pstrSQL = "AFTER UPDATE";
break;
case k_ETriggerType_InsteadOf_Update:
pstrSQL = "INSTEAD OF UPDATE";
break;
default:
case k_ETriggerType_Invalid:
/* initialize is fine, thanks */
break;
}
return pstrSQL;
}
bool m_bMatched; // got matched during schema convert
ETriggerType m_eTriggerType; // what kinda trigger is this?
ESchemaCatalog m_eSchemaCatalog; // catalog where this trigger lives
char m_szTriggerName[k_cSQLObjectNameMax]; // name of the trigger object
char m_szTriggerTableName[k_cSQLObjectNameMax]; // name of the table hosting this trigger
CUtlString m_strText; // text of the trigger
// Validate our internal structures
#ifdef DBGFLAG_VALIDATE
void Validate( CValidator &validator, const char *pchName )
{
m_strText.Validate( validator, pchName );
}
#endif
};
//-----------------------------------------------------------------------------
// CSchemaFull
// This defines the schema for the entire data store. It's essentially just
// a collection of CSchema, which define the schema for individual tables.
//-----------------------------------------------------------------------------
class CSchemaFull
{
public:
	// Constructors & destructors
	CSchemaFull();
	~CSchemaFull();

	// Releases all held state (schemas, triggers, catalogs, scratch buffer).
	void Uninit();

	// add a new schema and return its pointer.
	// NOTE(review): the returned pointer is into m_VecSchema; presumably it is
	// invalidated if the vector reallocates on a later add -- verify CUtlVector
	// growth semantics before caching it.
	CSchema *AddNewSchema( int iTable, ESchemaCatalog eCatalog, const char *pstrName )
	{
		CSchema &refNewSchema = m_VecSchema[m_VecSchema.AddToTail()];
		refNewSchema.SetName( pstrName );
		refNewSchema.SetESchemaCatalog( eCatalog );
		SetITable( &refNewSchema, iTable );
		return &refNewSchema;
	}

	// Accessors
	int GetCSchema() const { return m_VecSchema.Count(); }
	CSchema &GetSchema( int iSchema ) { return m_VecSchema[iSchema]; }
	uint32 GetCheckSum() const { return m_unCheckSum; }
	const char *GetDefaultSchemaNameForCatalog( ESchemaCatalog eCatalog );
	uint8 *GetPubScratchBuffer( );
	uint32 GetCubScratchBuffer() const { return m_cubScratchBuffer; }

	// Makes sure that a generated intrinsic schema is consistent
	void CheckSchema( CSchema *pSchema, int cField, uint32 cubRecord );

	// Find the table with a given name (returns -1 if not found)
	int FindITable( const char *pchName );
	const char *PchTableFromITable( int iTable );

	// Helper functions for recording schema conversion operations
	void AddDeleteTable( const char *pchTableName );
	void AddRenameTable( const char *pchTableNameOld, const char *pchTableNameNew );
	void AddDeleteField( const char *pchTableName, const char *pchFieldName );
	void AddRenameField( const char *pchTableName, const char *pchFieldNameOld, const char *pchFieldNameNew );
	void AddAlterField( const char *pchTableName, const char *pchFieldNameOld, const char *pchFieldNameNew, PfnAlterField_t pfnAlterField );

	// declare that a trigger is on a table
	void AddTrigger( ESchemaCatalog eCatalog, const char *pchTableName, const char *pchTriggerName, ETriggerType eTriggerType, const char *pchTriggerText );

	// Schema conversion helper: figure out what table to map a table from a different schema to
	bool BCanConvertTable( const char *pchTableSrc, int *piTableDst );

	// full text catalogs
	void AddFullTextCatalog( enum ESchemaCatalog eCatalog, const char *pstrCatalogName, int nFileGroup );
	int GetFTSCatalogByName( enum ESchemaCatalog eCatalog, const char *pstrCatalogName );
	void EnableFTS( enum ESchemaCatalog eCatalog );
	int GetCFTSCatalogs() const { return m_vecFTSCatalogs.Count(); }
	const CFTSCatalogInfo & GetFTSCatalogInfo( int nIndex ) const { return m_vecFTSCatalogs[nIndex]; }
	const CUtlVector< CTriggerInfo> & GetTriggerInfos( ) const { return m_VecTriggers; }

	// is the given schema catalog FTS enabled?
	bool GetFTSEnabled( enum ESchemaCatalog eCatalog );

	void Validate( CValidator &validator, const char *pchName );		// Validate our internal structures

	// sets tableID on CSchema, checking that it is not a duplicate
	void SetITable( CSchema* pSchema, int iTable );

	void FinishInit();	// Recalculates some internal fields

private:
	CUtlConstString m_strDefaultSchemaName;
	CUtlVector< CSchema > m_VecSchema;			// Schema for tables in all catalogs
	CUtlVector< CTriggerInfo > m_VecTriggers;	// list of triggers in all catalogs

	// which schema catalogs have FTS enabled?
	CUtlMap< ESchemaCatalog, bool > m_mapFTSEnabled;

	// list of catalogs; each is marked with the schema where it lives.
	CUtlVector< CFTSCatalogInfo > m_vecFTSCatalogs;

	uint32 m_unCheckSum;						// A simple checksum of our contents

	// SchemaFull conversion instructions
	CUtlVector<DeleteTable_t> m_VecDeleteTable;
	CUtlVector<RenameTable_t> m_VecRenameTable;

	uint8 *m_pubScratchBuffer;					// Big enough to hold any record or sparse record in this schemafull
	uint32 m_cubScratchBuffer;					// Size of the scratch buffer
};
extern CSchemaFull & GSchemaFull();
} // namespace GCSDK
#endif // GCSCHEMAFULL_H
| 30.154386
| 153
| 0.688155
|
[
"object"
] |
c971f847ce75885617a07cad112e45fed7bf337e
| 1,774
|
h
|
C
|
src/engine/entity/include/halley/entity/prefab.h
|
mjopenglsdl/halley
|
68b4bd0845569fa2bafaa72bef3926795f0a6d9b
|
[
"Apache-2.0"
] | null | null | null |
src/engine/entity/include/halley/entity/prefab.h
|
mjopenglsdl/halley
|
68b4bd0845569fa2bafaa72bef3926795f0a6d9b
|
[
"Apache-2.0"
] | null | null | null |
src/engine/entity/include/halley/entity/prefab.h
|
mjopenglsdl/halley
|
68b4bd0845569fa2bafaa72bef3926795f0a6d9b
|
[
"Apache-2.0"
] | null | null | null |
#pragma once
#include "halley/file_formats/config_file.h"
#include "entity_data_delta.h"
namespace Halley {
class Prefab : public Resource {
public:
	// Loads a Prefab from resource data via the engine's resource pipeline.
	static std::unique_ptr<Prefab> loadResource(ResourceLoader& loader);
	constexpr static AssetType getAssetType() { return AssetType::Prefab; }

	// Hot-reload entry point: replaces this resource's contents.
	void reload(Resource&& resource) override;
	void makeDefault();

	void serialize(Serializer& s) const;
	void deserialize(Deserializer& s);

	// YAML round-trip of the backing config data.
	void parseYAML(gsl::span<const gsl::byte> yaml);
	String toYAML() const;

	// Overridden by Scene (implementation not in this header).
	virtual bool isScene() const;

	const EntityData& getEntityData() const;
	const std::vector<EntityData>& getEntityDatas() const;
	std::map<UUID, const EntityData*> getEntityDataMap() const;

	// Per-entity diff results, keyed by entity UUID.
	// NOTE(review): presumably populated against the previous version during
	// reload (see generatePrefabDeltas) -- confirm in the implementation.
	const std::map<UUID, EntityDataDelta>& getEntitiesModified() const;
	const std::set<UUID>& getEntitiesAdded() const;
	const std::set<UUID>& getEntitiesRemoved() const;

	const ConfigNode& getRoot() const;
	ConfigNode& getRoot();

protected:
	// Result of diffing two prefab versions.
	struct Deltas {
		std::map<UUID, EntityDataDelta> entitiesModified;
		std::set<UUID> entitiesAdded;
		std::set<UUID> entitiesRemoved;
	};

	// Rebuilds entityDatas from the backing config.
	void loadEntityData();
	virtual std::vector<EntityData> makeEntityDatas() const;
	Deltas generatePrefabDeltas(const Prefab& newPrefab) const;

	std::vector<EntityData> entityDatas;	// parsed entity data
	ConfigFile config;						// backing config data
	Deltas deltas;							// latest diff results
};
// A Scene is a Prefab that identifies itself as a scene and diffs against
// other Scenes on reload.
class Scene final : public Prefab {
public:
	static std::unique_ptr<Scene> loadResource(ResourceLoader& loader);
	constexpr static AssetType getAssetType() { return AssetType::Scene; }

	bool isScene() const override;

	void reload(Resource&& resource) override;
	void makeDefault();		// note: shadows (does not override) Prefab::makeDefault

protected:
	std::vector<EntityData> makeEntityDatas() const override;
	Deltas generateSceneDeltas(const Scene& newScene) const;
};
}
| 26.878788
| 73
| 0.740135
|
[
"vector"
] |
c995ec37e14c74f64c71fd6c533e3de8e3780a18
| 827
|
h
|
C
|
Aplicacion Movil/generated/bundles/login-transition/build/Android/Preview/app/src/main/include/Fuse.Motion.Simulatio-a4ba96c1.h
|
marferfer/SpinOff-LoL
|
a9dba8ac9dd476ec1ef94712d9a8e76d3b45aca8
|
[
"Apache-2.0"
] | null | null | null |
Aplicacion Movil/generated/bundles/login-transition/build/Android/Preview/app/src/main/include/Fuse.Motion.Simulatio-a4ba96c1.h
|
marferfer/SpinOff-LoL
|
a9dba8ac9dd476ec1ef94712d9a8e76d3b45aca8
|
[
"Apache-2.0"
] | null | null | null |
Aplicacion Movil/generated/bundles/login-transition/build/Android/Preview/app/src/main/include/Fuse.Motion.Simulatio-a4ba96c1.h
|
marferfer/SpinOff-LoL
|
a9dba8ac9dd476ec1ef94712d9a8e76d3b45aca8
|
[
"Apache-2.0"
] | null | null | null |
// This file was generated based on C:/Users/JuanJose/AppData/Local/Fusetools/Packages/Fuse.Motion/1.9.0/Simulation/Simulation.uno.
// WARNING: Changes might be lost if you edit this file directly.
#pragma once
#include <Uno.Object.h>
namespace g{
namespace Fuse{
namespace Motion{
namespace Simulation{
// internal abstract interface Simulation :5
// {
uInterfaceType* Simulation_typeof();
struct Simulation
{
void(*fp_get_IsStatic)(uObject*, bool*);
void(*fp_Update)(uObject*, double*);
static bool IsStatic(const uInterface& __this) { bool __retval; return __this.VTable<Simulation>()->fp_get_IsStatic(__this, &__retval), __retval; }
static void Update(const uInterface& __this, double elapsed) { __this.VTable<Simulation>()->fp_Update(__this, &elapsed); }
};
// }
}}}} // ::g::Fuse::Motion::Simulation
| 31.807692
| 151
| 0.737606
|
[
"object"
] |
c99def729d6f5f9eabab806561c5454e365f55ee
| 3,194
|
h
|
C
|
pluginsdk/yara/yara/object.h
|
torusrxxx/x64dbgpatchexporter
|
d6354a74d67178af59203efbebb747b79eafa93e
|
[
"BSD-2-Clause"
] | 22
|
2016-07-22T09:38:37.000Z
|
2022-02-25T08:27:39.000Z
|
pluginsdk/yara/yara/object.h
|
torusrxxx/x64dbgpatchexporter
|
d6354a74d67178af59203efbebb747b79eafa93e
|
[
"BSD-2-Clause"
] | 3
|
2016-11-08T23:53:49.000Z
|
2017-05-08T11:49:58.000Z
|
pluginsdk/yara/yara/object.h
|
torusrxxx/x64dbgpatchexporter
|
d6354a74d67178af59203efbebb747b79eafa93e
|
[
"BSD-2-Clause"
] | 12
|
2016-07-29T21:13:31.000Z
|
2021-07-27T20:05:06.000Z
|
/*
Copyright (c) 2014. The YARA Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef YR_OBJECT_H
#define YR_OBJECT_H
#ifdef _MSC_VER
#include <float.h>
#define isnan _isnan
#ifndef INFINITY
#define INFINITY (DBL_MAX + DBL_MAX)
#endif
#ifndef NAN
#define NAN (INFINITY-INFINITY)
#endif
#endif
#include "types.h"
#define OBJECT_CREATE 1
#define OBJECT_TYPE_INTEGER 1
#define OBJECT_TYPE_STRING 2
#define OBJECT_TYPE_STRUCTURE 3
#define OBJECT_TYPE_ARRAY 4
#define OBJECT_TYPE_FUNCTION 5
#define OBJECT_TYPE_REGEXP 6
#define OBJECT_TYPE_DICTIONARY 7
#define OBJECT_TYPE_FLOAT 8
int yr_object_create(
int8_t type,
const char* identifier,
YR_OBJECT* parent,
YR_OBJECT** object);
int yr_object_function_create(
const char* identifier,
const char* arguments_fmt,
const char* return_fmt,
YR_MODULE_FUNC func,
YR_OBJECT* parent,
YR_OBJECT** function);
int yr_object_from_external_variable(
YR_EXTERNAL_VARIABLE* external,
YR_OBJECT** object);
void yr_object_destroy(
YR_OBJECT* object);
YR_OBJECT* yr_object_lookup_field(
YR_OBJECT* object,
const char* field_name);
YR_OBJECT* yr_object_lookup(
YR_OBJECT* root,
int flags,
const char* pattern,
...);
int yr_object_has_undefined_value(
YR_OBJECT* object,
const char* field,
...);
int64_t yr_object_get_integer(
YR_OBJECT* object,
const char* field,
...);
SIZED_STRING* yr_object_get_string(
YR_OBJECT* object,
const char* field,
...);
int yr_object_set_integer(
int64_t value,
YR_OBJECT* object,
const char* field,
...);
int yr_object_set_float(
double value,
YR_OBJECT* object,
const char* field,
...);
int yr_object_set_string(
const char* value,
size_t len,
YR_OBJECT* object,
const char* field,
...);
YR_OBJECT* yr_object_array_get_item(
YR_OBJECT* object,
int flags,
int index);
int yr_object_array_set_item(
YR_OBJECT* object,
YR_OBJECT* item,
int index);
YR_OBJECT* yr_object_dict_get_item(
YR_OBJECT* object,
int flags,
const char* key);
int yr_object_dict_set_item(
YR_OBJECT* object,
YR_OBJECT* item,
const char* key);
int yr_object_structure_set_member(
YR_OBJECT* object,
YR_OBJECT* member);
YR_OBJECT* yr_object_get_root(
YR_OBJECT* object);
YR_API void yr_object_print_data(
YR_OBJECT* object,
int indent,
int print_identifier);
#endif
| 19.240964
| 73
| 0.67345
|
[
"object"
] |
c9a0f87492494c29dd3e3a2cfcdc34b7d971d926
| 12,138
|
h
|
C
|
includes/Vector.h
|
shreeviknesh/DS
|
ef804bd3e86a56b365676fa585bc9e88ff5664aa
|
[
"MIT"
] | 1
|
2020-10-20T07:42:50.000Z
|
2020-10-20T07:42:50.000Z
|
includes/Vector.h
|
shreeviknesh/DS
|
ef804bd3e86a56b365676fa585bc9e88ff5664aa
|
[
"MIT"
] | 17
|
2020-05-03T14:57:27.000Z
|
2020-05-25T14:18:53.000Z
|
includes/Vector.h
|
shreeviknesh/DS
|
ef804bd3e86a56b365676fa585bc9e88ff5664aa
|
[
"MIT"
] | null | null | null |
/*
* This file is part of the DS Library (https://github.com/shreeviknesh/DS).
*
* MIT License
*
* Copyright (c) 2020 Shreeviknesh
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef DS_VECTOR_H
#define DS_VECTOR_H
#include <initializer_list>
#include <stdexcept>
#include <memory>
/*
 * Dynamic array with an explicit capacity (m_size) and last-element index
 * (m_last). m_last is a size_t that holds (size_t)-1 when the vector is
 * empty, so size() == m_last + 1 always holds by wrap-around.
 *
 * Fixes applied relative to the previous revision:
 *  - make_unique<int[]> used instead of make_unique<Type[]> in
 *    shrink_to_fit() and both insert() grow paths (only compiled/worked for
 *    Vector<int>);
 *  - every reallocation leaked the old buffer (m_data.release() / raw new
 *    with no delete[]); old buffers are now held in unique_ptr temporaries;
 *  - empty() tested the capacity, not the element count;
 *  - reserve() on an empty vector never recorded the new capacity;
 *  - several unsigned loops ("i <= m_last", "i >= pos") wrapped around for
 *    empty vectors or pos == 0 (infinite loop / out-of-bounds writes);
 *  - operator= was unsafe under self-assignment;
 *  - resize(size, value) rejected every call on an empty vector;
 *  - erase(pos) did no bounds checking.
 */
template<typename Type>
class Vector
{
public:
	/// Creates an empty vector with no storage.
	Vector();
	/// Creates a vector with capacity for `size` elements (no elements yet).
	Vector(size_t size);
	/// Creates a vector holding copies of values[0] .. values[size-1].
	Vector(size_t size, Type* values);
	/// Creates a vector holding the elements of the braced list.
	Vector(std::initializer_list<Type> values);
	/// Deep copy: same capacity, same elements.
	Vector(const Vector<Type>& vector);
	~Vector();

	/// Deep-copy assignment; safe under self-assignment.
	Vector<Type>& operator=(const Vector<Type>& vector);

	/// Element access. Throws std::out_of_range in _DEBUG builds when pos is
	/// out of range; in release builds an out-of-range read remains the
	/// caller's problem (unchanged contract).
	Type& get(size_t pos) const;
	Type& operator[](size_t pos) const { return get(pos); }
	/// Returns a copy of the element at pos (by value, as originally declared).
	Type at(size_t pos) const { return get(pos); }

	/// Number of stored elements.
	size_t size() const noexcept { return m_last + 1; }
	/// Current capacity (allocated slots).
	size_t max_size() const noexcept { return m_size; }
	/// True when no elements are stored.
	/// FIX: previously tested m_size (the capacity), so a reserved-but-empty
	/// vector reported non-empty.
	bool empty() const noexcept { return size() == 0; }

	void reserve(size_t size);
	void shrink_to_fit();
	void clear() noexcept;

	void insert(size_t pos, const Type& value);
	void insert(size_t pos, const Vector<Type>& values);
	void insert(size_t pos, std::initializer_list<Type> values);

	void erase(size_t pos);
	void erase(size_t pos1, size_t pos2);

	void push_back(const Type& value);
	/// FIX: guarded -- popping an empty vector used to wrap m_last and
	/// corrupt size().
	void pop_back() { if (!empty()) m_last--; }

	void resize(size_t targetSize);				// shrink or expand
	void resize(size_t targetSize, Type value);	// only expand

	void swap(size_t pos1, size_t pos2);
	void swap(Vector<Type>& other);

	void assign(size_t count, const Type& value);
	void assign(std::initializer_list<Type> values);

	Type front() const;
	Type back() const;
	Type* data() const noexcept { return m_data.get(); }

private:
	std::unique_ptr<Type[]> m_data;	// owned storage
	size_t m_size;					// capacity (allocated slots)
	size_t m_last;					// index of last element; (size_t)-1 when empty
};

template<typename Type>
inline Vector<Type>::Vector() : m_data(nullptr), m_size(0), m_last(-1) {}

template<typename Type>
inline Vector<Type>::Vector(size_t size) : m_size(size), m_last(-1) {
	m_data = std::make_unique<Type[]>(m_size);
}

template<typename Type>
Vector<Type>::Vector(size_t size, Type* values) : m_size(size) {
	m_data = std::make_unique<Type[]>(m_size);
	for (m_last = 0; m_last < size; m_last++) {
		m_data[m_last] = values[m_last];
	}
	m_last--;	// back to last valid index ((size_t)-1 when size == 0)
}

template<typename Type>
Vector<Type>::Vector(std::initializer_list<Type> values) {
	m_data = std::make_unique<Type[]>(values.size());
	m_size = 0;
	for (const Type& value : values) {
		m_data[m_size++] = value;
	}
	m_last = m_size - 1;
}

template<typename Type>
Vector<Type>::Vector(const Vector<Type>& vector) {
	m_size = vector.m_size;
	m_data = std::make_unique<Type[]>(m_size);
	m_last = vector.m_last;
	// FIX: loop on the element count -- "i <= m_last" never terminated when
	// the source was empty (m_last wraps to SIZE_MAX).
	for (size_t i = 0; i < vector.size(); i++) {
		m_data[i] = vector.m_data[i];
	}
}

template<typename Type>
inline Vector<Type>::~Vector() {
	clear();
}

template<typename Type>
Vector<Type>& Vector<Type>::operator=(const Vector<Type>& vector) {
	// FIX: self-assignment guard -- clear() would otherwise free the buffer
	// we are about to copy from.
	if (this == &vector) {
		return *this;
	}
	clear();
	m_size = vector.m_size;
	m_data = std::make_unique<Type[]>(m_size);
	m_last = vector.m_last;
	for (size_t i = 0; i < vector.size(); i++) {	// FIX: element-count loop (see copy ctor)
		m_data[i] = vector.m_data[i];
	}
	return *this;
}

template<typename Type>
Type& Vector<Type>::get(size_t pos) const {
	// FIX: "pos > m_last" never fired on an empty vector (m_last == SIZE_MAX);
	// pos >= size() covers that case. Release builds still fall through.
	if (pos >= size()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of bounds");
#endif// _DEBUG
	}
	return m_data[pos];
}

template<typename Type>
void Vector<Type>::reserve(size_t size) {
	if (empty()) {
		m_data.reset();
		m_data = std::make_unique<Type[]>(size);
		// FIX: record the new capacity; it was left unchanged, so the next
		// push_back reallocated as if nothing had been reserved.
		m_size = size;
		return;
	}
	size_t count = this->size();
	if (count > size) {
		count = size;			// defensive: never copy past a smaller buffer
		m_last = size - 1;
	}
	// FIX: hold the old buffer in a unique_ptr so it is freed -- release()
	// leaked it before.
	std::unique_ptr<Type[]> old = std::move(m_data);
	m_data = std::make_unique<Type[]>(size);
	m_size = size;
	for (size_t i = 0; i < count; i++) {
		m_data[i] = old[i];
	}
}

template<typename Type>
void Vector<Type>::shrink_to_fit() {
	if (m_last == m_size - 1) {
		return;		// already tight (also covers empty with zero capacity)
	}
	// FIX: allocate Type[] (was make_unique<int[]>, which only compiled for
	// Vector<int>) and free the old buffer (leaked through release() before).
	std::unique_ptr<Type[]> old = std::move(m_data);
	m_data = std::make_unique<Type[]>(m_last + 1);
	for (size_t i = 0; i < size(); i++) {
		m_data[i] = old[i];
	}
	m_size = m_last + 1;
}

template<typename Type>
inline void Vector<Type>::clear() noexcept {
	if (m_size != 0) {
		m_data.reset();
		m_size = 0;
		m_last = -1;
	}
}

template<typename Type>
inline void Vector<Type>::insert(size_t pos, const Type& value) {
	// FIX: "pos > m_last" let any position through on an empty vector;
	// pos >= size() rejects that case too (insertion position must refer to
	// an existing element, matching the original bound for non-empty vectors).
	if (pos >= size()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of bounds");
#endif// _DEBUG
		return;
	}
	if (m_last == m_size - 1) {		// full: grow by one slot
		std::unique_ptr<Type[]> old = std::move(m_data);	// FIX: was leaked
		m_data = std::make_unique<Type[]>(m_size + 1);
		for (size_t i = 0; i < pos; i++) {
			m_data[i] = old[i];
		}
		for (size_t i = pos; i <= m_last; i++) {
			m_data[i + 1] = old[i];
		}
		m_size++;
		m_last = m_size - 1;
	} else {
		// FIX: the original "for (i = m_last; i >= pos; i--)" never
		// terminated for pos == 0 because i is unsigned.
		for (size_t i = m_last + 1; i > pos; i--) {
			m_data[i] = m_data[i - 1];
		}
		m_last++;
	}
	m_data[pos] = value;
}

template<typename Type>
void Vector<Type>::insert(size_t pos, const Vector<Type>& vector) {
	if (pos > size()) {		// pos == size() appends, matching the original bound
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of range");
#endif// _DEBUG
		return;
	}
	size_t vector_size = vector.size();
	size_t count = size();	// element count before insertion
	if (m_last + vector_size >= m_size) {	// not enough spare capacity
		std::unique_ptr<Type[]> old = std::move(m_data);	// FIX: was leaked
		// FIX: allocate Type[] -- was make_unique<int[]>
		m_data = std::make_unique<Type[]>(m_size + vector_size);
		for (size_t i = 0; i < pos; i++) {
			m_data[i] = old[i];
		}
		for (size_t i = 0; i < vector_size; i++) {
			m_data[pos + i] = vector.m_data[i];
		}
		// FIX: copy the tail using the element count; the original iterated
		// to m_size (the capacity) and then set m_last past the real elements.
		for (size_t i = pos; i < count; i++) {
			m_data[i + vector_size] = old[i];
		}
		m_size += vector_size;
	} else {
		// Shift the tail upward back-to-front so nothing is overwritten
		// (replaces the original's leaked "new Type[]" scratch copy).
		for (size_t i = count; i > pos; i--) {
			m_data[i - 1 + vector_size] = m_data[i - 1];
		}
		for (size_t i = 0; i < vector_size; i++) {
			m_data[pos + i] = vector.m_data[i];
		}
	}
	m_last = count + vector_size - 1;
}

template<typename Type>
void Vector<Type>::insert(size_t pos, std::initializer_list<Type> values) {
	if (pos > size()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of range");
#endif// _DEBUG
		return;
	}
	size_t list_size = values.size();
	size_t count = size();
	if (m_last + list_size >= m_size) {
		std::unique_ptr<Type[]> old = std::move(m_data);	// FIX: was leaked
		m_data = std::make_unique<Type[]>(m_size + list_size);	// FIX: was int[]
		size_t index = 0;
		for (; index < pos; index++) {
			m_data[index] = old[index];
		}
		for (const Type& value : values) {
			m_data[index++] = value;
		}
		for (size_t i = pos; i < count; i++) {	// FIX: element count, not capacity
			m_data[index++] = old[i];
		}
		m_size += list_size;
	} else {
		for (size_t i = count; i > pos; i--) {	// back-to-front shift
			m_data[i - 1 + list_size] = m_data[i - 1];
		}
		size_t index = pos;
		for (const Type& value : values) {
			m_data[index++] = value;
		}
	}
	m_last = count + list_size - 1;
}

template<typename Type>
void Vector<Type>::erase(size_t pos) {
	// FIX: pos was never validated; erasing an out-of-range position shifted
	// nothing but still decremented m_last.
	if (pos >= size()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of range");
#endif// _DEBUG
		return;
	}
	for (size_t i = pos; i < m_last; i++) {
		m_data[i] = m_data[i + 1];
	}
	m_last--;
}

template<typename Type>
void Vector<Type>::erase(size_t pos1, size_t pos2) {
	// FIX: bounds via size() so the empty case is rejected too (the "pos >
	// m_last" form is never true once m_last has wrapped to SIZE_MAX).
	if (pos1 >= size() || pos2 >= size() || pos1 > pos2) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of range");
#endif// _DEBUG
		return;
	}
	if (pos1 == pos2) {
		return erase(pos1);
	}
	size_t temp_last = m_last - (pos2 - pos1 + 1);
	for (size_t i = pos2 + 1; i <= m_last; i++) {
		m_data[pos1++] = m_data[i];
	}
	m_last = temp_last;
}

template<typename Type>
void Vector<Type>::assign(size_t count, const Type& value) {
	clear();
	m_size = count;
	m_last = m_size - 1;
	m_data = std::make_unique<Type[]>(m_size);
	for (size_t i = 0; i < m_size; i++) {
		m_data[i] = value;
	}
}

template<typename Type>
void Vector<Type>::assign(std::initializer_list<Type> values) {
	clear();
	m_size = values.size();
	m_last = -1;
	m_data = std::make_unique<Type[]>(m_size);
	for (const Type& val : values) {
		m_data[++m_last] = val;
	}
}

template<typename Type>
Type Vector<Type>::front() const {
	// FIX: test the element count, not the capacity -- front() on a
	// reserved-but-empty vector read garbage before.
	if (empty()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of bounds");
#endif// _DEBUG
		return Type();
	}
	return m_data[0];
}

template<typename Type>
Type Vector<Type>::back() const {
	if (empty()) {	// FIX: element count, not capacity (see front())
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of bounds");
#endif// _DEBUG
		return Type();
	}
	return m_data[m_last];
}

template<typename Type>
void Vector<Type>::push_back(const Type& value) {
	// Grows by exactly one slot when full, as before (note: O(n) per grow;
	// call reserve() up front for bulk appends).
	if (m_last == m_size - 1) {
		std::unique_ptr<Type[]> old = std::move(m_data);	// FIX: was leaked
		m_data = std::make_unique<Type[]>(m_size + 1);
		for (size_t i = 0; i < m_size; i++) {
			m_data[i] = old[i];
		}
		m_size++;
	}
	m_data[++m_last] = value;
}

template<typename Type>
void Vector<Type>::resize(size_t targetSize) {
	if (targetSize == 0) {
		clear();
		return;
	}
	if (targetSize < size()) {		// shrink: drop the tail, then trim capacity
		m_last = targetSize - 1;
		shrink_to_fit();
		return;
	}
	if (targetSize > m_size) {
		reserve(targetSize);		// reserve() now updates m_size (see FIX there)
	}
	Type value = Type();			// value-initialize the new tail elements
	for (size_t i = m_last + 1; i < targetSize; i++) {
		m_data[i] = value;
	}
	m_last = targetSize - 1;
}

template<typename Type>
void Vector<Type>::resize(size_t targetSize, Type value) {
	// FIX: "targetSize <= m_last" rejected every call on an empty vector
	// (m_last wraps to SIZE_MAX); compare against the element count instead.
	if (targetSize < size()) {
#ifdef _DEBUG
		throw std::out_of_range("Vector cannot shrink - use resize(size_t) instead.");
#endif// _DEBUG
		return;
	}
	if (targetSize > m_size) {
		reserve(targetSize);
	}
	for (size_t i = m_last + 1; i < targetSize; i++) {
		m_data[i] = value;
	}
	m_last = targetSize - 1;
}

template<typename Type>
void Vector<Type>::swap(size_t pos1, size_t pos2) {
	if (pos1 >= size() || pos2 >= size()) {		// FIX: works for empty vectors too
#ifdef _DEBUG
		throw std::out_of_range("Vector index out of bounds");
#endif// _DEBUG
		return;
	}
	Type temp = m_data[pos1];
	m_data[pos1] = m_data[pos2];
	m_data[pos2] = temp;
}

template<typename Type>
inline void Vector<Type>::swap(Vector<Type>& other) {
	m_data.swap(other.m_data);
	size_t temp = m_last;
	m_last = other.m_last;
	other.m_last = temp;
	temp = m_size;
	m_size = other.m_size;
	other.m_size = temp;
}
#endif
| 27.276404
| 86
| 0.587411
|
[
"vector"
] |
c9a29f67245717e2e0b303726ccbb4715556a5f2
| 3,715
|
h
|
C
|
source/iAnt_controller.h
|
BCLab-UNM/iAnt-ARGoS
|
c27fdea8eeae0e6539b31d79ce815543f343a991
|
[
"MIT"
] | 9
|
2015-04-19T18:27:09.000Z
|
2022-02-03T19:55:59.000Z
|
source/iAnt_controller.h
|
BCLab-UNM/iAnt-ARGoS
|
c27fdea8eeae0e6539b31d79ce815543f343a991
|
[
"MIT"
] | 21
|
2015-03-23T22:48:46.000Z
|
2015-11-23T22:24:08.000Z
|
source/iAnt_controller.h
|
BCLab-UNM/iAnt-ARGoS
|
c27fdea8eeae0e6539b31d79ce815543f343a991
|
[
"MIT"
] | 7
|
2015-05-17T05:20:15.000Z
|
2015-11-25T17:48:44.000Z
|
#ifndef IANT_CONTROLLER_H_
#define IANT_CONTROLLER_H_
#include <argos3/core/control_interface/ci_controller.h>
#include <argos3/plugins/robots/foot-bot/simulator/footbot_entity.h>
#include <argos3/plugins/robots/generic/control_interface/ci_positioning_sensor.h>
#include <argos3/plugins/robots/generic/control_interface/ci_differential_steering_actuator.h>
#include <argos3/plugins/robots/foot-bot/control_interface/ci_footbot_proximity_sensor.h>
#include <argos3/core/utility/math/rng.h>
#include <source/iAnt_loop_functions.h>
#include <argos3/core/simulator/loop_functions.h>
using namespace argos;
using namespace std;
class iAnt_loop_functions;
/*****
 * The brain of each iAnt robot which implements the Central Place Foraging
 * Algorithm (CPFA).  One controller instance drives one ARGoS foot-bot.
 *****/
class iAnt_controller : public CCI_Controller, public CLoopFunctions {

public:

    /* constructor and destructor */
    iAnt_controller();
    virtual ~iAnt_controller() {}

    /* CCI_Controller Inherited Functions */
    void Init(TConfigurationNode& node);   // read parameters from the XML config
    void ControlStep();                    // executed once per simulation tick
    void Reset();

    /* public helper functions */
    bool IsHoldingFood();
    bool IsInTheNest();
    CVector2 GetPosition();
    CVector3 GetStartPosition();
    bool Wait();                           // true while a wait is still pending
    void Wait(size_t wait_time_in_seconds);
    //bool Turn();
    //bool Move();

private:

    /* foot-bot components: sensors and actuators */
    CCI_PositioningSensor* compass;
    CCI_DifferentialSteeringActuator* wheels;
    CCI_FootBotProximitySensor* proximitySensor;

    /* iAnt controller parameters (loaded in Init from the XML config) */
    size_t maxTrailSize;
    Real distanceTolerance;                // how close counts as "arrived"
    Real searchStepSize;
    Real robotForwardSpeed;
    Real robotRotationSpeed;
    CRange<CRadians> angleToleranceInRadians;

    /* robot internal variables & statistics */
    CRandom::CRNG* RNG;
    iAnt_loop_functions& loopFunctions;    // shared experiment-wide state
    CVector3 startPosition;
    CVector2 targetPosition;
    CVector2 targetWaypoint;
    CVector2 fidelityPosition;             // remembered site-fidelity location
    vector<CVector2> trailToShare;
    vector<CVector2> trailToFollow;
    vector<CRay3> myTrail;                 // rays used for trail visualization
    bool isHoldingFood;
    bool isInformed;
    bool isUsingSiteFidelity;
    bool isGivingUpSearch;
    size_t searchTime;
    size_t waitTime;
    size_t collisionDelay;
    size_t resourceDensity;

private:

    /* iAnt CPFA state variable (the member shares the enum's name) */
    enum CPFA { DEPARTING = 0, SEARCHING = 1, RETURNING = 2 } CPFA;

    /* iAnt CPFA state functions — one handler per state above */
    void Departing();
    void Searching();
    void Returning();

    /* CPFA helper functions */
    void SetHoldingFood();
    void SetRandomSearchLocation();
    void SetLocalResourceDensity();
    void SetFidelityList(CVector2 newFidelity);
    void SetFidelityList();
    bool SetTargetPheromone();

    /* mathematical helper functions */
    Real GetExponentialDecay(Real value, Real time, Real lambda);
    Real GetBound(Real x, Real min, Real max);
    Real GetPoissonCDF(Real k, Real lambda);

    /* navigation helper functions */
    CRadians GetHeading();
    CRadians GetCollisionHeading();
    bool IsCollisionDetected();
    void ApproachTheTarget();
    void SetTargetInBounds(CVector2 newTarget);

    /* graphics helper functions */
    void UpdateTargetRayList();
};
#endif /* IANT_CONTROLLER_H_ */
| 30.958333
| 94
| 0.632571
|
[
"vector"
] |
f96bf492f9c8c50c81be73ee09c677dc50c50b21
| 1,245
|
c
|
C
|
src/cmd/cmaoetest.c
|
szhilkin/opensrx-7.0
|
15482b597c5f4991464150ff01383e7eb2d9675c
|
[
"BSD-2-Clause"
] | null | null | null |
src/cmd/cmaoetest.c
|
szhilkin/opensrx-7.0
|
15482b597c5f4991464150ff01383e7eb2d9675c
|
[
"BSD-2-Clause"
] | null | null | null |
src/cmd/cmaoetest.c
|
szhilkin/opensrx-7.0
|
15482b597c5f4991464150ff01383e7eb2d9675c
|
[
"BSD-2-Clause"
] | null | null | null |
#include <u.h>
#include <libc.h>
#include <ctype.h>
#include "aoe.h"
enum {
Blksize = 8192,
};
/*
 * Copy `len` bytes from the AoE target to the local device on `fd`,
 * Blksize bytes at a time.
 *
 * Fixes two defects in the original loop:
 *  - it always requested `sizeof buf` bytes, so on the final partial
 *    block a read past `len` would make the unsigned `len -= n` wrap
 *    and the loop run (nearly) forever; we now clamp the request.
 *  - a zero-byte return (EOF) was not handled and would also spin the
 *    loop forever; it is now fatal.
 */
void
dotest(Aoedev *dev, int fd, uvlong len)
{
	static uchar buf[Blksize];
	uvlong off;
	int n, want;

	off = 0;
	while (len > 0) {
		/* never ask for more than is left */
		want = sizeof buf;
		if ((uvlong)want > len)
			want = len;
		n = aoeread(dev, buf, want, off);
		if (n < 0)
			sysfatal("error: can't read from target: %r");
		if (n == 0)
			sysfatal("error: unexpected end of data from target");
		if (pwrite(fd, buf, n, off) != n)
			sysfatal("error: can't write to device: %r");
		off += n;
		len -= n;
	}
}
Aoedev *
gettarg(void)
{
int i;
/*
* Return the first CacheMotion target we find. This requires
* the shelf be configured for loopback for reliable operation.
*/
aoediscover();
for (i = 0; i < ndevs; ++i)
if (strcmp(devs[i].model, "NVWC") == 0)
return &devs[i];
return nil;
}
/*
 * Print the command synopsis to stderr and exit with "usage" status.
 */
void
usage(void)
{
	fprint(2, "usage: cmaoetest\n");
	exits("usage");
}
/*
 * cmaoetest: stream the contents of the first CacheMotion AoE target
 * onto the local sd device #S/sdS0/data.  Takes no arguments.
 *
 * Fix: the original dereferenced the dirfstat() result without checking
 * for nil, so a failed stat crashed instead of reporting the error.
 */
void
main(int argc, char *argv[])
{
	Aoedev *dev;
	int fd;
	Dir *d;

	ARGBEGIN {
	default:
		usage();
	} ARGEND

	if (argc != 0)
		usage();
	if (aoeinit(0, nil) < 0)
		sysfatal("error: can't initialize aoe");
	dev = gettarg();
	if (dev == nil)
		sysfatal("error: can't find target");
	fd = open("#S/sdS0/data", OWRITE);
	if (fd < 0)
		sysfatal("error: can't open: %r");
	d = dirfstat(fd);
	if (d == nil)
		sysfatal("error: can't stat device: %r");
	dotest(dev, fd, d->length);
	free(d);	/* dirfstat returns malloc'd storage */
	exits(nil);
}
| 15.961538
| 64
| 0.59759
|
[
"model"
] |
f973b6bc155360bad3f06439490dcb996e4f44de
| 106,419
|
c
|
C
|
src/fs/xfs/xfsprogs-dev/mkfs/xfs_mkfs.c
|
fengjixuchui/hydra
|
d49e652018a007bae9d22cb59dfa086deff7ad2f
|
[
"MIT"
] | 110
|
2019-08-21T04:23:22.000Z
|
2022-01-20T16:08:36.000Z
|
src/fs/xfs/xfsprogs-dev/mkfs/xfs_mkfs.c
|
fengjixuchui/hydra
|
d49e652018a007bae9d22cb59dfa086deff7ad2f
|
[
"MIT"
] | 16
|
2019-11-19T03:46:35.000Z
|
2021-12-19T19:26:07.000Z
|
src/fs/xfs/xfsprogs-dev/mkfs/xfs_mkfs.c
|
fengjixuchui/hydra
|
d49e652018a007bae9d22cb59dfa086deff7ad2f
|
[
"MIT"
] | 24
|
2019-09-30T21:38:08.000Z
|
2021-11-22T00:22:18.000Z
|
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libfrog.h"
#include "libxfs.h"
#include <ctype.h>
#include "xfs_multidisk.h"
#include "libxcmd.h"
#define TERABYTES(count, blog) ((uint64_t)(count) << (40 - (blog)))
#define GIGABYTES(count, blog) ((uint64_t)(count) << (30 - (blog)))
#define MEGABYTES(count, blog) ((uint64_t)(count) << (20 - (blog)))
/*
* Use this macro before we have superblock and mount structure to
* convert from basic blocks to filesystem blocks.
*/
#define DTOBT(d, bl) ((xfs_rfsblock_t)((d) >> ((bl) - BBSHIFT)))
/*
* amount (in bytes) we zero at the beginning and end of the device to
* remove traces of other filesystems, raid superblocks, etc.
*/
#define WHACK_SIZE (128 * 1024)
/*
* XXX: The configured block and sector sizes are defined as global variables so
* that they don't need to be passed to getnum/cvtnum().
*/
unsigned int blocksize;
unsigned int sectorsize;
/*
* Enums for each CLI parameter type are declared first so we can calculate the
* maximum array size needed to hold them automatically.
*/
enum {
B_SIZE = 0,
B_MAX_OPTS,
};
enum {
D_AGCOUNT = 0,
D_FILE,
D_NAME,
D_SIZE,
D_SUNIT,
D_SWIDTH,
D_AGSIZE,
D_SU,
D_SW,
D_SECTSIZE,
D_NOALIGN,
D_RTINHERIT,
D_PROJINHERIT,
D_EXTSZINHERIT,
D_COWEXTSIZE,
D_MAX_OPTS,
};
enum {
I_ALIGN = 0,
I_MAXPCT,
I_PERBLOCK,
I_SIZE,
I_ATTR,
I_PROJID32BIT,
I_SPINODES,
I_MAX_OPTS,
};
enum {
L_AGNUM = 0,
L_INTERNAL,
L_SIZE,
L_VERSION,
L_SUNIT,
L_SU,
L_DEV,
L_SECTSIZE,
L_FILE,
L_NAME,
L_LAZYSBCNTR,
L_MAX_OPTS,
};
enum {
N_SIZE = 0,
N_VERSION,
N_FTYPE,
N_MAX_OPTS,
};
enum {
R_EXTSIZE = 0,
R_SIZE,
R_DEV,
R_FILE,
R_NAME,
R_NOALIGN,
R_MAX_OPTS,
};
enum {
S_SIZE = 0,
S_SECTSIZE,
S_MAX_OPTS,
};
enum {
M_CRC = 0,
M_FINOBT,
M_UUID,
M_RMAPBT,
M_REFLINK,
M_MAX_OPTS,
};
/* Just define the max options array size manually right now */
#define MAX_SUBOPTS D_MAX_OPTS
#define SUBOPT_NEEDS_VAL (-1LL)
#define MAX_CONFLICTS 8
#define LAST_CONFLICT (-1)
/*
* Table for parsing mkfs parameters.
*
* Description of the structure members follows:
*
* name MANDATORY
* Name is a single char, e.g., for '-d file', name is 'd'.
*
* subopts MANDATORY
* Subopts is a list of strings naming suboptions. In the example above,
* it would contain "file". The last entry of this list has to be NULL.
*
* subopt_params MANDATORY
* This is a list of structs tied with subopts. For each entry in subopts,
* a corresponding entry has to be defined:
*
* subopt_params struct:
* index MANDATORY
* This number, starting from zero, denotes which item in subopt_params
* it is. The index has to be the same as is the order in subopts list,
* so we can access the right item both in subopt_param and subopts.
*
* seen INTERNAL
* Do not set this flag when definning a subopt. It is used to remeber that
* this subopt was already seen, for example for conflicts detection.
*
* str_seen INTERNAL
* Do not set. It is used internally for respecification, when some options
* has to be parsed twice - at first as a string, then later as a number.
*
* convert OPTIONAL
* A flag signalling whether the user-given value can use suffixes.
* If you want to allow the use of user-friendly values like 13k, 42G,
* set it to true.
*
* is_power_2 OPTIONAL
* An optional flag for subopts where the given value has to be a power
* of two.
*
* conflicts MANDATORY
* If your subopt is in a conflict with some other option, specify it.
* Accepts the .index values of the conflicting subopts and the last
* member of this list has to be LAST_CONFLICT.
*
* minval, maxval OPTIONAL
* These options are used for automatic range check and they have to be
* always used together in pair. If you don't want to limit the max value,
* use something like UINT_MAX. If no value is given, then you must either
* supply your own validation, or refuse any value in the 'case
* X_SOMETHING' block. If you forget to define the min and max value, but
* call a standard function for validating user's value, it will cause an
* error message notifying you about this issue.
*
* (Said in another way, you can't have minval and maxval both equal
* to zero. But if one value is different: minval=0 and maxval=1,
* then it is OK.)
*
* defaultval MANDATORY
* The value used if user specifies the subopt, but no value.
* If the subopt accepts some values (-d file=[1|0]), then this
* sets what is used with simple specifying the subopt (-d file).
* A special SUBOPT_NEEDS_VAL can be used to require a user-given
* value in any case.
*/
struct opt_params {
const char name;
const char *subopts[MAX_SUBOPTS];
struct subopt_param {
int index;
bool seen;
bool str_seen;
bool convert;
bool is_power_2;
struct _conflict {
struct opt_params *opts;
int subopt;
} conflicts[MAX_CONFLICTS];
long long minval;
long long maxval;
long long defaultval;
} subopt_params[MAX_SUBOPTS];
};
/*
* The two dimensional conflict array requires some initialisations to know
* about tables that haven't yet been defined. Work around this ordering
* issue with extern definitions here.
*/
extern struct opt_params sopts;
struct opt_params bopts = {
.name = 'b',
.subopts = {
[B_SIZE] = "size",
},
.subopt_params = {
{ .index = B_SIZE,
.convert = true,
.is_power_2 = true,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = XFS_MIN_BLOCKSIZE,
.maxval = XFS_MAX_BLOCKSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
},
};
struct opt_params dopts = {
.name = 'd',
.subopts = {
[D_AGCOUNT] = "agcount",
[D_FILE] = "file",
[D_NAME] = "name",
[D_SIZE] = "size",
[D_SUNIT] = "sunit",
[D_SWIDTH] = "swidth",
[D_AGSIZE] = "agsize",
[D_SU] = "su",
[D_SW] = "sw",
[D_SECTSIZE] = "sectsize",
[D_NOALIGN] = "noalign",
[D_RTINHERIT] = "rtinherit",
[D_PROJINHERIT] = "projinherit",
[D_EXTSZINHERIT] = "extszinherit",
[D_COWEXTSIZE] = "cowextsize",
},
.subopt_params = {
{ .index = D_AGCOUNT,
.conflicts = { { &dopts, D_AGSIZE },
{ NULL, LAST_CONFLICT } },
.minval = 1,
.maxval = XFS_MAX_AGNUMBER,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_FILE,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = D_NAME,
.conflicts = { { NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.convert = true,
.minval = XFS_AG_MIN_BYTES,
.maxval = LLONG_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SUNIT,
.conflicts = { { &dopts, D_NOALIGN },
{ &dopts, D_SU },
{ &dopts, D_SW },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SWIDTH,
.conflicts = { { &dopts, D_NOALIGN },
{ &dopts, D_SU },
{ &dopts, D_SW },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_AGSIZE,
.conflicts = { { &dopts, D_AGCOUNT },
{ NULL, LAST_CONFLICT } },
.convert = true,
.minval = XFS_AG_MIN_BYTES,
.maxval = XFS_AG_MAX_BYTES,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SU,
.conflicts = { { &dopts, D_NOALIGN },
{ &dopts, D_SUNIT },
{ &dopts, D_SWIDTH },
{ NULL, LAST_CONFLICT } },
.convert = true,
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SW,
.conflicts = { { &dopts, D_NOALIGN },
{ &dopts, D_SUNIT },
{ &dopts, D_SWIDTH },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_SECTSIZE,
.conflicts = { { &sopts, S_SIZE },
{ &sopts, S_SECTSIZE },
{ NULL, LAST_CONFLICT } },
.convert = true,
.is_power_2 = true,
.minval = XFS_MIN_SECTORSIZE,
.maxval = XFS_MAX_SECTORSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_NOALIGN,
.conflicts = { { &dopts, D_SU },
{ &dopts, D_SW },
{ &dopts, D_SUNIT },
{ &dopts, D_SWIDTH },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = D_RTINHERIT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 1,
.maxval = 1,
.defaultval = 1,
},
{ .index = D_PROJINHERIT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_EXTSZINHERIT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = D_COWEXTSIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
},
};
struct opt_params iopts = {
.name = 'i',
.subopts = {
[I_ALIGN] = "align",
[I_MAXPCT] = "maxpct",
[I_PERBLOCK] = "perblock",
[I_SIZE] = "size",
[I_ATTR] = "attr",
[I_PROJID32BIT] = "projid32bit",
[I_SPINODES] = "sparse",
},
.subopt_params = {
{ .index = I_ALIGN,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = I_MAXPCT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 100,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = I_PERBLOCK,
.conflicts = { { &iopts, I_SIZE },
{ NULL, LAST_CONFLICT } },
.is_power_2 = true,
.minval = XFS_MIN_INODE_PERBLOCK,
.maxval = XFS_MAX_BLOCKSIZE / XFS_DINODE_MIN_SIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = I_SIZE,
.conflicts = { { &iopts, I_PERBLOCK },
{ NULL, LAST_CONFLICT } },
.is_power_2 = true,
.minval = XFS_DINODE_MIN_SIZE,
.maxval = XFS_DINODE_MAX_SIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = I_ATTR,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 2,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = I_PROJID32BIT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = I_SPINODES,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
},
};
struct opt_params lopts = {
.name = 'l',
.subopts = {
[L_AGNUM] = "agnum",
[L_INTERNAL] = "internal",
[L_SIZE] = "size",
[L_VERSION] = "version",
[L_SUNIT] = "sunit",
[L_SU] = "su",
[L_DEV] = "logdev",
[L_SECTSIZE] = "sectsize",
[L_FILE] = "file",
[L_NAME] = "name",
[L_LAZYSBCNTR] = "lazy-count",
},
.subopt_params = {
{ .index = L_AGNUM,
.conflicts = { { &lopts, L_DEV },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = UINT_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_INTERNAL,
.conflicts = { { &lopts, L_FILE },
{ &lopts, L_DEV },
{ &lopts, L_SECTSIZE },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = L_SIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.convert = true,
.minval = 2 * 1024 * 1024LL, /* XXX: XFS_MIN_LOG_BYTES */
.maxval = XFS_MAX_LOG_BYTES,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_VERSION,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 1,
.maxval = 2,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_SUNIT,
.conflicts = { { &lopts, L_SU },
{ NULL, LAST_CONFLICT } },
.minval = 1,
.maxval = BTOBB(XLOG_MAX_RECORD_BSIZE),
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_SU,
.conflicts = { { &lopts, L_SUNIT },
{ NULL, LAST_CONFLICT } },
.convert = true,
.minval = BBTOB(1),
.maxval = XLOG_MAX_RECORD_BSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_DEV,
.conflicts = { { &lopts, L_AGNUM },
{ &lopts, L_NAME },
{ &lopts, L_INTERNAL },
{ NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_SECTSIZE,
.conflicts = { { &lopts, L_INTERNAL },
{ NULL, LAST_CONFLICT } },
.convert = true,
.is_power_2 = true,
.minval = XFS_MIN_SECTORSIZE,
.maxval = XFS_MAX_SECTORSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_FILE,
.conflicts = { { &lopts, L_INTERNAL },
{ NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = L_NAME,
.conflicts = { { &lopts, L_AGNUM },
{ &lopts, L_DEV },
{ &lopts, L_INTERNAL },
{ NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = L_LAZYSBCNTR,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
},
};
struct opt_params nopts = {
.name = 'n',
.subopts = {
[N_SIZE] = "size",
[N_VERSION] = "version",
[N_FTYPE] = "ftype",
},
.subopt_params = {
{ .index = N_SIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.convert = true,
.is_power_2 = true,
.minval = 1 << XFS_MIN_REC_DIRSIZE,
.maxval = XFS_MAX_BLOCKSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = N_VERSION,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 2,
.maxval = 2,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = N_FTYPE,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
},
};
struct opt_params ropts = {
.name = 'r',
.subopts = {
[R_EXTSIZE] = "extsize",
[R_SIZE] = "size",
[R_DEV] = "rtdev",
[R_FILE] = "file",
[R_NAME] = "name",
[R_NOALIGN] = "noalign",
},
.subopt_params = {
{ .index = R_EXTSIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.convert = true,
.minval = XFS_MIN_RTEXTSIZE,
.maxval = XFS_MAX_RTEXTSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = R_SIZE,
.conflicts = { { NULL, LAST_CONFLICT } },
.convert = true,
.minval = 0,
.maxval = LLONG_MAX,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = R_DEV,
.conflicts = { { &ropts, R_NAME },
{ NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = R_FILE,
.minval = 0,
.maxval = 1,
.defaultval = 1,
.conflicts = { { NULL, LAST_CONFLICT } },
},
{ .index = R_NAME,
.conflicts = { { &ropts, R_DEV },
{ NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = R_NOALIGN,
.minval = 0,
.maxval = 1,
.defaultval = 1,
.conflicts = { { NULL, LAST_CONFLICT } },
},
},
};
struct opt_params sopts = {
.name = 's',
.subopts = {
[S_SIZE] = "size",
[S_SECTSIZE] = "sectsize",
},
.subopt_params = {
{ .index = S_SIZE,
.conflicts = { { &sopts, S_SECTSIZE },
{ &dopts, D_SECTSIZE },
{ NULL, LAST_CONFLICT } },
.convert = true,
.is_power_2 = true,
.minval = XFS_MIN_SECTORSIZE,
.maxval = XFS_MAX_SECTORSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = S_SECTSIZE,
.conflicts = { { &sopts, S_SIZE },
{ &dopts, D_SECTSIZE },
{ NULL, LAST_CONFLICT } },
.convert = true,
.is_power_2 = true,
.minval = XFS_MIN_SECTORSIZE,
.maxval = XFS_MAX_SECTORSIZE,
.defaultval = SUBOPT_NEEDS_VAL,
},
},
};
struct opt_params mopts = {
.name = 'm',
.subopts = {
[M_CRC] = "crc",
[M_FINOBT] = "finobt",
[M_UUID] = "uuid",
[M_RMAPBT] = "rmapbt",
[M_REFLINK] = "reflink",
},
.subopt_params = {
{ .index = M_CRC,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = M_FINOBT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = M_UUID,
.conflicts = { { NULL, LAST_CONFLICT } },
.defaultval = SUBOPT_NEEDS_VAL,
},
{ .index = M_RMAPBT,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
{ .index = M_REFLINK,
.conflicts = { { NULL, LAST_CONFLICT } },
.minval = 0,
.maxval = 1,
.defaultval = 1,
},
},
};
/* quick way of checking if a parameter was set on the CLI */
static bool
cli_opt_set(
struct opt_params *opts,
int subopt)
{
return opts->subopt_params[subopt].seen ||
opts->subopt_params[subopt].str_seen;
}
/*
* Options configured on the command line.
*
* This stores all the specific config parameters the user sets on the command
* line. We do not use these values directly - they are inputs to the mkfs
* geometry validation and override any default configuration value we have.
*
* We don't keep flags to indicate what parameters are set - if we need to check
* if an option was set on the command line, we check the relevant entry in the
* option table which records whether it was specified in the .seen and
* .str_seen variables in the table.
*
* Some parameters are stored as strings for post-parsing after their dependent
* options have been resolved (e.g. block size and sector size have been parsed
* and validated).
*
* This allows us to check that values have been set without needing separate
* flags for each value, and hence avoids needing to record and check for each
* specific option that can set the value later on in the code. In the cases
* where we don't have a cli_params structure around, the above cli_opt_set()
* function can be used.
*/
struct sb_feat_args {
int log_version;
int attr_version;
int dir_version;
bool inode_align; /* XFS_SB_VERSION_ALIGNBIT */
bool nci; /* XFS_SB_VERSION_BORGBIT */
bool lazy_sb_counters; /* XFS_SB_VERSION2_LAZYSBCOUNTBIT */
bool parent_pointers; /* XFS_SB_VERSION2_PARENTBIT */
bool projid32bit; /* XFS_SB_VERSION2_PROJID32BIT */
bool crcs_enabled; /* XFS_SB_VERSION2_CRCBIT */
bool dirftype; /* XFS_SB_VERSION2_FTYPE */
bool finobt; /* XFS_SB_FEAT_RO_COMPAT_FINOBT */
bool spinodes; /* XFS_SB_FEAT_INCOMPAT_SPINODES */
bool rmapbt; /* XFS_SB_FEAT_RO_COMPAT_RMAPBT */
bool reflink; /* XFS_SB_FEAT_RO_COMPAT_REFLINK */
bool nodalign;
bool nortalign;
};
struct cli_params {
int sectorsize;
int blocksize;
/* parameters that depend on sector/block size being validated. */
char *dsize;
char *agsize;
char *dsu;
char *dirblocksize;
char *logsize;
char *lsu;
char *rtextsize;
char *rtsize;
/* parameters where 0 is a valid CLI value */
int dsunit;
int dswidth;
int dsw;
int64_t logagno;
int loginternal;
int lsunit;
/* parameters where 0 is not a valid value */
int64_t agcount;
int inodesize;
int inopblock;
int imaxpct;
int lsectorsize;
uuid_t uuid;
/* feature flags that are set */
struct sb_feat_args sb_feat;
/* root inode characteristics */
struct fsxattr fsx;
/* libxfs device setup */
struct libxfs_xinit *xi;
};
/*
* Calculated filesystem feature and geometry information.
*
* This structure contains the information we will use to create the on-disk
* filesystem from. The validation and calculation code uses it to store all the
* temporary and final config state for the filesystem.
*
* The information in this structure will contain a mix of validated CLI input
* variables, default feature state and calculated values that are needed to
* construct the superblock and other on disk features. These are all in one
* place so that we don't have to pass handfuls of seemingly arbitrary variables
* around to different functions to do the work we need to do.
*/
struct mkfs_params {
int blocksize;
int blocklog;
int sectorsize;
int sectorlog;
int lsectorsize;
int lsectorlog;
int dirblocksize;
int dirblocklog;
int inodesize;
int inodelog;
int inopblock;
uint64_t dblocks;
uint64_t logblocks;
uint64_t rtblocks;
uint64_t rtextblocks;
uint64_t rtextents;
uint64_t rtbmblocks; /* rt bitmap blocks */
int dsunit; /* in FSBs */
int dswidth; /* in FSBs */
int lsunit; /* in FSBs */
uint64_t agsize;
uint64_t agcount;
int imaxpct;
bool loginternal;
uint64_t logstart;
uint64_t logagno;
uuid_t uuid;
char *label;
struct sb_feat_args sb_feat;
};
/*
* Default filesystem features and configuration values
*
* This structure contains the default mkfs values that are to be used when
* a user does not specify the option on the command line. We do not use these
* values directly - they are inputs to the mkfs geometry validation and
* calculations.
*/
struct mkfs_default_params {
char *source; /* where the defaults came from */
int sectorsize;
int blocksize;
/* feature flags that are set */
struct sb_feat_args sb_feat;
/* root inode characteristics */
struct fsxattr fsx;
};
/*
 * Print the full mkfs.xfs command-line summary to stderr and exit
 * non-zero.  Never returns.
 */
static void __attribute__((noreturn))
usage( void )
{
	fprintf(stderr, _("Usage: %s\n\
/* blocksize */		[-b size=num]\n\
/* metadata */		[-m crc=0|1,finobt=0|1,uuid=xxx,rmapbt=0|1,reflink=0|1]\n\
/* data subvol */	[-d agcount=n,agsize=n,file,name=xxx,size=num,\n\
			    (sunit=value,swidth=value|su=num,sw=num|noalign),\n\
			    sectsize=num\n\
/* force overwrite */	[-f]\n\
/* inode size */	[-i log=n|perblock=n|size=num,maxpct=n,attr=0|1|2,\n\
			    projid32bit=0|1,sparse=0|1]\n\
/* no discard */	[-K]\n\
/* log subvol */	[-l agnum=n,internal,size=num,logdev=xxx,version=n\n\
			    sunit=value|su=num,sectsize=num,lazy-count=0|1]\n\
/* label */		[-L label (maximum 12 characters)]\n\
/* naming */		[-n size=num,version=2|ci,ftype=0|1]\n\
/* no-op info only */	[-N]\n\
/* prototype file */	[-p fname]\n\
/* quiet */		[-q]\n\
/* realtime subvol */	[-r extsize=num,size=num,rtdev=xxx]\n\
/* sectorsize */	[-s size=num]\n\
/* version */		[-V]\n\
			devicename\n\
<devicename> is required unless -d name=xxx is given.\n\
<num> is xxx (bytes), xxxs (sectors), xxxb (fs blocks), xxxk (xxx KiB),\n\
      xxxm (xxx MiB), xxxg (xxx GiB), xxxt (xxx TiB) or xxxp (xxx PiB).\n\
<value> is xxx (512 byte blocks).\n"),
		progname);
	exit(1);
}
/*
 * Report that two mutually-exclusive suboptions were both given
 * (e.g. -d su= and -d sunit=), then exit via usage().
 */
static void
conflict(
	struct opt_params	*opts,		/* option that was being parsed */
	int			option,		/* its subopt index */
	struct opt_params	*con_opts,	/* previously seen conflicting option */
	int			conflict)	/* its subopt index */
{
	fprintf(stderr, _("Cannot specify both -%c %s and -%c %s\n"),
		con_opts->name, con_opts->subopts[conflict],
		opts->name, opts->subopts[option]);
	usage();
}
/*
 * Report an invalid value for a whole option (not a specific subopt)
 * and exit via usage().
 */
static void
illegal(
	const char	*value,
	const char	*opt)
{
	fprintf(stderr, _("Invalid value %s for -%s option\n"), value, opt);
	usage();
}
/*
 * Return non-zero iff @i is a power of two.
 *
 * Fix: the original `(i & (i - 1)) == 0` also accepted zero, which is
 * not a power of two; a zero value could therefore slip past the
 * is_power_2 suboption validation.  Explicitly reject zero.
 */
static int
ispow2(
	unsigned int	i)
{
	return i != 0 && (i & (i - 1)) == 0;
}
/*
 * Report that suboption tab[idx] of option -opt was given without the
 * value it requires, then exit via usage().  Never returns.
 */
static void __attribute__((noreturn))
reqval(
	char		opt,
	const char	*tab[],
	int		idx)
{
	fprintf(stderr, _("-%c %s option requires a value\n"), opt, tab[idx]);
	usage();
}
/*
 * Report that an option (or, when @tab is non-NULL, a specific
 * suboption of it) was specified more than once, then exit via usage().
 */
static void
respec(
	char		opt,
	const char	*tab[],
	int		idx)
{
	fprintf(stderr, "-%c ", opt);
	if (tab)
		fprintf(stderr, "%s ", tab[idx]);
	fprintf(stderr, _("option respecified\n"));
	usage();
}
/*
 * Report an unrecognized suboption string @s for option -opt, then
 * exit via usage().
 */
static void
unknown(
	char		opt,
	char		*s)
{
	fprintf(stderr, _("unknown option -%c %s\n"), opt, s);
	usage();
}
/*
 * Parse a numeric string with an optional one-character unit suffix and
 * return the value in bytes (-1LL on malformed input).
 *
 * @blksize / @sectsize give the meaning of the 'b' (filesystem block)
 * and 's' (sector) suffixes; pass 0 when the corresponding size has not
 * been established yet, in which case using that suffix is a fatal
 * usage error.  'k'/'m'/'g'/'t'/'p'/'e' scale by powers of 1024 via the
 * deliberate switch fallthrough below.
 */
long long
cvtnum(
	unsigned int	blksize,
	unsigned int	sectsize,
	const char	*s)
{
	long long	i;
	char		*sp;
	int		c;

	i = strtoll(s, &sp, 0);
	/* nothing parsed at all -> not a number */
	if (i == 0 && sp == s)
		return -1LL;
	/* plain number with no suffix */
	if (*sp == '\0')
		return i;
	/* at most one suffix character is allowed */
	if (sp[1] != '\0')
		return -1LL;

	if (*sp == 'b') {
		if (!blksize) {
			fprintf(stderr,
_("Blocksize must be provided prior to using 'b' suffix.\n"));
			usage();
		} else {
			return i * blksize;
		}
	}

	if (*sp == 's') {
		if (!sectsize) {
			fprintf(stderr,
_("Sectorsize must be specified prior to using 's' suffix.\n"));
			usage();
		} else {
			return i * sectsize;
		}
	}

	/* each case multiplies by one more factor of 1024 and falls through */
	c = tolower(*sp);
	switch (c) {
	case 'e':
		i *= 1024LL;
		/* fall through */
	case 'p':
		i *= 1024LL;
		/* fall through */
	case 't':
		i *= 1024LL;
		/* fall through */
	case 'g':
		i *= 1024LL;
		/* fall through */
	case 'm':
		i *= 1024LL;
		/* fall through */
	case 'k':
		return i * 1024LL;
	default:
		break;
	}
	return -1LL;
}
/*
 * Sanity-check one target device (data, log, or realtime) before use.
 *
 * @isfile  in:  non-zero if "-X file" was given; out: set to 1 when the
 *               path turns out to be a regular file anyway.
 * @create  out (may be NULL): set to 1 when the file should be
 *               (re)created from scratch.
 * @optname the option letter string ("d", "l", "r") for messages.
 *
 * Exits via usage()/exit(1) on any validation failure.
 */
static void
check_device_type(
	const char	*name,
	int		*isfile,
	bool		no_size,
	bool		no_name,
	int		*create,
	bool		force_overwrite,
	const char	*optname)
{
	struct stat statbuf;

	/* "-X file" requires both an explicit name and an explicit size */
	if (*isfile && (no_size || no_name)) {
		fprintf(stderr,
	_("if -%s file then -%s name and -%s size are required\n"),
			optname, optname, optname);
		usage();
	}
	if (!name) {
		fprintf(stderr, _("No device name specified\n"));
		usage();
	}

	if (stat(name, &statbuf)) {
		/* a missing path is fine for an image file: create it */
		if (errno == ENOENT && *isfile) {
			if (create)
				*create = 1;
			return;
		}

		fprintf(stderr,
	_("Error accessing specified device %s: %s\n"),
				name, strerror(errno));
		usage();
		return;
	}

	/* refuse to clobber an existing filesystem without -f */
	if (!force_overwrite && check_overwrite(name)) {
		fprintf(stderr,
	_("%s: Use the -f option to force overwrite.\n"),
			progname);
		exit(1);
	}

	/*
	 * We only want to completely truncate and recreate an existing file if
	 * we were specifically told it was a file. Set the create flag only in
	 * this case to trigger that behaviour.
	 */
	if (S_ISREG(statbuf.st_mode)) {
		if (!*isfile)
			*isfile = 1;
		else if (create)
			*create = 1;
		return;
	}

	if (S_ISBLK(statbuf.st_mode)) {
		if (*isfile) {
			fprintf(stderr,
	_("specified \"-%s file\" on a block device %s\n"),
				optname, name);
			usage();
		}
		return;
	}

	fprintf(stderr,
	_("specified device %s not a file or block device\n"),
		name);
	usage();
}
/*
 * Validate the allocation group geometry (AG size/count vs. the data
 * device size) after all CLI inputs have been resolved.  Every failure
 * path exits via usage().
 */
static void
validate_ag_geometry(
	int		blocklog,
	uint64_t	dblocks,
	uint64_t	agsize,
	uint64_t	agcount)
{
	if (agsize < XFS_AG_MIN_BLOCKS(blocklog)) {
		fprintf(stderr,
	_("agsize (%lld blocks) too small, need at least %lld blocks\n"),
			(long long)agsize,
			(long long)XFS_AG_MIN_BLOCKS(blocklog));
		usage();
	}

	if (agsize > XFS_AG_MAX_BLOCKS(blocklog)) {
		fprintf(stderr,
	_("agsize (%lld blocks) too big, maximum is %lld blocks\n"),
			(long long)agsize,
			(long long)XFS_AG_MAX_BLOCKS(blocklog));
		usage();
	}

	if (agsize > dblocks) {
		fprintf(stderr,
	_("agsize (%lld blocks) too big, data area is %lld blocks\n"),
			(long long)agsize, (long long)dblocks);
			usage();
	}

	/*
	 * NOTE(review): this condition repeats the agsize < MIN check above,
	 * which already exits via usage(), so this branch looks unreachable;
	 * the message suggests it was meant to test an agcount bound.
	 * Confirm against upstream before changing.
	 */
	if (agsize < XFS_AG_MIN_BLOCKS(blocklog)) {
		fprintf(stderr,
	_("too many allocation groups for size = %lld\n"),
				(long long)agsize);
		fprintf(stderr, _("need at most %lld allocation groups\n"),
			(long long)(dblocks / XFS_AG_MIN_BLOCKS(blocklog) +
				(dblocks % XFS_AG_MIN_BLOCKS(blocklog) != 0)));
		usage();
	}

	/*
	 * NOTE(review): likewise a repeat of the agsize > MAX check above —
	 * apparently unreachable for the same reason.
	 */
	if (agsize > XFS_AG_MAX_BLOCKS(blocklog)) {
		fprintf(stderr,
	_("too few allocation groups for size = %lld\n"), (long long)agsize);
		fprintf(stderr,
	_("need at least %lld allocation groups\n"),
		(long long)(dblocks / XFS_AG_MAX_BLOCKS(blocklog) +
			(dblocks % XFS_AG_MAX_BLOCKS(blocklog) != 0)));
		usage();
	}

	/*
	 * If the last AG is too small, reduce the filesystem size
	 * and drop the blocks.
	 */
	if ( dblocks % agsize != 0 &&
	     (dblocks % agsize < XFS_AG_MIN_BLOCKS(blocklog))) {
		fprintf(stderr,
	_("last AG size %lld blocks too small, minimum size is %lld blocks\n"),
			(long long)(dblocks % agsize),
			(long long)XFS_AG_MIN_BLOCKS(blocklog));
		usage();
	}

	/*
	 * If agcount is too large, make it smaller.
	 */
	if (agcount > XFS_MAX_AGNUMBER + 1) {
		fprintf(stderr,
	_("%lld allocation groups is too many, maximum is %lld\n"),
			(long long)agcount, (long long)XFS_MAX_AGNUMBER + 1);
		usage();
	}
}
/*
 * If the target device already holds an XFS filesystem, zero the old
 * secondary superblocks so stale copies cannot confuse later repair
 * runs.  The old filesystem's own geometry (read from its primary
 * superblock) tells us where those secondaries live.  All failures are
 * silently tolerated — this is best-effort cleanup, not a requirement.
 */
static void
zero_old_xfs_structures(
	libxfs_init_t		*xi,
	xfs_sb_t		*new_sb)
{
	void 			*buf;
	xfs_sb_t 		sb;
	uint32_t		bsize;
	int			i;
	xfs_off_t		off;

	/*
	 * We open regular files with O_TRUNC|O_CREAT. Nothing to do here...
	 */
	if (xi->disfile && xi->dcreat)
		return;

	/*
	 * read in existing filesystem superblock, use its geometry
	 * settings and zero the existing secondary superblocks.
	 */
	buf = memalign(libxfs_device_alignment(), new_sb->sb_sectsize);
	if (!buf) {
		fprintf(stderr,
	_("error reading existing superblock -- failed to memalign buffer\n"));
		return;
	}
	memset(buf, 0, new_sb->sb_sectsize);

	/*
	 * If we are creating an image file, it might be of zero length at this
	 * point in time. Hence reading the existing superblock is going to
	 * return zero bytes. It's not a failure we need to warn about in this
	 * case.
	 */
	off = pread(xi->dfd, buf, new_sb->sb_sectsize, 0);
	if (off != new_sb->sb_sectsize) {
		if (!xi->disfile)
			fprintf(stderr,
	_("error reading existing superblock: %s\n"),
				strerror(errno));
		goto done;
	}
	libxfs_sb_from_disk(&sb, buf);

	/*
	 * perform same basic superblock validation to make sure we
	 * actually zero secondary blocks
	 */
	if (sb.sb_magicnum != XFS_SB_MAGIC || sb.sb_blocksize == 0)
		goto done;

	/* recover the block-size log2 and cross-check it */
	for (bsize = 1, i = 0; bsize < sb.sb_blocksize &&
			i < sizeof(sb.sb_blocksize) * NBBY; i++)
		bsize <<= 1;

	if (i < XFS_MIN_BLOCKSIZE_LOG || i > XFS_MAX_BLOCKSIZE_LOG ||
			i != sb.sb_blocklog)
		goto done;

	/* dblocks must be consistent with the AG count/size */
	if (sb.sb_dblocks > ((uint64_t)sb.sb_agcount * sb.sb_agblocks) ||
			sb.sb_dblocks < ((uint64_t)(sb.sb_agcount - 1) *
					 sb.sb_agblocks + XFS_MIN_AG_BLOCKS))
		goto done;

	/*
	 * block size and basic geometry seems alright, zero the secondaries.
	 */
	memset(buf, 0, new_sb->sb_sectsize);
	off = 0;
	for (i = 1; i < sb.sb_agcount; i++)  {
		off += sb.sb_agblocks;
		if (pwrite(xi->dfd, buf, new_sb->sb_sectsize,
				off << sb.sb_blocklog) == -1)
			break;
	}
done:
	free(buf);
}
/*
 * Issue a discard (trim) for the first @nsectors 512-byte sectors of
 * @dev.  Failures are deliberately ignored: trimming is purely an
 * optimisation and never required for a correct mkfs.
 */
static void
discard_blocks(dev_t dev, uint64_t nsectors)
{
	int		fd;

	fd = libxfs_device_to_fd(dev);
	if (fd <= 0)
		return;

	/* sectors are 512 bytes; convert to a byte length for the ioctl */
	platform_discard_blocks(fd, 0, nsectors << 9);
}
/*
 * Report an out-of-range or malformed suboption value together with
 * its option context, then exit via usage().  Never returns.
 */
static __attribute__((noreturn)) void
illegal_option(
	const char		*value,
	struct opt_params	*opts,
	int			index,
	const char		*reason)
{
	fprintf(stderr, _("Invalid value %s for -%c %s option. %s\n"),
		value, opts->name, opts->subopts[index], reason);
	usage();
}
/*
* Check for conflicts and option respecification.
*/
static void
check_opt(
	struct opt_params	*opts,
	int			index,
	bool			str_seen)
{
	struct subopt_param	*sp = &opts->subopt_params[index];
	int			i;

	/* sanity check that the table entry sits at its declared index */
	if (sp->index != index) {
		fprintf(stderr,
	_("Developer screwed up option parsing (%d/%d)! Please report!\n"),
			sp->index, index);
		reqval(opts->name, opts->subopts, index);
	}

	/*
	 * Check for respecification of the option. This is more complex than it
	 * seems because some options are parsed twice - once as a string during
	 * input parsing, then later the string is passed to getnum for
	 * conversion into a number and bounds checking. Hence the two variables
	 * used to track the different uses based on the @str parameter passed
	 * to us.
	 */
	if (!str_seen) {
		if (sp->seen)
			respec(opts->name, opts->subopts, index);
		sp->seen = true;
	} else {
		if (sp->str_seen)
			respec(opts->name, opts->subopts, index);
		sp->str_seen = true;
	}

	/* check for conflicts with the option */
	for (i = 0; i < MAX_CONFLICTS; i++) {
		struct _conflict *con = &sp->conflicts[i];

		/* conflict table is terminated by LAST_CONFLICT */
		if (con->subopt == LAST_CONFLICT)
			break;
		/* either use of the conflicting option triggers an error */
		if (con->opts->subopt_params[con->subopt].seen ||
		    con->opts->subopt_params[con->subopt].str_seen)
			conflict(opts, index, con->opts, con->subopt);
	}
}
static long long
getnum(
const char *str,
struct opt_params *opts,
int index)
{
struct subopt_param *sp = &opts->subopt_params[index];
long long c;
check_opt(opts, index, false);
/* empty strings might just return a default value */
if (!str || *str == '\0') {
if (sp->defaultval == SUBOPT_NEEDS_VAL)
reqval(opts->name, opts->subopts, index);
return sp->defaultval;
}
if (sp->minval == 0 && sp->maxval == 0) {
fprintf(stderr,
_("Option -%c %s has undefined minval/maxval."
"Can't verify value range. This is a bug.\n"),
opts->name, opts->subopts[index]);
exit(1);
}
/*
* Some values are pure numbers, others can have suffixes that define
* the units of the number. Those get passed to cvtnum(), otherwise we
* convert it ourselves to guarantee there is no trailing garbage in the
* number.
*/
if (sp->convert)
c = cvtnum(blocksize, sectorsize, str);
else {
char *str_end;
c = strtoll(str, &str_end, 0);
if (c == 0 && str_end == str)
illegal_option(str, opts, index,
_("Value not recognized as number."));
if (*str_end != '\0')
illegal_option(str, opts, index,
_("Unit suffixes are not allowed."));
}
/* Validity check the result. */
if (c < sp->minval)
illegal_option(str, opts, index, _("Value is too small."));
else if (c > sp->maxval)
illegal_option(str, opts, index, _("Value is too large."));
if (sp->is_power_2 && !ispow2(c))
illegal_option(str, opts, index, _("Value must be a power of 2."));
return c;
}
/*
* Option is a string - do all the option table work, and check there
* is actually an option string. Otherwise we don't do anything with the string
* here - validation will be done later when the string is converted to a value
* or used as a file/device path.
*/
static char *
getstr(
	char			*str,
	struct opt_params	*opts,
	int			index)
{
	/* table bookkeeping: respecification and conflict checks */
	check_opt(opts, index, true);

	/* a string suboption must always carry a non-empty value */
	if (!str || !*str)
		reqval(opts->name, opts->subopts, index);

	return str;
}
/*
 * Parse -b (block size) suboptions.  Returns 0 on success, -EINVAL for
 * an unrecognised suboption.
 */
static int
block_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	if (subopt != B_SIZE)
		return -EINVAL;

	cli->blocksize = getnum(value, opts, subopt);
	return 0;
}
/*
 * Parse -d (data section) suboptions into the CLI parameter structure.
 * String-valued geometry options (agsize, size, su) are stored raw here
 * and converted/validated later; pure numbers go through getnum().
 * Returns 0 on success, -EINVAL for an unrecognised suboption.
 */
static int
data_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case D_AGCOUNT:
		cli->agcount = getnum(value, opts, subopt);
		break;
	case D_AGSIZE:
		cli->agsize = getstr(value, opts, subopt);
		break;
	case D_FILE:
		cli->xi->disfile = getnum(value, opts, subopt);
		break;
	case D_NAME:
		cli->xi->dname = getstr(value, opts, subopt);
		break;
	case D_SIZE:
		cli->dsize = getstr(value, opts, subopt);
		break;
	case D_SUNIT:
		cli->dsunit = getnum(value, opts, subopt);
		break;
	case D_SWIDTH:
		cli->dswidth = getnum(value, opts, subopt);
		break;
	case D_SU:
		cli->dsu = getstr(value, opts, subopt);
		break;
	case D_SW:
		cli->dsw = getnum(value, opts, subopt);
		break;
	case D_NOALIGN:
		cli->sb_feat.nodalign = getnum(value, opts, subopt);
		break;
	case D_SECTSIZE:
		cli->sectorsize = getnum(value, opts, subopt);
		break;
	case D_RTINHERIT:
		/* boolean: only set the inode flag when the value is true */
		if (getnum(value, opts, subopt))
			cli->fsx.fsx_xflags |= XFS_DIFLAG_RTINHERIT;
		break;
	case D_PROJINHERIT:
		cli->fsx.fsx_projid = getnum(value, opts, subopt);
		cli->fsx.fsx_xflags |= XFS_DIFLAG_PROJINHERIT;
		break;
	case D_EXTSZINHERIT:
		cli->fsx.fsx_extsize = getnum(value, opts, subopt);
		cli->fsx.fsx_xflags |= XFS_DIFLAG_EXTSZINHERIT;
		break;
	case D_COWEXTSIZE:
		cli->fsx.fsx_cowextsize = getnum(value, opts, subopt);
		cli->fsx.fsx_xflags |= FS_XFLAG_COWEXTSIZE;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -i (inode) suboptions into the CLI parameter structure.
 * Returns 0 on success, -EINVAL for an unrecognised suboption.
 */
static int
inode_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case I_ALIGN:
		cli->sb_feat.inode_align = getnum(value, opts, subopt);
		break;
	case I_MAXPCT:
		cli->imaxpct = getnum(value, opts, subopt);
		break;
	case I_PERBLOCK:
		/* mutually constrained with I_SIZE via the conflict tables */
		cli->inopblock = getnum(value, opts, subopt);
		break;
	case I_SIZE:
		cli->inodesize = getnum(value, opts, subopt);
		break;
	case I_ATTR:
		cli->sb_feat.attr_version = getnum(value, opts, subopt);
		break;
	case I_PROJID32BIT:
		cli->sb_feat.projid32bit = getnum(value, opts, subopt);
		break;
	case I_SPINODES:
		cli->sb_feat.spinodes = getnum(value, opts, subopt);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -l (log) suboptions into the CLI parameter structure.
 * Returns 0 on success, -EINVAL for an unrecognised suboption.
 */
static int
log_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case L_AGNUM:
		cli->logagno = getnum(value, opts, subopt);
		break;
	case L_FILE:
		cli->xi->lisfile = getnum(value, opts, subopt);
		break;
	case L_INTERNAL:
		cli->loginternal = getnum(value, opts, subopt);
		break;
	case L_SU:
		cli->lsu = getstr(value, opts, subopt);
		break;
	case L_SUNIT:
		cli->lsunit = getnum(value, opts, subopt);
		break;
	case L_NAME:
	case L_DEV:
		/* naming an external log device implies an external log */
		cli->xi->logname = getstr(value, opts, subopt);
		cli->loginternal = 0;
		break;
	case L_VERSION:
		cli->sb_feat.log_version = getnum(value, opts, subopt);
		break;
	case L_SIZE:
		cli->logsize = getstr(value, opts, subopt);
		break;
	case L_SECTSIZE:
		cli->lsectorsize = getnum(value, opts, subopt);
		break;
	case L_LAZYSBCNTR:
		cli->sb_feat.lazy_sb_counters = getnum(value, opts, subopt);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -m (metadata) suboptions into the CLI parameter structure.
 * Returns 0 on success, -EINVAL for an unrecognised suboption.
 */
static int
meta_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case M_CRC:
		cli->sb_feat.crcs_enabled = getnum(value, opts, subopt);
		/* v5 (CRC) format always carries directory ftype data */
		if (cli->sb_feat.crcs_enabled)
			cli->sb_feat.dirftype = true;
		break;
	case M_FINOBT:
		cli->sb_feat.finobt = getnum(value, opts, subopt);
		break;
	case M_UUID:
		/* UUIDs don't fit the numeric/string helpers; check inline */
		if (!value || *value == '\0')
			reqval('m', opts->subopts, subopt);
		if (platform_uuid_parse(value, &cli->uuid))
			illegal(value, "m uuid");
		break;
	case M_RMAPBT:
		cli->sb_feat.rmapbt = getnum(value, opts, subopt);
		break;
	case M_REFLINK:
		cli->sb_feat.reflink = getnum(value, opts, subopt);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -n (naming/directory) suboptions into the CLI parameter
 * structure.  Returns 0 on success, -EINVAL for an unrecognised
 * suboption.
 */
static int
naming_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case N_SIZE:
		cli->dirblocksize = getstr(value, opts, subopt);
		break;
	case N_VERSION:
		/*
		 * Use the passed-in option table like every other parser
		 * rather than reaching for the global nopts table (they are
		 * the same object when dispatched via subopt_tab, but the
		 * global reference was an inconsistency).
		 */
		value = getstr(value, opts, subopt);
		if (!strcasecmp(value, "ci")) {
			/* ASCII CI mode */
			cli->sb_feat.nci = true;
		} else {
			cli->sb_feat.dir_version = getnum(value, opts, subopt);
		}
		break;
	case N_FTYPE:
		cli->sb_feat.dirftype = getnum(value, opts, subopt);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -r (realtime device) suboptions into the CLI parameter
 * structure.  Returns 0 on success, -EINVAL for an unrecognised
 * suboption.
 */
static int
rtdev_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	switch (subopt) {
	case R_EXTSIZE:
		cli->rtextsize = getstr(value, opts, subopt);
		break;
	case R_FILE:
		cli->xi->risfile = getnum(value, opts, subopt);
		break;
	case R_NAME:
	case R_DEV:
		cli->xi->rtname = getstr(value, opts, subopt);
		break;
	case R_SIZE:
		cli->rtsize = getstr(value, opts, subopt);
		break;
	case R_NOALIGN:
		cli->sb_feat.nortalign = getnum(value, opts, subopt);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Parse -s (sector size) suboptions.  Both spellings set the data
 * sector size, and the log sector size tracks it.  Returns 0 on
 * success, -EINVAL for an unrecognised suboption.
 */
static int
sector_opts_parser(
	struct opt_params	*opts,
	int			subopt,
	char			*value,
	struct cli_params	*cli)
{
	if (subopt != S_SIZE && subopt != S_SECTSIZE)
		return -EINVAL;

	cli->sectorsize = getnum(value, opts, subopt);
	cli->lsectorsize = cli->sectorsize;
	return 0;
}
/*
 * Dispatch table mapping each top-level mkfs option letter to its
 * suboption parameter table and parser function.  Terminated by a
 * NULL entry; parse_subopts() walks it linearly.
 */
struct subopts {
	char		opt;		/* option letter, e.g. 'd' for -d */
	struct opt_params *opts;	/* suboption table for that letter */
	int		(*parser)();	/* parser invoked per suboption */
} subopt_tab[] = {
	{ 'b', &bopts, block_opts_parser },
	{ 'd', &dopts, data_opts_parser },
	{ 'i', &iopts, inode_opts_parser },
	{ 'l', &lopts, log_opts_parser },
	{ 'm', &mopts, meta_opts_parser },
	{ 'n', &nopts, naming_opts_parser },
	{ 'r', &ropts, rtdev_opts_parser },
	{ 's', &sopts, sector_opts_parser },
	{ '\0', NULL, NULL },
};
/*
 * Split a comma-separated suboption string (e.g. "size=4096,agcount=4"
 * for -d) and feed each suboption to the parser registered for @opt in
 * subopt_tab.  Unknown suboptions (getsubopt returns -1, the parser
 * returns -EINVAL) exit via unknown().
 */
static void
parse_subopts(
	char		opt,
	char		*arg,
	struct cli_params *cli)
{
	struct subopts	*sop = &subopt_tab[0];
	char		*p;
	int		ret = 0;

	/* locate the dispatch entry for this option letter */
	while (sop->opts) {
		if (sop->opt == opt)
			break;
		sop++;
	}

	/* should never happen */
	if (!sop->opts)
		return;

	p = arg;
	while (*p != '\0') {
		char	**subopts = (char **)sop->opts->subopts;
		char	*value;
		int	subopt;

		/* getsubopt advances p past the parsed "name[=value]" */
		subopt = getsubopt(&p, subopts, &value);

		ret = (sop->parser)(sop->opts, subopt, value, cli);
		if (ret)
			unknown(opt, value);
	}
}
/*
 * Establish and validate the filesystem sector size.
 *
 * Verifies that the data/log/rt targets are usable files or block
 * devices, probes the device topology, and then either uses the
 * sector size given on the command line or derives one from the
 * device's advertised physical/logical sector sizes.  Exits via
 * usage() on any invalid combination.
 */
static void
validate_sectorsize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct mkfs_default_params *dft,
	struct fs_topology	*ft,
	char			*dfile,
	int			dry_run,
	int			force_overwrite)
{
	/* set configured sector sizes in preparation for checks */
	if (!cli->sectorsize) {
		cfg->sectorsize = dft->sectorsize;
	} else {
		cfg->sectorsize = cli->sectorsize;
	}
	cfg->sectorlog = libxfs_highbit32(cfg->sectorsize);

	/*
	 * Before anything else, verify that we are correctly operating on
	 * files or block devices and set the control parameters correctly.
	 */
	check_device_type(dfile, &cli->xi->disfile, !cli->dsize, !dfile,
			  dry_run ? NULL : &cli->xi->dcreat,
			  force_overwrite, "d");
	if (!cli->loginternal)
		check_device_type(cli->xi->logname, &cli->xi->lisfile,
				  !cli->logsize, !cli->xi->logname,
				  dry_run ? NULL : &cli->xi->lcreat,
				  force_overwrite, "l");
	if (cli->xi->rtname)
		check_device_type(cli->xi->rtname, &cli->xi->risfile,
				  !cli->rtsize, !cli->xi->rtname,
				  dry_run ? NULL : &cli->xi->rcreat,
				  force_overwrite, "r");

	/*
	 * Explicitly disable direct IO for image files so we don't error out on
	 * sector size mismatches between the new filesystem and the underlying
	 * host filesystem.
	 */
	if (cli->xi->disfile || cli->xi->lisfile || cli->xi->risfile)
		cli->xi->isdirect = 0;

	memset(ft, 0, sizeof(*ft));
	get_topology(cli->xi, ft, force_overwrite);

	if (!cli->sectorsize) {
		/*
		 * Unless specified manually on the command line use the
		 * advertised sector size of the device.  We use the physical
		 * sector size unless the requested block size is smaller
		 * than that, then we can use logical, but warn about the
		 * inefficiency.
		 *
		 * Set the topology sectors if they were not probed to the
		 * minimum supported sector size.
		 */
		if (!ft->lsectorsize)
			ft->lsectorsize = XFS_MIN_SECTORSIZE;

		/* Older kernels may not have physical/logical distinction */
		if (!ft->psectorsize)
			ft->psectorsize = ft->lsectorsize;

		cfg->sectorsize = ft->psectorsize;
		if (cfg->blocksize < cfg->sectorsize &&
		    cfg->blocksize >= ft->lsectorsize) {
			fprintf(stderr,
_("specified blocksize %d is less than device physical sector size %d\n"
  "switching to logical sector size %d\n"),
				cfg->blocksize, ft->psectorsize,
				ft->lsectorsize);
			cfg->sectorsize = ft->lsectorsize;
		}

		cfg->sectorlog = libxfs_highbit32(cfg->sectorsize);
	}

	/* validate specified/probed sector size */
	if (cfg->sectorsize < XFS_MIN_SECTORSIZE ||
	    cfg->sectorsize > XFS_MAX_SECTORSIZE) {
		fprintf(stderr, _("illegal sector size %d\n"), cfg->sectorsize);
		usage();
	}

	if (cfg->blocksize < cfg->sectorsize) {
		fprintf(stderr,
_("block size %d cannot be smaller than sector size %d\n"),
			cfg->blocksize, cfg->sectorsize);
		usage();
	}

	/* an explicit sector size below the hardware sector size is fatal */
	if (cfg->sectorsize < ft->lsectorsize) {
		fprintf(stderr, _("illegal sector size %d; hw sector is %d\n"),
			cfg->sectorsize, ft->lsectorsize);
		usage();
	}
}
/*
 * Establish and validate the filesystem block size from the command
 * line or defaults.  Exits via usage() if the size is out of range or
 * too small for a CRC-enabled (v5) filesystem.
 */
static void
validate_blocksize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct mkfs_default_params *dft)
{
	/*
	 * Blocksize and sectorsize first, other things depend on them
	 * For RAID4/5/6 we want to align sector size and block size,
	 * so we need to start with the device geometry extraction too.
	 */
	if (!cli->blocksize)
		cfg->blocksize = dft->blocksize;
	else
		cfg->blocksize = cli->blocksize;
	cfg->blocklog = libxfs_highbit32(cfg->blocksize);

	/* validate block sizes are in range */
	if (cfg->blocksize < XFS_MIN_BLOCKSIZE ||
	    cfg->blocksize > XFS_MAX_BLOCKSIZE) {
		fprintf(stderr, _("illegal block size %d\n"), cfg->blocksize);
		usage();
	}

	/* v5 metadata does not fit into smaller blocks */
	if (cli->sb_feat.crcs_enabled &&
	    cfg->blocksize < XFS_MIN_CRC_BLOCKSIZE) {
		fprintf(stderr,
_("Minimum block size for CRC enabled filesystems is %d bytes.\n"),
			XFS_MIN_CRC_BLOCKSIZE);
		usage();
	}
}
/*
* Grab log sector size and validate.
*
* XXX: should we probe sector size on external log device rather than using
* the data device sector size?
*/
static void
validate_log_sectorsize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct mkfs_default_params *dft)
{

	/* an internal log must share the data device's sector size */
	if (cli->loginternal && cli->lsectorsize &&
	    cli->lsectorsize != cfg->sectorsize) {
		fprintf(stderr,
			_("Can't change sector size on internal log!\n"));
		usage();
	}

	/* precedence: explicit -l sectsize, then data sector size
	 * (internal log), then the built-in default (external log) */
	if (cli->lsectorsize)
		cfg->lsectorsize = cli->lsectorsize;
	else if (cli->loginternal)
		cfg->lsectorsize = cfg->sectorsize;
	else
		cfg->lsectorsize = dft->sectorsize;
	cfg->lsectorlog = libxfs_highbit32(cfg->lsectorsize);

	if (cfg->lsectorsize < XFS_MIN_SECTORSIZE ||
	    cfg->lsectorsize > XFS_MAX_SECTORSIZE ||
	    cfg->lsectorsize > cfg->blocksize) {
		fprintf(stderr, _("illegal log sector size %d\n"),
			cfg->lsectorsize);
		usage();
	}

	/* sectors larger than the minimum require a version 2 log */
	if (cfg->lsectorsize > XFS_MIN_SECTORSIZE) {
		if (cli->sb_feat.log_version < 2) {
			/* user specified non-default log version */
			fprintf(stderr,
_("Version 1 logs do not support sector size %d\n"),
				cfg->lsectorsize);
			usage();
		}
	}

	/* if lsu or lsunit was specified, automatically use v2 logs */
	if ((cli_opt_set(&lopts, L_SU) || cli_opt_set(&lopts, L_SUNIT)) &&
	    cli->sb_feat.log_version == 1) {
		fprintf(stderr,
_("log stripe unit specified, using v2 logs\n"));
		cli->sb_feat.log_version = 2;
	}

}
/*
* Check that the incoming features make sense. The CLI structure was
* initialised with the default values before parsing, so we can just
* check it and copy it straight across to the cfg structure if it
* checks out.
*/
static void
validate_sb_features(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{
	/*
	 * Now we have blocks and sector sizes set up, check parameters that are
	 * no longer optional for CRC enabled filesystems. Catch them up front
	 * here before doing anything else.
	 */
	if (cli->sb_feat.crcs_enabled) {
		/* minimum inode size is 512 bytes, rest checked later */
		if (cli->inodesize &&
		    cli->inodesize < (1 << XFS_DINODE_DFL_CRC_LOG)) {
			fprintf(stderr,
_("Minimum inode size for CRCs is %d bytes\n"),
				1 << XFS_DINODE_DFL_CRC_LOG);
			usage();
		}

		/* inodes always aligned */
		if (!cli->sb_feat.inode_align) {
			fprintf(stderr,
_("Inodes always aligned for CRC enabled filesystems\n"));
			usage();
		}

		/* lazy sb counters always on */
		if (!cli->sb_feat.lazy_sb_counters) {
			fprintf(stderr,
_("Lazy superblock counters always enabled for CRC enabled filesystems\n"));
			usage();
		}

		/* version 2 logs always on */
		if (cli->sb_feat.log_version != 2) {
			fprintf(stderr,
_("V2 logs always enabled for CRC enabled filesystems\n"));
			usage();
		}

		/* attr2 always on */
		if (cli->sb_feat.attr_version != 2) {
			fprintf(stderr,
_("V2 attribute format always enabled on CRC enabled filesystems\n"));
			usage();
		}

		/* 32 bit project quota always on */
		if (!cli->sb_feat.projid32bit) {
			fprintf(stderr,
_("32 bit Project IDs always enabled on CRC enabled filesystems\n"));
			usage();
		}

		/* ftype always on */
		if (!cli->sb_feat.dirftype) {
			fprintf(stderr,
_("Directory ftype field always enabled on CRC enabled filesystems\n"));
			usage();
		}
	} else {
		/*
		 * The kernel doesn't currently support crc=0,finobt=1
		 * filesystems. If crcs are not enabled and the user has not
		 * explicitly turned finobt on, then silently turn it off to
		 * avoid an unnecessary warning.
		 * If the user explicitly tried to use crc=0,finobt=1,
		 * then issue an error.
		 * The same is also for sparse inodes.
		 */
		if (cli->sb_feat.finobt && cli_opt_set(&mopts, M_FINOBT)) {
			fprintf(stderr,
_("finobt not supported without CRC support\n"));
			usage();
		}
		cli->sb_feat.finobt = false;

		if (cli->sb_feat.spinodes) {
			fprintf(stderr,
_("sparse inodes not supported without CRC support\n"));
			usage();
		}
		cli->sb_feat.spinodes = false;

		if (cli->sb_feat.rmapbt) {
			fprintf(stderr,
_("rmapbt not supported without CRC support\n"));
			usage();
		}
		cli->sb_feat.rmapbt = false;

		if (cli->sb_feat.reflink) {
			fprintf(stderr,
_("reflink not supported without CRC support\n"));
			usage();
		}
		cli->sb_feat.reflink = false;
	}

	if ((cli->fsx.fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
	    !cli->sb_feat.reflink) {
		fprintf(stderr,
_("cowextsize not supported without reflink support\n"));
		usage();
	}

	/*
	 * usage() does not return, so the feature-disabling assignments that
	 * used to follow these two calls were unreachable dead code and have
	 * been removed.
	 */
	if (cli->sb_feat.reflink && cli->xi->rtname) {
		fprintf(stderr,
_("reflink not supported with realtime devices\n"));
		usage();
	}

	if (cli->sb_feat.rmapbt && cli->xi->rtname) {
		fprintf(stderr,
_("rmapbt not supported with realtime devices\n"));
		usage();
	}

	/*
	 * Copy features across to config structure now.
	 */
	cfg->sb_feat = cli->sb_feat;
	if (!platform_uuid_is_null(&cli->uuid))
		platform_uuid_copy(&cfg->uuid, &cli->uuid);
}
/*
 * Establish and validate the directory block size.  An explicit -n size
 * must be between the filesystem block size and the maximum block size;
 * otherwise a default is derived from the filesystem block size.
 */
static void
validate_dirblocksize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{

	if (cli->dirblocksize)
		cfg->dirblocksize = getnum(cli->dirblocksize, &nopts, N_SIZE);

	if (cfg->dirblocksize) {
		if (cfg->dirblocksize < cfg->blocksize ||
		    cfg->dirblocksize > XFS_MAX_BLOCKSIZE) {
			fprintf(stderr, _("illegal directory block size %d\n"),
				cfg->dirblocksize);
			usage();
		}
		cfg->dirblocklog = libxfs_highbit32(cfg->dirblocksize);
		return;
	}

	/* use default size based on current block size */
	if (cfg->blocksize < (1 << XFS_MIN_REC_DIRSIZE))
		cfg->dirblocklog = XFS_MIN_REC_DIRSIZE;
	else
		cfg->dirblocklog = cfg->blocklog;
	cfg->dirblocksize = 1 << cfg->dirblocklog;
}
/*
 * Establish and validate the inode size.  Precedence: inodes-per-block
 * (-i perblock), explicit inode size (-i size), then the CRC/non-CRC
 * default.  Exits on sizes outside the allowed per-block and absolute
 * limits.
 */
static void
validate_inodesize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{

	if (cli->inopblock)
		cfg->inodelog = cfg->blocklog - libxfs_highbit32(cli->inopblock);
	else if (cli->inodesize)
		cfg->inodelog = libxfs_highbit32(cli->inodesize);
	else if (cfg->sb_feat.crcs_enabled)
		cfg->inodelog = XFS_DINODE_DFL_CRC_LOG;
	else
		cfg->inodelog = XFS_DINODE_DFL_LOG;

	cfg->inodesize = 1 << cfg->inodelog;
	cfg->inopblock = cfg->blocksize / cfg->inodesize;

	/* input parsing has already validated non-crc inode size range */
	if (cfg->sb_feat.crcs_enabled &&
	    cfg->inodelog < XFS_DINODE_DFL_CRC_LOG) {
		fprintf(stderr,
		_("Minimum inode size for CRCs is %d bytes\n"),
			1 << XFS_DINODE_DFL_CRC_LOG);
		usage();
	}

	if (cfg->inodesize > cfg->blocksize / XFS_MIN_INODE_PERBLOCK ||
	    cfg->inopblock < XFS_MIN_INODE_PERBLOCK ||
	    cfg->inodesize < XFS_DINODE_MIN_SIZE ||
	    cfg->inodesize > XFS_DINODE_MAX_SIZE) {
		int	maxsz;

		fprintf(stderr, _("illegal inode size %d\n"), cfg->inodesize);
		/* report the actual usable range for this block size */
		maxsz = MIN(cfg->blocksize / XFS_MIN_INODE_PERBLOCK,
			    XFS_DINODE_MAX_SIZE);
		if (XFS_DINODE_MIN_SIZE == maxsz)
			fprintf(stderr,
			_("allowable inode size with %d byte blocks is %d\n"),
				cfg->blocksize, XFS_DINODE_MIN_SIZE);
		else
			fprintf(stderr,
	_("allowable inode size with %d byte blocks is between %d and %d\n"),
				cfg->blocksize, XFS_DINODE_MIN_SIZE, maxsz);
		exit(1);
	}
}
/*
 * Convert a user-supplied device size string (for the data, log or rt
 * subvolume) into a count of filesystem blocks.  A NULL size means "not
 * specified" and yields zero.  Sizes that are not a multiple of the
 * minimum block size are fatal; sizes not a multiple of the chosen
 * filesystem block size are truncated with a warning.
 */
static xfs_rfsblock_t
calc_dev_size(
	char			*size,
	struct mkfs_params	*cfg,
	struct opt_params	*opts,
	int			sizeopt,
	char			*type)
{
	uint64_t		bytes;
	xfs_rfsblock_t		blocks;

	if (!size)
		return 0;

	bytes = getnum(size, opts, sizeopt);
	if (bytes % XFS_MIN_BLOCKSIZE) {
		fprintf(stderr,
		_("illegal %s length %lld, not a multiple of %d\n"),
			type, (long long)bytes, XFS_MIN_BLOCKSIZE);
		usage();
	}

	blocks = (xfs_rfsblock_t)(bytes >> cfg->blocklog);
	if (bytes % cfg->blocksize)
		fprintf(stderr,
_("warning: %s length %lld not a multiple of %d, truncated to %lld\n"),
			type, (long long)bytes, cfg->blocksize,
			(long long)(blocks << cfg->blocklog));

	return blocks;
}
/*
 * Establish the realtime extent size in filesystem blocks.  An explicit
 * -r extsize must be a multiple of the block size; otherwise the stripe
 * width of the rt device is used when alignment is enabled, falling
 * back to a size-based default.
 */
static void
validate_rtextsize(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct fs_topology	*ft)
{
	uint64_t		rtextbytes;

	/*
	 * If specified, check rt extent size against its constraints.
	 */
	if (cli->rtextsize) {

		rtextbytes = getnum(cli->rtextsize, &ropts, R_EXTSIZE);
		if (rtextbytes % cfg->blocksize) {
			fprintf(stderr,
		_("illegal rt extent size %lld, not a multiple of %d\n"),
				(long long)rtextbytes, cfg->blocksize);
			usage();
		}
		cfg->rtextblocks = (xfs_extlen_t)(rtextbytes >> cfg->blocklog);
	} else {
		/*
		 * If realtime extsize has not been specified by the user,
		 * and the underlying volume is striped, then set rtextblocks
		 * to the stripe width.
		 */
		uint64_t	rswidth;

		/* probed stripe width only applies to real rt devices */
		if (!cfg->sb_feat.nortalign && !cli->xi->risfile &&
		    !(!cli->rtsize && cli->xi->disfile))
			rswidth = ft->rtswidth;
		else
			rswidth = 0;

		/* check that rswidth is a multiple of fs blocksize */
		if (!cfg->sb_feat.nortalign && rswidth &&
		    !(BBTOB(rswidth) % cfg->blocksize)) {
			rswidth = DTOBT(rswidth, cfg->blocklog);
			rtextbytes = rswidth << cfg->blocklog;
			/* only adopt the width if within the valid range */
			if (rtextbytes > XFS_MIN_RTEXTSIZE &&
			    rtextbytes <= XFS_MAX_RTEXTSIZE) {
				cfg->rtextblocks = rswidth;
			}
		}
		if (!cfg->rtextblocks) {
			cfg->rtextblocks = (cfg->blocksize < XFS_MIN_RTEXTSIZE)
					? XFS_MIN_RTEXTSIZE >> cfg->blocklog
					: 1;
		}
	}
	ASSERT(cfg->rtextblocks);
}
/*
* Validate the configured stripe geometry, or is none is specified, pull
* the configuration from the underlying device.
*
* CLI parameters come in as different units, go out as filesystem blocks.
*/
static void
calc_stripe_factors(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct fs_topology	*ft)
{
	long long int	big_dswidth;
	int		dsunit = 0;	/* data stripe unit, 512B blocks */
	int		dswidth = 0;	/* data stripe width, 512B blocks */
	int		lsunit = 0;	/* log stripe unit, 512B blocks */
	int		dsu = 0;	/* data stripe unit, bytes */
	int		dsw = 0;	/* data stripe width, in units */
	int		lsu = 0;	/* log stripe unit, bytes */
	bool		use_dev = false;

	/* pull in whichever spellings the user supplied */
	if (cli_opt_set(&dopts, D_SUNIT))
		dsunit = cli->dsunit;
	if (cli_opt_set(&dopts, D_SWIDTH))
		dswidth = cli->dswidth;

	if (cli_opt_set(&dopts, D_SU))
		dsu = getnum(cli->dsu, &dopts, D_SU);
	if (cli_opt_set(&dopts, D_SW))
		dsw = cli->dsw;

	/* data sunit/swidth options */
	if (cli_opt_set(&dopts, D_SUNIT) != cli_opt_set(&dopts, D_SWIDTH)) {
		fprintf(stderr,
_("both data sunit and data swidth options must be specified\n"));
		usage();
	}

	/* convert dsu/dsw to dsunit/dswidth and use them from now on */
	if (dsu || dsw) {
		if (cli_opt_set(&dopts, D_SU) != cli_opt_set(&dopts, D_SW)) {
			fprintf(stderr,
_("both data su and data sw options must be specified\n"));
			usage();
		}

		if (dsu % cfg->sectorsize) {
			fprintf(stderr,
_("data su must be a multiple of the sector size (%d)\n"), cfg->sectorsize);
			usage();
		}

		dsunit  = (int)BTOBBT(dsu);
		/* widen before multiplying to detect int overflow */
		big_dswidth = (long long int)dsunit * dsw;
		if (big_dswidth > INT_MAX) {
			fprintf(stderr,
_("data stripe width (%lld) is too large of a multiple of the data stripe unit (%d)\n"),
				big_dswidth, dsunit);
			usage();
		}
		dswidth = big_dswidth;
	}

	if (dsunit && (!dswidth || (dswidth % dsunit != 0))) {
		fprintf(stderr,
_("data stripe width (%d) must be a multiple of the data stripe unit (%d)\n"),
			dswidth, dsunit);
		usage();
	}

	/* If sunit & swidth were manually specified as 0, same as noalign */
	if ((cli_opt_set(&dopts, D_SUNIT) || cli_opt_set(&dopts, D_SU)) &&
	    !dsunit && !dswidth)
		cfg->sb_feat.nodalign = true;

	/* if we are not using alignment, don't apply device defaults */
	if (cfg->sb_feat.nodalign) {
		cfg->dsunit = 0;
		cfg->dswidth = 0;
		goto check_lsunit;
	}

	/* if no stripe config set, use the device default */
	if (!dsunit) {
		dsunit = ft->dsunit;
		dswidth = ft->dswidth;
		use_dev = true;
	} else {
		/* check and warn is alignment is sub-optimal */
		if (ft->dsunit && ft->dsunit != dsunit) {
			fprintf(stderr,
_("%s: Specified data stripe unit %d is not the same as the volume stripe unit %d\n"),
				progname, dsunit, ft->dsunit);
		}
		if (ft->dswidth && ft->dswidth != dswidth) {
			fprintf(stderr,
_("%s: Specified data stripe width %d is not the same as the volume stripe width %d\n"),
				progname, dswidth, ft->dswidth);
		}
	}

	/*
	 * now we have our stripe config, check it's a multiple of block
	 * size.
	 */
	if ((BBTOB(dsunit) % cfg->blocksize) ||
	    (BBTOB(dswidth) % cfg->blocksize)) {
		/*
		 * If we are using device defaults, just clear them and we're
		 * good to go. Otherwise bail out with an error.
		 */
		if (!use_dev) {
			fprintf(stderr,
_("%s: Stripe unit(%d) or stripe width(%d) is not a multiple of the block size(%d)\n"),
				progname, BBTOB(dsunit), BBTOB(dswidth),
				cfg->blocksize);
			exit(1);
		}
		dsunit = 0;
		dswidth = 0;
		cfg->sb_feat.nodalign = true;
	}

	/* convert from 512 byte blocks to fs blocksize */
	cfg->dsunit = DTOBT(dsunit, cfg->blocklog);
	cfg->dswidth = DTOBT(dswidth, cfg->blocklog);

check_lsunit:
	/* log sunit options */
	if (cli_opt_set(&lopts, L_SUNIT))
		lsunit = cli->lsunit;
	else if (cli_opt_set(&lopts, L_SU))
		lsu = getnum(cli->lsu, &lopts, L_SU);
	else if (cfg->lsectorsize > XLOG_HEADER_SIZE)
		lsu = cfg->blocksize; /* lsunit matches filesystem block size */

	if (lsu) {
		/* verify if lsu is a multiple block size */
		if (lsu % cfg->blocksize != 0) {
			fprintf(stderr,
_("log stripe unit (%d) must be a multiple of the block size (%d)\n"),
				lsu, cfg->blocksize);
			usage();
		}
		lsunit = (int)BTOBBT(lsu);
	}
	if (BBTOB(lsunit) % cfg->blocksize != 0) {
		fprintf(stderr,
_("log stripe unit (%d) must be a multiple of the block size (%d)\n"),
			BBTOB(lsunit), cfg->blocksize);
		usage();
	}

	/*
	 * check that log sunit is modulo fsblksize or default it to dsunit.
	 */
	if (lsunit) {
		/* convert from 512 byte blocks to fs blocks */
		cfg->lsunit = DTOBT(lsunit, cfg->blocklog);
	} else if (cfg->sb_feat.log_version == 2 &&
		   cfg->loginternal && cfg->dsunit) {
		/* lsunit and dsunit now in fs blocks */
		cfg->lsunit = cfg->dsunit;
	}

	/* v2 log stripe units are capped at 256KiB; fall back to 32KiB */
	if (cfg->sb_feat.log_version == 2 &&
	    cfg->lsunit * cfg->blocksize > 256 * 1024) {
		/* Warn only if specified on commandline */
		if (cli->lsu || cli->lsunit != -1) {
			fprintf(stderr,
_("log stripe unit (%d bytes) is too large (maximum is 256KiB)\n"
  "log stripe unit adjusted to 32KiB\n"),
				(cfg->lsunit * cfg->blocksize));
		}
		/* XXX: 64k block size? */
		cfg->lsunit = (32 * 1024) / cfg->blocksize;
	}
}
/*
 * Open the data, log and rt devices via libxfs_init(), round their
 * sizes down to a sector-aligned length, and optionally discard (trim)
 * their contents.
 */
static void
open_devices(
	struct mkfs_params	*cfg,
	struct libxfs_xinit	*xi,
	bool			discard)
{
	uint64_t		sector_mask;

	/*
	 * Initialize.  This will open the log and rt devices as well.
	 */
	xi->setblksize = cfg->sectorsize;
	if (!libxfs_init(xi))
		usage();
	if (!xi->ddev) {
		fprintf(stderr, _("no device name given in argument list\n"));
		usage();
	}

	/*
	 * Ok, Linux only has a 1024-byte resolution on device _size_,
	 * and the sizes below are in basic 512-byte blocks,
	 * so if we have (size % 2), on any partition, we can't get
	 * to the last 512 bytes.  The same issue exists for larger
	 * sector sizes - we cannot write past the last sector.
	 *
	 * So, we reduce the size (in basic blocks) to a perfect
	 * multiple of the sector size, or 1024, whichever is larger.
	 */
	sector_mask = (uint64_t)-1 << (MAX(cfg->sectorlog, 10) - BBSHIFT);
	xi->dsize &= sector_mask;
	xi->rtsize &= sector_mask;
	/* the log may have its own (larger) sector size */
	xi->logBBsize &= (uint64_t)-1 << (MAX(cfg->lsectorlog, 10) - BBSHIFT);


	if (!discard)
		return;

	/* discard is pointless on regular files, skip them */
	if (!xi->disfile)
		discard_blocks(xi->ddev, xi->dsize);
	if (xi->rtdev && !xi->risfile)
		discard_blocks(xi->rtdev, xi->rtsize);
	if (xi->logdev && xi->logdev != xi->ddev && !xi->lisfile)
		discard_blocks(xi->logdev, xi->logBBsize);
}
/*
 * Validate the size of the data device against the user-requested
 * filesystem size, defaulting to the full device when no size was
 * given, and enforce the minimum filesystem size.
 */
static void
validate_datadev(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{
	struct libxfs_xinit	*xi = cli->xi;

	if (!xi->dsize) {
		/*
		 * if the device is a file, we can't validate the size here.
		 * Instead, the file will be truncated to the correct length
		 * later on. if it's not a file, we've got a dud device.
		 */
		if (!xi->disfile) {
			fprintf(stderr, _("can't get size of data subvolume\n"));
			usage();
		}
		ASSERT(cfg->dblocks);
	} else if (cfg->dblocks) {
		/* check the size fits into the underlying device */
		if (cfg->dblocks > DTOBT(xi->dsize, cfg->blocklog)) {
			fprintf(stderr,
_("size %s specified for data subvolume is too large, maximum is %lld blocks\n"),
				cli->dsize,
				(long long)DTOBT(xi->dsize, cfg->blocklog));
			usage();
		}
	} else {
		/* no user size, so use the full block device */
		cfg->dblocks = DTOBT(xi->dsize, cfg->blocklog);
	}

	if (cfg->dblocks < XFS_MIN_DATA_BLOCKS) {
		fprintf(stderr,
_("size %lld of data subvolume is too small, minimum %d blocks\n"),
			(long long)cfg->dblocks, XFS_MIN_DATA_BLOCKS);
		usage();
	}

	/* non-fatal: the chosen sector size is below the device's */
	if (xi->dbsize > cfg->sectorsize) {
		fprintf(stderr, _(
"Warning: the data subvolume sector size %u is less than the sector size \n\
reported by the device (%u).\n"),
			cfg->sectorsize, xi->dbsize);
	}
}
/*
* This is more complex than it needs to be because we still support volume
* based external logs. They are only discovered *after* the devices have been
* opened, hence the crazy "is this really an internal log" checks here.
*/
static void
validate_logdev(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	char			**devname)
{
	struct libxfs_xinit	*xi = cli->xi;

	*devname = NULL;

	/* check for volume log first */
	if (cli->loginternal && xi->volname && xi->logdev) {
		/* a log subvolume in the volume overrides "internal" */
		*devname = _("volume log");
		cfg->loginternal = false;
	} else
		cfg->loginternal = cli->loginternal;

	/* now run device checks */
	if (cfg->loginternal) {
		/* internal log and an external log device are exclusive */
		if (xi->logdev) {
			fprintf(stderr,
				_("can't have both external and internal logs\n"));
			usage();
		}

		/*
		 * if no sector size has been specified on the command line,
		 * use what has been configured and validated for the data
		 * device.
		 */
		if (!cli->lsectorsize) {
			cfg->lsectorsize = cfg->sectorsize;
			cfg->lsectorlog = cfg->sectorlog;
		}

		if (cfg->sectorsize != cfg->lsectorsize) {
			fprintf(stderr,
_("data and log sector sizes must be equal for internal logs\n"));
			usage();
		}
		if (cli->logsize && cfg->logblocks >= cfg->dblocks) {
			fprintf(stderr,
_("log size %lld too large for internal log\n"),
				(long long)cfg->logblocks);
			usage();
		}
		*devname = _("internal log");
		return;
	}

	/* External/log subvolume checks */
	if (xi->logname)
		*devname = xi->logname;
	if (!*devname || !xi->logdev) {
		fprintf(stderr, _("no log subvolume or external log.\n"));
		usage();
	}

	/* no explicit size: use the whole device */
	if (!cfg->logblocks) {
		if (xi->logBBsize == 0) {
			fprintf(stderr,
_("unable to get size of the log subvolume.\n"));
			usage();
		}
		cfg->logblocks = DTOBT(xi->logBBsize, cfg->blocklog);
	} else if (cfg->logblocks > DTOBT(xi->logBBsize, cfg->blocklog)) {
		fprintf(stderr,
_("size %s specified for log subvolume is too large, maximum is %lld blocks\n"),
			cli->logsize,
			(long long)DTOBT(xi->logBBsize, cfg->blocklog));
		usage();
	}

	/* non-fatal: the chosen log sector size is below the device's */
	if (xi->lbsize > cfg->lsectorsize) {
		fprintf(stderr, _(
"Warning: the log subvolume sector size %u is less than the sector size\n\
reported by the device (%u).\n"),
			cfg->lsectorsize, xi->lbsize);
	}
}
/*
 * Validate the realtime device geometry: reject sizes for non-existent
 * rt devices, default the rt size to the whole device, and derive the
 * rt extent and bitmap counts.  Sets *devname to a human-readable name
 * for reporting.
 */
static void
validate_rtdev(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	char			**devname)
{
	struct libxfs_xinit	*xi = cli->xi;

	*devname = NULL;

	if (!xi->rtdev) {
		if (cli->rtsize) {
			fprintf(stderr,
_("size specified for non-existent rt subvolume\n"));
			usage();
		}

		*devname = _("none");
		cfg->rtblocks = 0;
		cfg->rtextents = 0;
		cfg->rtbmblocks = 0;
		return;
	}
	if (!xi->rtsize) {
		fprintf(stderr, _("Invalid zero length rt subvolume found\n"));
		usage();
	}

	/* volume rtdev */
	if (xi->volname)
		*devname = _("volume rt");
	else
		*devname = xi->rtname;

	if (cli->rtsize) {
		if (cfg->rtblocks > DTOBT(xi->rtsize, cfg->blocklog)) {
			/* fixed garbled "maxi->um" typo in this message */
			fprintf(stderr,
_("size %s specified for rt subvolume is too large, maximum is %lld blocks\n"),
				cli->rtsize,
				(long long)DTOBT(xi->rtsize, cfg->blocklog));
			usage();
		}
		/* non-fatal: chosen sector size is below the device's */
		if (xi->rtbsize > cfg->sectorsize) {
			fprintf(stderr, _(
"Warning: the realtime subvolume sector size %u is less than the sector size\n\
reported by the device (%u).\n"),
				cfg->sectorsize, xi->rtbsize);
		}
	} else {
		/* grab volume size */
		cfg->rtblocks = DTOBT(xi->rtsize, cfg->blocklog);
	}

	cfg->rtextents = cfg->rtblocks / cfg->rtextblocks;
	/* one bit per rt extent, rounded up to whole bitmap blocks */
	cfg->rtbmblocks = (xfs_extlen_t)howmany(cfg->rtextents,
						NBBY * cfg->blocksize);
}
/*
 * Pick the initial allocation group size and count, before any stripe
 * alignment is applied. Priority order: explicit agsize on the command
 * line, then explicit agcount, then library-computed defaults.
 */
static void
calculate_initial_ag_geometry(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{
	uint64_t		blks = cfg->dblocks;

	if (cli->agsize) {
		/* Explicit AG size specified by the user. */
		cfg->agsize = getnum(cli->agsize, &dopts, D_AGSIZE);

		/* An AG must span a whole number of filesystem blocks. */
		if (cfg->agsize % cfg->blocksize) {
			fprintf(stderr,
_("agsize (%s) not a multiple of fs blk size (%d)\n"),
				cli->agsize, cfg->blocksize);
			usage();
		}
		cfg->agsize /= cfg->blocksize;

		/* Round the AG count up so every block is covered. */
		cfg->agcount = blks / cfg->agsize +
			       (blks % cfg->agsize ? 1 : 0);
	} else if (cli->agcount) {
		/* Explicit AG count; derive a rounded-up AG size. */
		cfg->agcount = cli->agcount;
		cfg->agsize = blks / cfg->agcount +
			      (blks % cfg->agcount ? 1 : 0);
	} else {
		/* Nothing specified - let libxfs choose sane defaults. */
		calc_default_ag_geometry(cfg->blocklog, blks,
					 cfg->dsunit, &cfg->agsize,
					 &cfg->agcount);
	}
}
/*
 * Align the AG size to stripe geometry. If this fails and we are using
 * discovered stripe geometry, tell the caller to clear the stripe geometry.
 * Otherwise, set the aligned geometry (valid or invalid!) so that the
 * validation call will fail and exit.
 */
static void
align_ag_geometry(
	struct mkfs_params	*cfg)
{
	uint64_t	tmp_agsize;
	int		dsunit = cfg->dsunit;

	/* No stripe unit configured - nothing to align, just validate. */
	if (!dsunit)
		goto validate;

	/*
	 * agsize is not a multiple of dsunit
	 */
	if ((cfg->agsize % dsunit) != 0) {
		/*
		 * Round up to stripe unit boundary. Also make sure
		 * that agsize is still larger than
		 * XFS_AG_MIN_BLOCKS(blocklog)
		 */
		tmp_agsize = ((cfg->agsize + dsunit - 1) / dsunit) * dsunit;
		/*
		 * Round down to stripe unit boundary if rounding up
		 * created an AG size that is larger than the AG max.
		 */
		if (tmp_agsize > XFS_AG_MAX_BLOCKS(cfg->blocklog))
			tmp_agsize = (cfg->agsize / dsunit) * dsunit;

		/* neither rounding direction produced a valid AG size */
		if (tmp_agsize < XFS_AG_MIN_BLOCKS(cfg->blocklog) &&
		    tmp_agsize > XFS_AG_MAX_BLOCKS(cfg->blocklog)) {

			/*
			 * If the AG size is invalid and we are using device
			 * probed stripe alignment, just clear the alignment
			 * and continue on.
			 */
			if (!cli_opt_set(&dopts, D_SUNIT) &&
			    !cli_opt_set(&dopts, D_SU)) {
				cfg->dsunit = 0;
				cfg->dswidth = 0;
				goto validate;
			}
			/*
			 * set the agsize to the invalid value so the following
			 * validation of the ag will fail and print a nice error
			 * and exit.
			 */
			cfg->agsize = tmp_agsize;
			goto validate;
		}

		/* update geometry to be stripe unit aligned */
		cfg->agsize = tmp_agsize;
		if (!cli_opt_set(&dopts, D_AGCOUNT))
			cfg->agcount = cfg->dblocks / cfg->agsize +
					(cfg->dblocks % cfg->agsize != 0);
		if (cli_opt_set(&dopts, D_AGSIZE))
			fprintf(stderr,
_("agsize rounded to %lld, sunit = %d\n"),
				(long long)cfg->agsize, dsunit);
	}

	/* warn when every AG would start on the same disk of the stripe */
	if ((cfg->agsize % cfg->dswidth) == 0 &&
	    cfg->dswidth != cfg->dsunit &&
	    cfg->agcount > 1) {

		/* user explicitly chose this geometry: warn, don't adjust */
		if (cli_opt_set(&dopts, D_AGCOUNT) ||
		    cli_opt_set(&dopts, D_AGSIZE)) {
			fprintf(stderr, _(
"Warning: AG size is a multiple of stripe width.  This can cause performance\n\
problems by aligning all AGs on the same disk.  To avoid this, run mkfs with\n\
an AG size that is one stripe unit smaller or larger, for example %llu.\n"),
				(unsigned long long)cfg->agsize - dsunit);
			goto validate;
		}

		/*
		 * This is a non-optimal configuration because all AGs start on
		 * the same disk in the stripe.  Changing the AG size by one
		 * sunit will guarantee that this does not happen.
		 */
		tmp_agsize = cfg->agsize - dsunit;
		if (tmp_agsize < XFS_AG_MIN_BLOCKS(cfg->blocklog)) {
			tmp_agsize = cfg->agsize + dsunit;
			if (cfg->dblocks < cfg->agsize) {
				/* oh well, nothing to do */
				tmp_agsize = cfg->agsize;
			}
		}

		cfg->agsize = tmp_agsize;
		cfg->agcount = cfg->dblocks / cfg->agsize +
				(cfg->dblocks % cfg->agsize != 0);
	}

validate:
	/*
	 * If the last AG is too small, reduce the filesystem size
	 * and drop the blocks.
	 */
	if (cfg->dblocks % cfg->agsize != 0 &&
	     (cfg->dblocks % cfg->agsize < XFS_AG_MIN_BLOCKS(cfg->blocklog))) {
		ASSERT(!cli_opt_set(&dopts, D_AGCOUNT));
		cfg->dblocks = (xfs_rfsblock_t)((cfg->agcount - 1) * cfg->agsize);
		cfg->agcount--;
		ASSERT(cfg->agcount != 0);
	}

	validate_ag_geometry(cfg->blocklog, cfg->dblocks,
			     cfg->agsize, cfg->agcount);
}
/*
 * Choose the maximum percentage of filesystem space usable for inodes.
 * An explicit command line value wins; otherwise the default scales
 * down as the filesystem grows:
 *  - under 1 TB:     XFS_DFL_IMAXIMUM_PCT (25%)
 *  - 1 TB to 50 TB:  5%
 *  - over 50 TB:     1%
 */
static void
calculate_imaxpct(
	struct mkfs_params	*cfg,
	struct cli_params	*cli)
{
	if (cli->imaxpct) {
		/* explicit command line value */
		cfg->imaxpct = cli->imaxpct;
		return;
	}

	if (cfg->dblocks >= TERABYTES(50, cfg->blocklog))
		cfg->imaxpct = 1;
	else if (cfg->dblocks >= TERABYTES(1, cfg->blocklog))
		cfg->imaxpct = 5;
	else
		cfg->imaxpct = XFS_DFL_IMAXIMUM_PCT;
}
/*
 * Set up the initial state of the superblock so we can start using the
 * libxfs geometry macros.
 */
static void
sb_set_features(
	struct mkfs_params	*cfg,
	struct xfs_sb		*sbp)
{
	struct sb_feat_args	*fp = &cfg->sb_feat;

	/* base version number: v5 when CRCs are enabled, v4 otherwise */
	sbp->sb_versionnum = XFS_DFL_SB_VERSION_BITS;
	if (fp->crcs_enabled)
		sbp->sb_versionnum |= XFS_SB_VERSION_5;
	else
		sbp->sb_versionnum |= XFS_SB_VERSION_4;

	if (fp->inode_align) {
		int	cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;

		sbp->sb_versionnum |= XFS_SB_VERSION_ALIGNBIT;
		/* v5 scales the inode cluster with the inode size */
		if (cfg->sb_feat.crcs_enabled)
			cluster_size *= cfg->inodesize / XFS_DINODE_MIN_SIZE;
		sbp->sb_inoalignmt = cluster_size >> cfg->blocklog;
	} else
		sbp->sb_inoalignmt = 0;

	if (cfg->dsunit)
		sbp->sb_versionnum |= XFS_SB_VERSION_DALIGNBIT;
	if (fp->log_version == 2)
		sbp->sb_versionnum |= XFS_SB_VERSION_LOGV2BIT;
	if (fp->attr_version == 1)
		sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT;
	if (fp->nci)
		sbp->sb_versionnum |= XFS_SB_VERSION_BORGBIT;

	/* large sectors need the sector size recorded in the superblock */
	if (cfg->sectorsize > BBSIZE || cfg->lsectorsize > BBSIZE) {
		sbp->sb_versionnum |= XFS_SB_VERSION_SECTORBIT;
		sbp->sb_logsectlog = (uint8_t)cfg->lsectorlog;
		sbp->sb_logsectsize = (uint16_t)cfg->lsectorsize;
	} else {
		sbp->sb_logsectlog = 0;
		sbp->sb_logsectsize = 0;
	}

	sbp->sb_features2 = 0;
	if (fp->lazy_sb_counters)
		sbp->sb_features2 |= XFS_SB_VERSION2_LAZYSBCOUNTBIT;
	if (fp->projid32bit)
		sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
	if (fp->parent_pointers)
		sbp->sb_features2 |= XFS_SB_VERSION2_PARENTBIT;
	if (fp->crcs_enabled)
		sbp->sb_features2 |= XFS_SB_VERSION2_CRCBIT;
	if (fp->attr_version == 2)
		sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;

	/* v5 superblocks have their own feature bit for dirftype */
	if (fp->dirftype && !fp->crcs_enabled)
		sbp->sb_features2 |= XFS_SB_VERSION2_FTYPE;

	/* update whether extended features are in use */
	if (sbp->sb_features2 != 0)
		sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;

	/*
	 * Due to a structure alignment issue, sb_features2 ended up in one
	 * of two locations, the second "incorrect" location represented by
	 * the sb_bad_features2 field. To avoid older kernels mounting
	 * filesystems they shouldn't, set both field to the same value.
	 */
	sbp->sb_bad_features2 = sbp->sb_features2;

	/* the remaining feature words only exist on v5 superblocks */
	if (!fp->crcs_enabled)
		return;

	/* default features for v5 filesystems */
	sbp->sb_features_compat = 0;
	sbp->sb_features_ro_compat = 0;
	sbp->sb_features_incompat = XFS_SB_FEAT_INCOMPAT_FTYPE;
	sbp->sb_features_log_incompat = 0;

	if (fp->finobt)
		sbp->sb_features_ro_compat = XFS_SB_FEAT_RO_COMPAT_FINOBT;
	if (fp->rmapbt)
		sbp->sb_features_ro_compat |= XFS_SB_FEAT_RO_COMPAT_RMAPBT;
	if (fp->reflink)
		sbp->sb_features_ro_compat |= XFS_SB_FEAT_RO_COMPAT_REFLINK;

	/*
	 * Sparse inode chunk support has two main inode alignment requirements.
	 * First, sparse chunk alignment must match the cluster size. Second,
	 * full chunk alignment must match the inode chunk size.
	 *
	 * Copy the already calculated/scaled inoalignmt to spino_align and
	 * update the former to the full inode chunk size.
	 */
	if (fp->spinodes) {
		sbp->sb_spino_align = sbp->sb_inoalignmt;
		sbp->sb_inoalignmt = XFS_INODES_PER_CHUNK *
				cfg->inodesize >> cfg->blocklog;
		sbp->sb_features_incompat |= XFS_SB_FEAT_INCOMPAT_SPINODES;
	}
}
/*
 * Make sure that the log size is a multiple of the stripe unit
 */
static void
align_log_size(
	struct mkfs_params	*cfg,
	int			sunit)
{
	uint64_t		aligned;

	/* nothing to do if it's already aligned. */
	if (!(cfg->logblocks % sunit))
		return;

	/* a user-specified size that is misaligned is a hard error */
	if (cli_opt_set(&lopts, L_SIZE)) {
		fprintf(stderr,
_("log size %lld is not a multiple of the log stripe unit %d\n"),
			(long long) cfg->logblocks, sunit);
		usage();
	}

	/* round up to the next stripe unit boundary... */
	aligned = ((cfg->logblocks + (sunit - 1)) / sunit) * sunit;

	/* ...but round down instead if that exceeds the log size limits */
	if (aligned > XFS_MAX_LOG_BLOCKS ||
	    (aligned << cfg->blocklog) > XFS_MAX_LOG_BYTES)
		aligned = (cfg->logblocks / sunit) * sunit;

	cfg->logblocks = aligned;
}
/*
 * Make sure that the internal log is correctly aligned to the specified
 * stripe unit.
 */
static void
align_internal_log(
	struct mkfs_params	*cfg,
	struct xfs_mount	*mp,
	int			sunit)
{
	/* round up log start if necessary */
	if ((cfg->logstart % sunit) != 0)
		cfg->logstart = ((cfg->logstart + (sunit - 1)) / sunit) * sunit;

	/* round up/down the log size now */
	align_log_size(cfg, sunit);

	/* check the aligned log still fits in an AG. */
	if (cfg->logblocks > cfg->agsize - XFS_FSB_TO_AGBNO(mp, cfg->logstart)) {
		fprintf(stderr,
_("Due to stripe alignment, the internal log size (%lld) is too large.\n"
"Must fit within an allocation group.\n"),
			(long long) cfg->logblocks);
		usage();
	}
}
/*
 * Check the log size in blocks against the computed minimum and the
 * global maximums in both block and byte units; any violation is fatal
 * and exits via usage().
 */
void
validate_log_size(uint64_t logblocks, int blocklog, int min_logblocks)
{
	/* smaller than the computed minimum? */
	if (logblocks < min_logblocks) {
		fprintf(stderr,
	_("log size %lld blocks too small, minimum size is %d blocks\n"),
			(long long)logblocks, min_logblocks);
		usage();
	}
	/* more blocks than the format supports? */
	if (logblocks > XFS_MAX_LOG_BLOCKS) {
		fprintf(stderr,
	_("log size %lld blocks too large, maximum size is %lld blocks\n"),
			(long long)logblocks, XFS_MAX_LOG_BLOCKS);
		usage();
	}
	/* more bytes than the format supports? */
	if ((logblocks << blocklog) > XFS_MAX_LOG_BYTES) {
		fprintf(stderr,
	_("log size %lld bytes too large, maximum size is %lld bytes\n"),
			(long long)(logblocks << blocklog), XFS_MAX_LOG_BYTES);
		usage();
	}
}
/*
 * Work out the size and placement of the log. External logs have a
 * device-provided size that just needs validating and aligning;
 * internal logs are auto-sized from the filesystem size when no size
 * was specified, then placed in (by default) the middle AG and aligned
 * to stripe geometry.
 */
static void
calculate_log_size(
	struct mkfs_params	*cfg,
	struct cli_params	*cli,
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			min_logblocks;
	struct xfs_mount	mount;

	/* we need a temporary mount to calculate the minimum log size. */
	memset(&mount, 0, sizeof(mount));
	mount.m_sb = *sbp;
	libxfs_mount(&mount, &mp->m_sb, 0, 0, 0, 0);
	min_logblocks = libxfs_log_calc_minimum_size(&mount);
	libxfs_umount(&mount);
	ASSERT(min_logblocks);
	min_logblocks = MAX(XFS_MIN_LOG_BLOCKS, min_logblocks);

	/* if we have lots of blocks, check against XFS_MIN_LOG_BYTES, too */
	if (!cli->logsize &&
	    cfg->dblocks >= (1024*1024*1024) >> cfg->blocklog)
		min_logblocks = MAX(min_logblocks,
				    XFS_MIN_LOG_BYTES >> cfg->blocklog);

	/*
	 * external logs will have a device and size by now, so all we have
	 * to do is validate it against minimum size and align it.
	 */
	if (!cfg->loginternal) {
		if (min_logblocks > cfg->logblocks) {
			fprintf(stderr,
_("external log device %lld too small, must be at least %lld blocks\n"),
				(long long)cfg->logblocks,
				(long long)min_logblocks);
			usage();
		}
		cfg->logstart = 0;
		cfg->logagno = 0;
		if (cfg->lsunit)
			align_log_size(cfg, cfg->lsunit);

		validate_log_size(cfg->logblocks, cfg->blocklog, min_logblocks);
		return;
	}

	/* internal log - if no size specified, calculate automatically */
	if (!cfg->logblocks) {
		if (cfg->dblocks < GIGABYTES(1, cfg->blocklog)) {
			/* tiny filesystems get minimum sized logs. */
			cfg->logblocks = min_logblocks;
		} else if (cfg->dblocks < GIGABYTES(16, cfg->blocklog)) {
			/*
			 * For small filesystems, we want to use the
			 * XFS_MIN_LOG_BYTES for filesystems smaller than 16G if
			 * at all possible, ramping up to 128MB at 256GB.
			 */
			cfg->logblocks = MIN(XFS_MIN_LOG_BYTES >> cfg->blocklog,
					min_logblocks * XFS_DFL_LOG_FACTOR);
		} else {
			/*
			 * With a 2GB max log size, default to maximum size
			 * at 4TB. This keeps the same ratio from the older
			 * max log size of 128M at 256GB fs size. IOWs,
			 * the ratio of fs size to log size is 2048:1.
			 */
			cfg->logblocks = (cfg->dblocks << cfg->blocklog) / 2048;
			cfg->logblocks = cfg->logblocks >> cfg->blocklog;
		}

		/* Ensure the chosen size meets minimum log size requirements */
		cfg->logblocks = MAX(min_logblocks, cfg->logblocks);

		/*
		 * Make sure the log fits wholly within an AG
		 *
		 * XXX: If agf->freeblks ends up as 0 because the log uses all
		 * the free space, it causes the kernel all sorts of problems
		 * with per-ag reservations. Right now just back it off one
		 * block, but there's a whole can of worms here that needs to be
		 * opened to decide what is the valid maximum size of a log in
		 * an AG.
		 */
		cfg->logblocks = MIN(cfg->logblocks,
				     libxfs_alloc_ag_max_usable(mp) - 1);

		/* and now clamp the size to the maximum supported size */
		cfg->logblocks = MIN(cfg->logblocks, XFS_MAX_LOG_BLOCKS);
		if ((cfg->logblocks << cfg->blocklog) > XFS_MAX_LOG_BYTES)
			cfg->logblocks = XFS_MAX_LOG_BYTES >> cfg->blocklog;

		validate_log_size(cfg->logblocks, cfg->blocklog, min_logblocks);
	}

	/* the log (plus the preallocated AG header blocks) must fit in an AG */
	if (cfg->logblocks > sbp->sb_agblocks - libxfs_prealloc_blocks(mp)) {
		fprintf(stderr,
_("internal log size %lld too large, must fit in allocation group\n"),
			(long long)cfg->logblocks);
		usage();
	}

	/* place the log in the user-requested AG, or the middle one */
	if (cli_opt_set(&lopts, L_AGNUM)) {
		if (cli->logagno >= sbp->sb_agcount) {
			fprintf(stderr,
_("log ag number %lld too large, must be less than %lld\n"),
				(long long)cli->logagno,
				(long long)sbp->sb_agcount);
			usage();
		}
		cfg->logagno = cli->logagno;
	} else
		cfg->logagno = (xfs_agnumber_t)(sbp->sb_agcount / 2);

	/* the log starts right after the preallocated AG header blocks */
	cfg->logstart = XFS_AGB_TO_FSB(mp, cfg->logagno,
				       libxfs_prealloc_blocks(mp));

	/*
	 * Align the logstart at stripe unit boundary.
	 */
	if (cfg->lsunit) {
		align_internal_log(cfg, mp, cfg->lsunit);
	} else if (cfg->dsunit) {
		align_internal_log(cfg, mp, cfg->dsunit);
	}
	validate_log_size(cfg->logblocks, cfg->blocklog, min_logblocks);
}
/*
 * Set up superblock with the minimum parameters required for
 * the libxfs macros needed by the log sizing code to run successfully.
 * This includes a minimum log size calculation, so we need everything
 * that goes into that calculation to be setup here including feature
 * flags.
 */
static void
start_superblock_setup(
	struct mkfs_params	*cfg,
	struct xfs_mount	*mp,
	struct xfs_sb		*sbp)
{
	/* identification and basic unit sizes */
	sbp->sb_magicnum = XFS_SB_MAGIC;
	sbp->sb_sectsize = (uint16_t)cfg->sectorsize;
	sbp->sb_sectlog = (uint8_t)cfg->sectorlog;
	sbp->sb_blocksize = cfg->blocksize;
	sbp->sb_blocklog = (uint8_t)cfg->blocklog;

	/* allocation group geometry */
	sbp->sb_agblocks = (xfs_agblock_t)cfg->agsize;
	sbp->sb_agblklog = (uint8_t)log2_roundup(cfg->agsize);
	sbp->sb_agcount = (xfs_agnumber_t)cfg->agcount;

	/* inode and directory geometry */
	sbp->sb_inodesize = (uint16_t)cfg->inodesize;
	sbp->sb_inodelog = (uint8_t)cfg->inodelog;
	sbp->sb_inopblock = (uint16_t)(cfg->blocksize / cfg->inodesize);
	sbp->sb_inopblog = (uint8_t)(cfg->blocklog - cfg->inodelog);
	sbp->sb_dirblklog = cfg->dirblocklog - cfg->blocklog;

	/* feature bits feed into the minimum log size calculation */
	sb_set_features(cfg, sbp);

	/*
	 * log stripe unit is stored in bytes on disk and cannot be zero
	 * for v2 logs.
	 */
	if (cfg->sb_feat.log_version != 2)
		sbp->sb_logsunit = 0;
	else if (cfg->lsunit)
		sbp->sb_logsunit = XFS_FSB_TO_B(mp, cfg->lsunit);
	else
		sbp->sb_logsunit = 1;
}
static void
initialise_mount(
	struct mkfs_params	*cfg,
	struct xfs_mount	*mp,
	struct xfs_sb		*sbp)
{
	/*
	 * Seed just enough of the mount structure for
	 * libxfs_prealloc_blocks() to convert between filesystem/sector
	 * sizes and 512-byte basic blocks.
	 */
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
}
/*
 * Print the traditional mkfs.xfs geometry summary. The positional
 * arguments below must stay in exact one-to-one correspondence with the
 * conversion specifiers in the format string; the empty "" arguments
 * fill the label column of continuation lines.
 */
static void
print_mkfs_cfg(
	struct mkfs_params	*cfg,
	char			*dfile,
	char			*logfile,
	char			*rtfile)
{
	struct sb_feat_args	*fp = &cfg->sb_feat;

	printf(_(
"meta-data=%-22s isize=%-6d agcount=%lld, agsize=%lld blks\n"
"         =%-22s sectsz=%-5u attr=%u, projid32bit=%u\n"
"         =%-22s crc=%-8u finobt=%u, sparse=%u, rmapbt=%u, reflink=%u\n"
"data     =%-22s bsize=%-6u blocks=%llu, imaxpct=%u\n"
"         =%-22s sunit=%-6u swidth=%u blks\n"
"naming   =version %-14u bsize=%-6u ascii-ci=%d ftype=%d\n"
"log      =%-22s bsize=%-6d blocks=%lld, version=%d\n"
"         =%-22s sectsz=%-5u sunit=%d blks, lazy-count=%d\n"
"realtime =%-22s extsz=%-6d blocks=%lld, rtextents=%lld\n"),
		dfile, cfg->inodesize, (long long)cfg->agcount,
			(long long)cfg->agsize,
		"", cfg->sectorsize, fp->attr_version, fp->projid32bit,
		"", fp->crcs_enabled, fp->finobt, fp->spinodes, fp->rmapbt,
			fp->reflink,
		"", cfg->blocksize, (long long)cfg->dblocks, cfg->imaxpct,
		"", cfg->dsunit, cfg->dswidth,
		fp->dir_version, cfg->dirblocksize, fp->nci, fp->dirftype,
		logfile, cfg->blocksize, (long long)cfg->logblocks,
			fp->log_version,
		"", cfg->lsectorsize, cfg->lsunit, fp->lazy_sb_counters,
		rtfile, (int)cfg->rtextblocks << cfg->blocklog,
			(long long)cfg->rtblocks, (long long)cfg->rtextents);
}
/*
* Format everything from the generated config into the superblock that
* will be used to initialise the on-disk superblock. This is the in-memory
* copy, so no need to care about endian swapping here.
*/
static void
finish_superblock_setup(
struct mkfs_params *cfg,
struct xfs_mount *mp,
struct xfs_sb *sbp)
{
if (cfg->label)
strncpy(sbp->sb_fname, cfg->label, sizeof(sbp->sb_fname));
sbp->sb_dblocks = cfg->dblocks;
sbp->sb_rblocks = cfg->rtblocks;
sbp->sb_rextents = cfg->rtextents;
platform_uuid_copy(&sbp->sb_uuid, &cfg->uuid);
/* Only in memory; libxfs expects this as if read from disk */
platform_uuid_copy(&sbp->sb_meta_uuid, &cfg->uuid);
sbp->sb_logstart = cfg->logstart;
sbp->sb_rootino = sbp->sb_rbmino = sbp->sb_rsumino = NULLFSINO;
sbp->sb_rextsize = cfg->rtextblocks;
sbp->sb_agcount = (xfs_agnumber_t)cfg->agcount;
sbp->sb_rbmblocks = cfg->rtbmblocks;
sbp->sb_logblocks = (xfs_extlen_t)cfg->logblocks;
sbp->sb_rextslog = (uint8_t)(cfg->rtextents ?
libxfs_highbit32((unsigned int)cfg->rtextents) : 0);
sbp->sb_inprogress = 1; /* mkfs is in progress */
sbp->sb_imax_pct = cfg->imaxpct;
sbp->sb_icount = 0;
sbp->sb_ifree = 0;
sbp->sb_fdblocks = cfg->dblocks -
cfg->agcount * libxfs_prealloc_blocks(mp) -
(cfg->loginternal ? cfg->logblocks : 0);
sbp->sb_frextents = 0; /* will do a free later */
sbp->sb_uquotino = sbp->sb_gquotino = sbp->sb_pquotino = 0;
sbp->sb_qflags = 0;
sbp->sb_unit = cfg->dsunit;
sbp->sb_width = cfg->dswidth;
}
/*
 * Sanitise the data and log devices and prepare them so libxfs can mount the
 * device successfully. Also check we can access the rt device if configured.
 */
static void
prepare_devices(
	struct mkfs_params	*cfg,
	struct libxfs_xinit	*xi,
	struct xfs_mount	*mp,
	struct xfs_sb		*sbp,
	bool			clear_stale)
{
	struct xfs_buf		*buf;
	int			whack_blks = BTOBB(WHACK_SIZE);
	int			lsunit;

	/*
	 * If there's an old XFS filesystem on the device with enough intact
	 * information that we can parse the superblock, there's enough
	 * information on disk to confuse a future xfs_repair call. To avoid
	 * this, whack all the old secondary superblocks that we can find.
	 */
	if (clear_stale)
		zero_old_xfs_structures(xi, sbp);

	/*
	 * If the data device is a file, grow out the file to its final size if
	 * needed so that the reads for the end of the device in the mount code
	 * will succeed.
	 */
	if (xi->disfile &&
	    xi->dsize * xi->dbsize < cfg->dblocks * cfg->blocksize) {
		if (ftruncate(xi->dfd, cfg->dblocks * cfg->blocksize) < 0) {
			fprintf(stderr,
				_("%s: Growing the data section failed\n"),
				progname);
			exit(1);
		}

		/* update size to be able to whack blocks correctly */
		xi->dsize = BTOBB(cfg->dblocks * cfg->blocksize);
	}

	/*
	 * Zero out the end to obliterate any old MD RAID (or other) metadata at
	 * the end of the device. (MD sb is ~64k from the end, take out a wider
	 * swath to be sure)
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp, (xi->dsize - whack_blks),
			    whack_blks);
	memset(XFS_BUF_PTR(buf), 0, WHACK_SIZE);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	libxfs_purgebuf(buf);

	/*
	 * Now zero out the beginning of the device, to obliterate any old
	 * filesystem signatures out there. This should take care of
	 * swap (somewhere around the page size), jfs (32k),
	 * ext[2,3] and reiserfs (64k) - and hopefully all else.
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp, 0, whack_blks);
	memset(XFS_BUF_PTR(buf), 0, WHACK_SIZE);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	libxfs_purgebuf(buf);

	/* OK, now write the superblock... */
	buf = libxfs_getbuf(mp->m_ddev_targp, XFS_SB_DADDR, XFS_FSS_TO_BB(mp, 1));
	buf->b_ops = &xfs_sb_buf_ops;
	memset(XFS_BUF_PTR(buf), 0, cfg->sectorsize);
	libxfs_sb_to_disk((void *)XFS_BUF_PTR(buf), sbp);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	libxfs_purgebuf(buf);

	/* ...and zero the log.... */
	lsunit = sbp->sb_logsunit;
	/* sb_logsunit == 1 means "no stripe unit"; use the sector size */
	if (lsunit == 1)
		lsunit = sbp->sb_logsectsize;
	libxfs_log_clear(mp->m_logdev_targp, NULL,
			 XFS_FSB_TO_DADDR(mp, cfg->logstart),
			 (xfs_extlen_t)XFS_FSB_TO_BB(mp, cfg->logblocks),
			 &sbp->sb_uuid, cfg->sb_feat.log_version,
			 lsunit, XLOG_FMT, XLOG_INIT_CYCLE, false);

	/* finally, check we can write the last block in the realtime area */
	if (mp->m_rtdev_targp->dev && cfg->rtblocks > 0) {
		buf = libxfs_getbuf(mp->m_rtdev_targp,
				    XFS_FSB_TO_BB(mp, cfg->rtblocks - 1LL),
				    BTOBB(cfg->blocksize));
		memset(XFS_BUF_PTR(buf), 0, cfg->blocksize);
		libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
		libxfs_purgebuf(buf);
	}
}
/*
 * Write the on-disk metadata for one allocation group: the secondary
 * superblock, AGF, AGFL and AGI headers, and the root blocks of every
 * per-AG btree the feature set requires. Also tracks the largest
 * minimum free list requirement seen across AGs in *worst_freelist.
 *
 * XXX: this code is mostly common with the kernel growfs code.
 * These initialisations should be pulled into libxfs to keep the
 * kernel/userspace header initialisation code the same.
 */
static void
initialise_ag_headers(
	struct mkfs_params	*cfg,
	struct xfs_mount	*mp,
	struct xfs_sb		*sbp,
	xfs_agnumber_t		agno,
	int			*worst_freelist)
{
	struct xfs_perag	*pag = libxfs_perag_get(mp, agno);
	struct xfs_agfl		*agfl;
	struct xfs_agf		*agf;
	struct xfs_agi		*agi;
	struct xfs_buf		*buf;
	struct xfs_btree_block	*block;
	struct xfs_alloc_rec	*arec;
	struct xfs_alloc_rec	*nrec;
	int			bucket;
	uint64_t		agsize = cfg->agsize;
	xfs_agblock_t		agblocks;
	bool			is_log_ag = false;
	int			c;

	/* the AG holding an internal log needs its free space carved up */
	if (cfg->loginternal && agno == cfg->logagno)
		is_log_ag = true;

	/*
	 * Superblock.
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
			XFS_FSS_TO_BB(mp, 1));
	buf->b_ops = &xfs_sb_buf_ops;
	memset(XFS_BUF_PTR(buf), 0, cfg->sectorsize);
	libxfs_sb_to_disk((void *)XFS_BUF_PTR(buf), sbp);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * AG header block: freespace
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1));
	buf->b_ops = &xfs_agf_buf_ops;
	agf = XFS_BUF_TO_AGF(buf);
	memset(agf, 0, cfg->sectorsize);
	/* the last AG absorbs whatever is left over and may be shorter */
	if (agno == cfg->agcount - 1)
		agsize = cfg->dblocks - (xfs_rfsblock_t)(agno * agsize);
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);
	agf->agf_length = cpu_to_be32(agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	pag->pagf_levels[XFS_BTNUM_BNOi] = 1;
	pag->pagf_levels[XFS_BTNUM_CNTi] = 1;
	if (xfs_sb_version_hasrmapbt(sbp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] = cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}
	if (xfs_sb_version_hasreflink(sbp)) {
		agf->agf_refcount_root = cpu_to_be32(libxfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}
	/* free list starts empty; initialise_ag_freespace() fills it later */
	agf->agf_flfirst = 0;
	agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
	agf->agf_flcount = 0;
	agblocks = (xfs_agblock_t)(agsize - libxfs_prealloc_blocks(mp));
	agf->agf_freeblks = cpu_to_be32(agblocks);
	agf->agf_longest = cpu_to_be32(agblocks);
	if (xfs_sb_version_hascrc(sbp))
		platform_uuid_copy(&agf->agf_uuid, &sbp->sb_uuid);

	if (is_log_ag) {
		/* the internal log consumes free space in this AG */
		be32_add_cpu(&agf->agf_freeblks, -(int64_t)cfg->logblocks);
		agf->agf_longest = cpu_to_be32(agsize -
			XFS_FSB_TO_AGBNO(mp, cfg->logstart) - cfg->logblocks);
	}
	if (libxfs_alloc_min_freelist(mp, pag) > *worst_freelist)
		*worst_freelist = libxfs_alloc_min_freelist(mp, pag);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * AG freelist header block
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1));
	buf->b_ops = &xfs_agfl_buf_ops;
	agfl = XFS_BUF_TO_AGFL(buf);
	/* setting to 0xff results in initialisation to NULLAGBLOCK */
	memset(agfl, 0xff, cfg->sectorsize);
	if (xfs_sb_version_hascrc(sbp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		platform_uuid_copy(&agfl->agfl_uuid, &sbp->sb_uuid);
		for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
			agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
	}
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * AG header block: inodes
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1));
	agi = XFS_BUF_TO_AGI(buf);
	buf->b_ops = &xfs_agi_buf_ops;
	memset(agi, 0, cfg->sectorsize);
	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(agno);
	agi->agi_length = cpu_to_be32(agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	if (xfs_sb_version_hasfinobt(sbp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(sbp))
		platform_uuid_copy(&agi->agi_uuid, &sbp->sb_uuid);
	for (c = 0; c < XFS_AGI_UNLINKED_BUCKETS; c++)
		agi->agi_unlinked[c] = cpu_to_be32(NULLAGINO);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * BNO btree root block
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
			BTOBB(cfg->blocksize));
	buf->b_ops = &xfs_allocbt_buf_ops;
	block = XFS_BUF_TO_BLOCK(buf);
	memset(block, 0, cfg->blocksize);
	libxfs_btree_init_block(mp, buf, XFS_BTNUM_BNO, 0, 1, agno, 0);

	/* one record covering all free space after the AG header blocks */
	arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
	arec->ar_startblock = cpu_to_be32(libxfs_prealloc_blocks(mp));
	if (is_log_ag) {
		xfs_agblock_t	start = XFS_FSB_TO_AGBNO(mp, cfg->logstart);

		ASSERT(start >= libxfs_prealloc_blocks(mp));
		if (start != libxfs_prealloc_blocks(mp)) {
			/*
			 * Modify first record to pad stripe align of log
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						libxfs_prealloc_blocks(mp));
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, cfg->logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;

	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * CNT btree root block
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
			BTOBB(cfg->blocksize));
	buf->b_ops = &xfs_allocbt_buf_ops;
	block = XFS_BUF_TO_BLOCK(buf);
	memset(block, 0, cfg->blocksize);
	libxfs_btree_init_block(mp, buf, XFS_BTNUM_CNT, 0, 1, agno, 0);

	/* same free space records as the BNO btree above */
	arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
	arec->ar_startblock = cpu_to_be32(libxfs_prealloc_blocks(mp));
	if (is_log_ag) {
		xfs_agblock_t	start = XFS_FSB_TO_AGBNO(mp, cfg->logstart);

		ASSERT(start >= libxfs_prealloc_blocks(mp));
		if (start != libxfs_prealloc_blocks(mp)) {
			arec->ar_blockcount = cpu_to_be32(start -
					libxfs_prealloc_blocks(mp));
			nrec = arec + 1;
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		be32_add_cpu(&arec->ar_startblock, cfg->logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;

	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * refcount btree root block
	 */
	if (xfs_sb_version_hasreflink(sbp)) {
		buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, libxfs_refc_block(mp)),
			BTOBB(cfg->blocksize));
		buf->b_ops = &xfs_refcountbt_buf_ops;

		block = XFS_BUF_TO_BLOCK(buf);
		memset(block, 0, cfg->blocksize);
		libxfs_btree_init_block(mp, buf, XFS_BTNUM_REFC, 0, 0, agno, 0);
		libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	}

	/*
	 * INO btree root block
	 */
	buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
			BTOBB(cfg->blocksize));
	buf->b_ops = &xfs_inobt_buf_ops;
	block = XFS_BUF_TO_BLOCK(buf);
	memset(block, 0, cfg->blocksize);
	libxfs_btree_init_block(mp, buf, XFS_BTNUM_INO, 0, 0, agno, 0);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * Free INO btree root block
	 */
	if (xfs_sb_version_hasfinobt(sbp)) {
		buf = libxfs_getbuf(mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_FIBT_BLOCK(mp)),
				BTOBB(cfg->blocksize));
		buf->b_ops = &xfs_inobt_buf_ops;
		block = XFS_BUF_TO_BLOCK(buf);
		memset(block, 0, cfg->blocksize);
		libxfs_btree_init_block(mp, buf, XFS_BTNUM_FINO, 0, 0, agno, 0);
		libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	}

	/* RMAP btree root block */
	if (xfs_sb_version_hasrmapbt(sbp)) {
		struct xfs_rmap_rec	*rrec;

		buf = libxfs_getbuf(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_RMAP_BLOCK(mp)),
			BTOBB(cfg->blocksize));
		buf->b_ops = &xfs_rmapbt_buf_ops;
		block = XFS_BUF_TO_BLOCK(buf);
		memset(block, 0, cfg->blocksize);

		libxfs_btree_init_block(mp, buf, XFS_BTNUM_RMAP, 0, 0, agno, 0);

		/*
		 * mark the AG header regions as static metadata
		 * The BNO btree block is the first block after the
		 * headers, so its location defines the size of region
		 * the static metadata consumes.
		 */
		rrec = XFS_RMAP_REC_ADDR(block, 1);
		rrec->rm_startblock = 0;
		rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account freespace btree root blocks */
		rrec = XFS_RMAP_REC_ADDR(block, 2);
		rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(2);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account inode btree root blocks */
		rrec = XFS_RMAP_REC_ADDR(block, 3);
		rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
						XFS_IBT_BLOCK(mp));
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account for rmap btree root */
		rrec = XFS_RMAP_REC_ADDR(block, 4);
		rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);

		/* account for refcount btree root */
		if (xfs_sb_version_hasreflink(sbp)) {
			rrec = XFS_RMAP_REC_ADDR(block, 5);
			rrec->rm_startblock = cpu_to_be32(libxfs_refc_block(mp));
			rrec->rm_blockcount = cpu_to_be32(1);
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);
		}

		/* account for the log space */
		if (is_log_ag) {
			rrec = XFS_RMAP_REC_ADDR(block,
					be16_to_cpu(block->bb_numrecs) + 1);
			rrec->rm_startblock = cpu_to_be32(
					XFS_FSB_TO_AGBNO(mp, cfg->logstart));
			rrec->rm_blockcount = cpu_to_be32(cfg->logblocks);
			rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
			rrec->rm_offset = 0;
			be16_add_cpu(&block->bb_numrecs, 1);
		}

		libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
	}

	libxfs_perag_put(pag);
}
/*
 * Populate the free list of one AG by letting the generic allocator
 * code fix it up inside a transaction reserving worst_freelist blocks.
 */
static void
initialise_ag_freespace(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	int			worst_freelist)
{
	struct xfs_alloc_arg	args = {};
	struct xfs_trans	*tp;
	struct xfs_trans_res	tres = {0};
	int			error;

	error = libxfs_trans_alloc(mp, &tres, worst_freelist, 0, 0, &tp);
	if (error)
		res_failed(error);

	args.tp = tp;
	args.mp = mp;
	args.agno = agno;
	args.alignment = 1;
	args.pag = libxfs_perag_get(mp, agno);

	/* fill the AGFL up to its target level */
	libxfs_alloc_fix_freelist(&args, 0);

	libxfs_perag_put(args.pag);
	libxfs_trans_commit(tp);
}
/*
 * Rewrite several secondary superblocks with the root inode number filled
 * out.  This can help repair recovery from a trashed primary superblock
 * without losing the root inode.
 *
 * @mp: mounted filesystem whose primary superblock already carries the
 *      final root inode number
 */
static void
rewrite_secondary_superblocks(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*buf;

	/* rewrite the superblock of the last AG */
	buf = libxfs_readbuf(mp->m_dev,
			XFS_AGB_TO_DADDR(mp, mp->m_sb.sb_agcount - 1,
				XFS_SB_DADDR),
			XFS_FSS_TO_BB(mp, 1),
			LIBXFS_EXIT_ON_FAILURE, &xfs_sb_buf_ops);
	XFS_BUF_TO_SBP(buf)->sb_rootino = cpu_to_be64(mp->m_sb.sb_rootino);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	/*
	 * and one in the middle for luck if there's enough AGs for that;
	 * with <= 2 AGs the "middle" would be AG 0 (the primary), so skip.
	 */
	if (mp->m_sb.sb_agcount <= 2)
		return;
	buf = libxfs_readbuf(mp->m_dev,
			XFS_AGB_TO_DADDR(mp, (mp->m_sb.sb_agcount - 1) / 2,
				XFS_SB_DADDR),
			XFS_FSS_TO_BB(mp, 1),
			LIBXFS_EXIT_ON_FAILURE, &xfs_sb_buf_ops);
	XFS_BUF_TO_SBP(buf)->sb_rootino = cpu_to_be64(mp->m_sb.sb_rootino);
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);
}
/*
 * mkfs entry point: parse the command line, validate the requested
 * geometry/features, open the devices, write all static on-disk
 * metadata, populate the filesystem from the proto file, and finally
 * mark the superblock as no longer "in progress".
 */
int
main(
	int			argc,
	char			**argv)
{
	xfs_agnumber_t		agno;
	xfs_buf_t		*buf;
	int			c;
	char			*dfile = NULL;
	char			*logfile = NULL;
	char			*rtfile = NULL;
	int			dry_run = 0;
	int			discard = 1;	/* discard (trim) by default */
	int			force_overwrite = 0;
	int			quiet = 0;
	char			*protofile = NULL;
	char			*protostring = NULL;
	int			worst_freelist = 0;	/* filled by AG header init */
	struct libxfs_xinit	xi = {
		.isdirect = LIBXFS_DIRECT,
		.isreadonly = LIBXFS_EXCLUSIVELY,
	};
	struct xfs_mount	mbuf = {};
	struct xfs_mount	*mp = &mbuf;
	struct xfs_sb		*sbp = &mp->m_sb;
	struct fs_topology	ft = {};
	struct cli_params	cli = {
		.xi = &xi,
		.loginternal = 1,
	};
	struct mkfs_params	cfg = {};

	/* build time defaults */
	struct mkfs_default_params	dft = {
		.source = _("package build definitions"),
		.sectorsize = XFS_MIN_SECTORSIZE,
		.blocksize = 1 << XFS_DFL_BLOCKSIZE_LOG,
		.sb_feat = {
			.log_version = 2,
			.attr_version = 2,
			.dir_version = 2,
			.inode_align = true,
			.nci = false,
			.lazy_sb_counters = true,
			.projid32bit = true,
			.crcs_enabled = true,
			.dirftype = true,
			.finobt = true,
			.spinodes = false,
			.rmapbt = false,
			.reflink = false,
			.parent_pointers = false,
			.nodalign = false,
			.nortalign = false,
		},
	};

	platform_uuid_generate(&cli.uuid);
	progname = basename(argv[0]);
	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);

	/*
	 * TODO: Sourcing defaults from a config file
	 *
	 * Before anything else, see if there's a config file with different
	 * defaults. If a file exists in <package location>, read in the new
	 * default values and overwrite them in the &dft structure. This way the
	 * new defaults will apply before we parse the CLI, and the CLI will
	 * still be able to override them. When more than one source is
	 * implemented, emit a message to indicate where the defaults being
	 * used came from.
	 *
	 * printf(_("Default configuration sourced from %s\n"), dft.source);
	 */

	/* copy new defaults into CLI parsing structure */
	memcpy(&cli.sb_feat, &dft.sb_feat, sizeof(cli.sb_feat));
	memcpy(&cli.fsx, &dft.fsx, sizeof(cli.fsx));

	while ((c = getopt(argc, argv, "b:d:i:l:L:m:n:KNp:qr:s:CfV")) != EOF) {
		switch (c) {
		case 'C':
		case 'f':
			force_overwrite = 1;
			break;
		case 'b':
		case 'd':
		case 'i':
		case 'l':
		case 'm':
		case 'n':
		case 'r':
		case 's':
			/* section options are parsed by their own tables */
			parse_subopts(c, optarg, &cli);
			break;
		case 'L':
			if (strlen(optarg) > sizeof(sbp->sb_fname))
				illegal(optarg, "L");
			cfg.label = optarg;
			break;
		case 'N':
			dry_run = 1;
			break;
		case 'K':
			discard = 0;
			break;
		case 'p':
			if (protofile)
				respec('p', NULL, 0);
			protofile = optarg;
			break;
		case 'q':
			quiet = 1;
			break;
		case 'V':
			printf(_("%s version %s\n"), progname, VERSION);
			exit(0);
		case '?':
			unknown(optopt, "");
		}
	}
	if (argc - optind > 1) {
		fprintf(stderr, _("extra arguments\n"));
		usage();
	} else if (argc - optind == 1) {
		dfile = xi.volname = getstr(argv[optind], &dopts, D_NAME);
	} else
		dfile = xi.dname;

	protostring = setup_proto(protofile);

	/*
	 * Extract as much of the valid config as we can from the CLI input
	 * before opening the libxfs devices.
	 */
	validate_blocksize(&cfg, &cli, &dft);
	validate_sectorsize(&cfg, &cli, &dft, &ft, dfile, dry_run,
			force_overwrite);

	/*
	 * XXX: we still need to set block size and sector size global variables
	 * so that getnum/cvtnum works correctly
	 */
	blocksize = cfg.blocksize;
	sectorsize = cfg.sectorsize;

	validate_log_sectorsize(&cfg, &cli, &dft);
	validate_sb_features(&cfg, &cli);

	/*
	 * we've now completed basic validation of the features, sector and
	 * block sizes, so from this point onwards we use the values found in
	 * the cfg structure for them, not the command line structure.
	 */
	validate_dirblocksize(&cfg, &cli);
	validate_inodesize(&cfg, &cli);

	/*
	 * if the device size was specified convert it to a block count
	 * now we have a valid block size. These will be set to zero if
	 * nothing was specified, indicating we should use the full device.
	 */
	cfg.dblocks = calc_dev_size(cli.dsize, &cfg, &dopts, D_SIZE, "data");
	cfg.logblocks = calc_dev_size(cli.logsize, &cfg, &lopts, L_SIZE, "log");
	cfg.rtblocks = calc_dev_size(cli.rtsize, &cfg, &ropts, R_SIZE, "rt");

	validate_rtextsize(&cfg, &cli, &ft);
	calc_stripe_factors(&cfg, &cli, &ft);

	/*
	 * Open and validate the device configurations
	 */
	open_devices(&cfg, &xi, (discard && !dry_run));
	validate_datadev(&cfg, &cli);
	validate_logdev(&cfg, &cli, &logfile);
	validate_rtdev(&cfg, &cli, &rtfile);

	/*
	 * At this point when know exactly what size all the devices are,
	 * so we can start validating and calculating layout options that are
	 * dependent on device sizes. Once calculated, make sure everything
	 * aligns to device geometry correctly.
	 */
	calculate_initial_ag_geometry(&cfg, &cli);
	align_ag_geometry(&cfg);
	calculate_imaxpct(&cfg, &cli);

	/*
	 * Set up the basic superblock parameters now so that we can use
	 * the geometry information we've already validated in libxfs
	 * provided functions to determine on-disk format information.
	 */
	start_superblock_setup(&cfg, mp, sbp);
	initialise_mount(&cfg, mp, sbp);

	/*
	 * With the mount set up, we can finally calculate the log size
	 * constraints and do default size calculations and final validation
	 */
	calculate_log_size(&cfg, &cli, mp);

	if (!quiet || dry_run) {
		print_mkfs_cfg(&cfg, dfile, logfile, rtfile);
		if (dry_run)
			exit(0);
	}

	finish_superblock_setup(&cfg, mp, sbp);

	/*
	 * we need the libxfs buffer cache from here on in.
	 */
	libxfs_buftarg_init(mp, xi.ddev, xi.logdev, xi.rtdev);

	/*
	 * Before we mount the filesystem we need to make sure the devices have
	 * enough of the filesystem structure on them that allows libxfs to
	 * mount.
	 */
	prepare_devices(&cfg, &xi, mp, sbp, force_overwrite);
	mp = libxfs_mount(mp, sbp, xi.ddev, xi.logdev, xi.rtdev, 0);
	if (mp == NULL) {
		fprintf(stderr, _("%s: filesystem failed to initialize\n"),
			progname);
		exit(1);
	}

	/*
	 * Initialise all the static on disk metadata.
	 */
	for (agno = 0; agno < cfg.agcount; agno++)
		initialise_ag_headers(&cfg, mp, sbp, agno, &worst_freelist);

	/*
	 * Initialise the freespace freelists (i.e. AGFLs) in each AG.
	 */
	for (agno = 0; agno < cfg.agcount; agno++)
		initialise_ag_freespace(mp, agno, worst_freelist);

	/*
	 * Allocate the root inode and anything else in the proto file.
	 */
	parse_proto(mp, &cli.fsx, &protostring);

	/*
	 * Protect ourselves against possible stupidity
	 */
	if (XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino) != 0) {
		fprintf(stderr,
			_("%s: root inode created in AG %u, not AG 0\n"),
			progname, XFS_INO_TO_AGNO(mp, mp->m_sb.sb_rootino));
		exit(1);
	}

	/*
	 * Re-write multiple secondary superblocks with rootinode field set
	 */
	if (mp->m_sb.sb_agcount > 1)
		rewrite_secondary_superblocks(mp);

	/*
	 * Dump all inodes and buffers before marking us all done.
	 * Need to drop references to inodes we still hold, first.
	 */
	libxfs_rtmount_destroy(mp);
	libxfs_bcache_purge();

	/*
	 * Mark the filesystem ok.
	 */
	buf = libxfs_getsb(mp, LIBXFS_EXIT_ON_FAILURE);
	(XFS_BUF_TO_SBP(buf))->sb_inprogress = 0;
	libxfs_writebuf(buf, LIBXFS_EXIT_ON_FAILURE);

	libxfs_umount(mp);
	if (xi.rtdev)
		libxfs_device_close(xi.rtdev);
	if (xi.logdev && xi.logdev != xi.ddev)
		libxfs_device_close(xi.logdev);
	libxfs_device_close(xi.ddev);
	return 0;
}
| 26.26981
| 88
| 0.666732
|
[
"geometry"
] |
f977948f1c47b4f58f52a5181d16507f11d7a5ca
| 2,586
|
c
|
C
|
src/hal/core/core.c
|
cevero/hal
|
6073763d8b3d4f311421d98e42c9fec893c937f9
|
[
"MIT"
] | 6
|
2019-03-26T20:25:26.000Z
|
2021-11-17T11:00:44.000Z
|
src/hal/core/core.c
|
cevero/hal
|
6073763d8b3d4f311421d98e42c9fec893c937f9
|
[
"MIT"
] | 358
|
2019-03-07T18:36:06.000Z
|
2021-03-26T21:51:38.000Z
|
src/hal/core/core.c
|
cevero/hal
|
6073763d8b3d4f311421d98e42c9fec893c937f9
|
[
"MIT"
] | 10
|
2019-03-07T18:49:48.000Z
|
2021-12-08T21:23:12.000Z
|
/*
* MIT License
*
* Copyright(c) 2011-2020 The Maintainers of Nanvix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __unix64__
/* Must come first. */
#define __NEED_HAL_CORE
#include <nanvix/hal/core.h>
#include <nanvix/const.h>
#include <nanvix/hlib.h>
/*============================================================================*
* core_halt() *
*============================================================================*/
/**
 * The core_halt() function halts the underlying core.
 * After halting a core, instruction execution cannot be
 * resumed on it.
 *
 * @author João Vicente Souto
 */
PUBLIC NORETURN void core_halt(void)
{
	kprintf("[hal][core] halting...");

	/* Disable all interrupts so nothing can wake us again. */
	interrupts_disable();

	/* Stay here forever. */
	UNREACHABLE();
}
/*============================================================================*
* core_setup() *
*============================================================================*/
/**
 * The core_setup() function initializes all architectural structures
 * of the underlying core. It initializes the Memory Management Unit
 * (MMU), Interrupt Vector Table (IVT) as well as performance
 * monitoring registers.
 *
 * @param stack Stack passed through to the IVT setup routine.
 *
 * @author Pedro Henrique Penna
 */
PUBLIC void core_setup(void *stack)
{
	kprintf("[hal][core] booting up core...");

	/* MMU and perf counters first, then install the interrupt vectors. */
	mmu_setup();
	perf_setup();
	ivt_setup(stack);
}
#else
typedef int make_iso_compilers_happy;
#endif
| 32.325
| 81
| 0.607502
|
[
"vector"
] |
f978742bfd76efa16407af4152bd819989eb6d6e
| 6,672
|
h
|
C
|
src/il_landscape.h
|
rhaamo/poc-cubeworld
|
b341eeeb0b2dbfd8c26dea869f57c70e1814402d
|
[
"MIT"
] | null | null | null |
src/il_landscape.h
|
rhaamo/poc-cubeworld
|
b341eeeb0b2dbfd8c26dea869f57c70e1814402d
|
[
"MIT"
] | null | null | null |
src/il_landscape.h
|
rhaamo/poc-cubeworld
|
b341eeeb0b2dbfd8c26dea869f57c70e1814402d
|
[
"MIT"
] | null | null | null |
/* Content of this file written by Dave Humphrey
* And possible modifications by Marc 'rhaamo' Lagrange
*/
#ifndef __IL_LANDSCAPE_H
#define __IL_LANDSCAPE_H
#include "il_map.h"
#include <vector>
#include <string>
#include "noiseutils/noiseutils.h"
namespace infland
{
static const int NUM_TEMPERATURE_COEF = 6;
/* Parameters controlling generation of the altitude (heightfield) map. */
struct altitude_params_t
{
    float SeaLevel;       // altitude value treated as sea level
    float MinLevel;       // lowest altitude produced
    float MaxLevel;       // highest altitude produced
    int Octaves;          // noise octave count
    float X1, Y1, X2, Y2; // presumably the sampled noise-space rectangle — verify against implementation
    double Frequency;     // noise frequency
    double Lacunarity;    // per-octave frequency multiplier
    double Persistence;   // per-octave amplitude multiplier
    altitude_params_t();  // defaults defined in the .cpp
};
/* Parameters controlling generation of the temperature map. */
struct temperature_params_t
{
    float Minimum;                      // lowest temperature produced
    float Maximum;                      // highest temperature produced
    float Coef[NUM_TEMPERATURE_COEF+1]; // NOTE(review): presumably polynomial coefficients — confirm in ComputeTemperature_Norm()
    int Blur;                           // blur amount applied to the map
    temperature_params_t();             // defaults defined in the .cpp
};
/* Parameters controlling generation of the rainfall map. */
struct rainfall_params_t
{
    int Blur;               // blur amount applied to the map
    rainfall_params_t();    // defaults defined in the .cpp
};
/* Parameters controlling generation of the wind map. */
struct wind_params_t
{
    float TradeWindLongitude;       // longitude at which trade winds act — see ComputeWindDirection()
    float TemperatureBlurMagnitude; // strength of wind-driven temperature blurring
    wind_params_t();                // defaults defined in the .cpp
};
const size_t CLIMMASK_MATCHALL = 0;
const size_t CLIMMASK_MIN_TEMPERATURE = 1;
const size_t CLIMMASK_MAX_TEMPERATURE = 2;
const size_t CLIMMASK_MIN_ALTITUDE = 4;
const size_t CLIMMASK_MAX_ALTITUDE = 8;
const size_t CLIMMASK_MIN_RAINFALL = 16;
const size_t CLIMMASK_MAX_RAINFALL = 32;
const size_t CLIMMASK_MIN_LONGITUDE = 64;
const size_t CLIMMASK_MAX_LONGITUDE = 128;
/*
 * Description of one climate classification: the parameter ranges a map
 * pixel must fall into to be assigned this climate.  Which of the Min/Max
 * limits are actually checked is selected by the CLIMMASK_* bits stored
 * in Mask (CLIMMASK_MATCHALL, i.e. 0, applies no constraints).
 */
struct climate_info_t
{
    int Type;               // climate type identifier
    size_t Mask;            // OR of CLIMMASK_* bits selecting active limits
    CColor Color;           // display colour
    CColor TrueColor;       // base colour used by MakeGradient()
    CGradient Gradient;     // shading gradient derived from TrueColor
    ilstring Name;          // human-readable climate name
    float MinTemperature;   // limit values below; each pair is only
    float MaxTemperature;   // consulted when its CLIMMASK_* bit is set
    float MinAltitude;
    float MaxAltitude;
    float MinRainfall;
    float MaxRainfall;
    float MinLongitude;
    float MaxLongitude;
    int PixelCount;         // number of map pixels assigned this climate

    /* Zero everything; no limits active by default. */
    climate_info_t()
    {
        Mask = 0;
        Type = 0;
        PixelCount = 0;
        MinTemperature = 0;
        MaxTemperature = 0;
        MinAltitude = 0;
        MaxAltitude = 0;
        MinRainfall = 0;
        MaxRainfall = 0;
        MinLongitude = 0;
        MaxLongitude = 0;
        Color = 0;
        TrueColor = 0;
    }

    /*
     * True when the given CLIMMASK_* bit is set in Mask.
     * (const-qualified: the accessor does not mutate the object, so it is
     * now also callable on const instances — existing callers unaffected.)
     */
    bool IsSet(const size_t Value) const
    {
        return (Mask & Value) != 0;
    }

    /* Set (Set == true) or clear the given CLIMMASK_* bit(s) in Mask. */
    void SetMask(const size_t Value, const bool Set)
    {
        if (Set)
            Mask |= Value;
        else
            Mask &= ~Value;
    }

    /* Rebuild Gradient as a three-point gradient (-1, 0, +1) from TrueColor. */
    void MakeGradient (void)
    {
        Gradient.Clear();
        Gradient.AddPoint(-1, TrueColor.Darken(1.5));
        Gradient.AddPoint(0, TrueColor);
        Gradient.AddPoint(1, TrueColor.Darken(5));
    }
};
/* Ordered list of climate definitions; matched first-to-last by FindClimate(). */
typedef std::vector<climate_info_t> CClimateArray;

/* Parameters controlling climate map generation (currently none). */
struct climate_params_t
{
    climate_params_t();     // defaults defined in the .cpp
};
/* Parameters controlling the erosion pass over the altitude map. */
struct erosion_params_t
{
    float ErodeDelta;       // per-step erosion amount — see ErodePoint()
    size_t Blur;            // first blur pass amount
    size_t Blur2;           // second blur pass amount
    float CombineFactor;    // weight when combining eroded and original maps
    erosion_params_t();     // defaults defined in the .cpp
};
/*
 * Procedural landscape generator.  Owns one CMap per physical parameter
 * (altitude, temperature, rainfall, wind, climate, water, erosion) plus a
 * combined map, and produces them from the Create*() family of methods
 * using the corresponding *_params_t structures.
 */
class CLandscape
{
    /* Class constant definitions */
  public:
    static const size_t IL_DEFAULT_MAPWIDTH  = 256;
    static const size_t IL_DEFAULT_MAPHEIGHT = 256;
    static const int IL_DEFAULT_SEED = 0;
    static const float IL_DEFAULT_MINTEMPERATURE;
    static const float IL_DEFAULT_MAXTEMPERATURE;
    static altitude_params_t DEFAULT_ALTITUDE_PARAMS;
    static temperature_params_t DEFAULT_TEMPERATURE_PARAMS;
    static rainfall_params_t DEFAULT_RAINFALL_PARAMS;
    static wind_params_t DEFAULT_WIND_PARAMS;
    static climate_params_t DEFAULT_CLIMATE_PARAMS;
    static erosion_params_t DEFAULT_EROSION_PARAMS;
    static CGradient DEFAULT_ALTITUDE_GRADIENT;
    static CGradient DEFAULT_TEMPERATURE_GRADIENT;
    static CGradient DEFAULT_RAINFALL_GRADIENT;
    CGradient TERRAIN_GRADIENT;
    CGradient CLIMATE_GRADIENT;

    /* Class member definitions */
  private:
    CMap m_AltitudeMap; /* Individual parameter maps */
    CMap m_TemperatureMap;
    CMap m_RainfallMap;
    CMap m_WindMap;
    CMap m_ClimateMap;
    CMap m_WaterMap;
    CMap m_ErosionMap;
    CMap m_CombinedMap;
    mapwrap_t m_MapWrapping;    // edge wrapping mode shared by the maps
    int m_Seed; /* Seed for the pseudo-random number generators */
    size_t m_Width; /* Map size in pixels */
    size_t m_Height;
    altitude_params_t m_AltitudeParams;         // last-used generation parameters
    temperature_params_t m_TemperatureParams;
    rainfall_params_t m_RainfallParams;
    wind_params_t m_WindParams;
    climate_params_t m_ClimateParams;
    erosion_params_t m_ErosionParams;
    CClimateArray m_Climates;   // climate definitions used by CreateClimate()

    /* Class private implementation definitions */
  private:
    float ComputeTemperature_Norm (const float Y, const float Z);
    float ComputeTemperature (const size_t X, const size_t Y);
    float ComputeWindDirection (const size_t X, const size_t Y);
    static int CreateTemperatureGradient (CGradient& Gradient);
    static int CreateRainfallGrayGradient (CGradient& Gradient);
    static int CreateRainfallColorGradient (CGradient& Gradient);
    int CreateSimpleTerrainGradient (CGradient& Gradient);
    int CreateClimateGradient (CGradient& Gradient);
    void BlurTemperatureWind(void);
    climate_info_t* FindClimate(const size_t X, const size_t Y);
    void ErodeStart(const size_t X, const size_t Y);
    void ErodePoint(const size_t X, const size_t Y);

    /* Class public interface definitions */
  public:
    CLandscape();
    CLandscape(const CLandscape& Source);
    ~CLandscape();
    CLandscape& operator=(const CLandscape &Source);

    /* Climate definition management. */
    void AddClimate(const climate_info_t Climate);
    void CreateDefaultClimates(void);
    void ClearClimates(void);
    void OutputClimateCounts(void);

    /* Map generation; each regenerates its map from the given parameters. */
    void CreateAltitude (const altitude_params_t Params = DEFAULT_ALTITUDE_PARAMS);
    void CreateTemperature (const temperature_params_t Params = DEFAULT_TEMPERATURE_PARAMS);
    void CreateRainfall (const rainfall_params_t Params = DEFAULT_RAINFALL_PARAMS);
    void CreateWind (const wind_params_t Params = DEFAULT_WIND_PARAMS);
    void CreateClimate (const climate_params_t Params = DEFAULT_CLIMATE_PARAMS);
    void CreateErosion (const erosion_params_t Params = DEFAULT_EROSION_PARAMS);
    void CreateCombinedMap (void);
    void CreateWaterMap (void);

    /* Accessors for the generated maps (references into this object). */
    CMap& GetAltitudeMap (void) { return m_AltitudeMap; }
    CMap& GetTemperatureMap (void) { return m_TemperatureMap; }
    CMap& GetRainfallMap (void) { return m_RainfallMap; }
    CMap& GetWindMap (void) { return m_WindMap; }
    CMap& GetClimateMap (void) { return m_ClimateMap; }
    CMap& GetWaterMap (void) { return m_WaterMap; }
    CMap& GetErosionMap (void) { return m_ErosionMap; }
    CMap& GetCombinedMap (void) { return m_CombinedMap; }
    int GetSeed (void) const { return m_Seed; }
    void OutputModifiedTerrainBMP (const TCHAR* pFilename);
    void SetSeed(const int Seed);
    void SetSize(const size_t Width, const size_t Height);
    float ComputeLongitude(const size_t PixelY);
};
};
#endif
| 23.006897
| 91
| 0.706535
|
[
"vector"
] |
f9844575a5107694a18aaa69566d8dd1eea18883
| 58,953
|
c
|
C
|
ds/security/protocols/schannel/spbase/keyxmspk.c
|
npocmaka/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 17
|
2020-11-13T13:42:52.000Z
|
2021-09-16T09:13:13.000Z
|
ds/security/protocols/schannel/spbase/keyxmspk.c
|
sancho1952007/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 2
|
2020-10-19T08:02:06.000Z
|
2020-10-19T08:23:18.000Z
|
ds/security/protocols/schannel/spbase/keyxmspk.c
|
sancho1952007/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 14
|
2020-11-14T09:43:20.000Z
|
2021-08-28T08:59:57.000Z
|
//+---------------------------------------------------------------------------
//
// Microsoft Windows
// Copyright (C) Microsoft Corporation, 1992 - 1995.
//
// File: keyxmspk.c
//
// Contents:
//
// Classes:
//
// Functions:
//
// History: 09-23-97 jbanes LSA integration stuff.
//
//----------------------------------------------------------------------------
#include <spbase.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
// PROV_RSA_SCHANNEL handle used when building ClientHello messages.
HCRYPTPROV g_hRsaSchannel = 0;
PROV_ENUMALGS_EX * g_pRsaSchannelAlgs = NULL;
DWORD g_cRsaSchannelAlgs = 0;
SP_STATUS
Ssl3ParseServerKeyExchange(
PSPContext pContext,
PBYTE pbMessage,
DWORD cbMessage,
HCRYPTKEY hServerPublic,
HCRYPTKEY *phNewServerPublic);
SP_STATUS
PkcsFinishMasterKey(
PSPContext pContext,
HCRYPTKEY hMasterKey);
SP_STATUS
WINAPI
PkcsGenerateServerExchangeValue(
SPContext * pContext, // in
PUCHAR pServerExchangeValue, // out
DWORD * pcbServerExchangeValue // in/out
);
SP_STATUS
WINAPI
PkcsGenerateClientExchangeValue(
SPContext * pContext, // in
PUCHAR pServerExchangeValue, // in
DWORD cbServerExchangeValue, // in
PUCHAR pClientClearValue, // out
DWORD * pcbClientClearValue, // in/out
PUCHAR pClientExchangeValue, // out
DWORD * pcbClientExchangeValue // in/out
);
SP_STATUS
WINAPI
PkcsGenerateServerMasterKey(
SPContext * pContext, // in
PUCHAR pClientClearValue, // in
DWORD cbClientClearValue, // in
PUCHAR pClientExchangeValue, // in
DWORD cbClientExchangeValue // in
);
/*
 * Dispatch table for the RSA PKCS#1 key-exchange system: identifier,
 * display name, and the three exchange-value/master-key callbacks.
 */
KeyExchangeSystem keyexchPKCS = {
    SP_EXCH_RSA_PKCS1,
    "RSA",
//  PkcsPrivateFromBlob,                /* slot intentionally disabled */
    PkcsGenerateServerExchangeValue,
    PkcsGenerateClientExchangeValue,
    PkcsGenerateServerMasterKey,
};
/*
 * Copy Size bytes from Source to Dest with the byte order reversed:
 * Dest[0] receives Source[Size - 1], Dest[Size - 1] receives Source[0].
 * The two buffers must not overlap.
 */
VOID
ReverseMemCopy(
    PUCHAR Dest,
    PUCHAR Source,
    ULONG Size)
{
    ULONG i;

    for(i = 0; i < Size; i++)
    {
        Dest[i] = Source[Size - 1 - i];
    }
}
/*
 * Obtain (creating on first use) the ephemeral RSA key pair of the given
 * size for the active server credential.  The per-credential CSP handle
 * (hEphem512Prov / hEphem1024Prov) is cached, so the expensive key
 * generation happens at most once per credential and key size.
 *
 *  pContext        - connection context; supplies the server credential
 *  dwKeySize       - requested key size in bits (512 or 1024 only)
 *  phEphemeralProv - receives the (credential-owned) CSP handle
 *  phEphemeralKey  - receives a key handle the caller must destroy
 */
SP_STATUS
GenerateSsl3KeyPair(
    PSPContext  pContext,           // in
    DWORD       dwKeySize,          // in
    HCRYPTPROV *phEphemeralProv,    // out
    HCRYPTKEY * phEphemeralKey)     // out
{
    HCRYPTPROV * phEphemProv;
    PCRYPT_KEY_PROV_INFO pProvInfo = NULL;
    PSPCredentialGroup pCredGroup;
    PSPCredential pCred;
    DWORD cbSize;
    SP_STATUS pctRet;

    pCredGroup = pContext->RipeZombie->pServerCred;
    if(pCredGroup == NULL)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    /* Credential group is locked while we touch the cached CSP handles. */
    LockCredentialExclusive(pCredGroup);

    pCred = pContext->RipeZombie->pActiveServerCred;
    if(pCred == NULL)
    {
        pctRet = SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
        goto cleanup;
    }

    /* Only 512- and 1024-bit ephemeral keys are supported. */
    if(dwKeySize == 512)
    {
        phEphemProv = &pCred->hEphem512Prov;
    }
    else if(dwKeySize == 1024)
    {
        phEphemProv = &pCred->hEphem1024Prov;
    }
    else
    {
        pctRet = SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
        goto cleanup;
    }

    //
    // Obtain CSP context.
    //

    if(*phEphemProv == 0)
    {
        // Read the certificate context's "key info" property.
        if(CertGetCertificateContextProperty(pCred->pCert,
                                             CERT_KEY_PROV_INFO_PROP_ID,
                                             NULL,
                                             &cbSize))
        {
            SafeAllocaAllocate(pProvInfo, cbSize);
            if(pProvInfo == NULL)
            {
                pctRet = SP_LOG_RESULT(SEC_E_INSUFFICIENT_MEMORY);
                goto cleanup;
            }

            if(!CertGetCertificateContextProperty(pCred->pCert,
                                                  CERT_KEY_PROV_INFO_PROP_ID,
                                                  pProvInfo,
                                                  &cbSize))
            {
                /* Non-fatal: fall back to the default PROV_RSA_SCHANNEL path. */
                DebugLog((SP_LOG_ERROR, "Error 0x%x reading CERT_KEY_PROV_INFO_PROP_ID\n",GetLastError()));
                SafeAllocaFree(pProvInfo);
                pProvInfo = NULL;
            }
        }

        // Obtain a "verify only" csp context.
        if(pProvInfo)
        {
            // If the private key belongs to one of the Microsoft PROV_RSA_FULL
            // CSPs, then manually divert it to the Microsoft PROV_RSA_SCHANNEL
            // CSP. This works because both CSP types use the same private key
            // storage scheme.
            if(pProvInfo->dwProvType == PROV_RSA_FULL)
            {
                if(lstrcmpW(pProvInfo->pwszProvName, MS_DEF_PROV_W) == 0 ||
                   lstrcmpW(pProvInfo->pwszProvName, MS_STRONG_PROV_W) == 0 ||
                   lstrcmpW(pProvInfo->pwszProvName, MS_ENHANCED_PROV_W) == 0)
                {
                    DebugLog((DEB_WARN, "Force CSP type to PROV_RSA_SCHANNEL.\n"));
                    pProvInfo->pwszProvName = MS_DEF_RSA_SCHANNEL_PROV_W;
                    pProvInfo->dwProvType = PROV_RSA_SCHANNEL;
                }
            }

            if(!CryptAcquireContextW(phEphemProv,
                                     NULL,
                                     pProvInfo->pwszProvName,
                                     pProvInfo->dwProvType,
                                     CRYPT_VERIFYCONTEXT))
            {
                SP_LOG_RESULT(GetLastError());
                pctRet = SEC_E_NO_CREDENTIALS;
                goto cleanup;
            }

            SafeAllocaFree(pProvInfo);
            pProvInfo = NULL;
        }
        else
        {
            if(!CryptAcquireContextW(phEphemProv,
                                     NULL,
                                     NULL,
                                     PROV_RSA_SCHANNEL,
                                     CRYPT_VERIFYCONTEXT))
            {
                SP_LOG_RESULT(GetLastError());
                pctRet = SEC_E_NO_CREDENTIALS;
                goto cleanup;
            }
        }
    }

    //
    // Obtain handle to private key.
    //

    if(!CryptGetUserKey(*phEphemProv,
                        AT_KEYEXCHANGE,
                        phEphemeralKey))
    {
        // Key does not exist, so attempt to create one.
        DebugLog((DEB_TRACE, "Creating %d-bit ephemeral key.\n", dwKeySize));

        /* CryptGenKey takes the key length in the upper 16 bits of dwFlags. */
        if(!CryptGenKey(*phEphemProv,
                        AT_KEYEXCHANGE,
                        (dwKeySize << 16),
                        phEphemeralKey))
        {
            DebugLog((DEB_ERROR, "Error 0x%x generating ephemeral key\n", GetLastError()));
            pctRet = SEC_E_NO_CREDENTIALS;
            goto cleanup;
        }
        DebugLog((DEB_TRACE, "Ephemeral key created okay.\n"));
    }

    *phEphemeralProv = *phEphemProv;

    pctRet = PCT_ERR_OK;

cleanup:

    if(pProvInfo)
    {
        SafeAllocaFree(pProvInfo);
    }

    UnlockCredential(pCredGroup);

    return pctRet;
}
//+---------------------------------------------------------------------------
//
// Function: PkcsGenerateServerExchangeValue
//
// Synopsis: Create a ServerKeyExchange message, containing an ephemeral
// RSA key.
//
// Arguments: [pContext] -- Schannel context.
// [pServerExchangeValue] --
// [pcbServerExchangeValue] --
//
// History: 10-09-97 jbanes Added CAPI integration.
//
// Notes: This routine is called by the server-side only.
//
// In the case of SSL3 or TLS, the ServerKeyExchange message
// consists of the following structure, signed with the
// server's private key.
//
// struct {
// opaque rsa_modulus<1..2^16-1>;
// opaque rsa_exponent<1..2^16-1>;
// } Server RSA Params;
//
// This message is only sent when the server's private key
// is greater then 512 bits and an export cipher suite is
// being negotiated.
//
//----------------------------------------------------------------------------
/*
 * Build the (signed) ServerKeyExchange payload for SSL3/TLS export cipher
 * suites.  See the comment block above for the wire format.  Returns the
 * message length in *pcbServerExchangeValue; a length of zero means no
 * ServerKeyExchange message is required for this connection.
 */
SP_STATUS
WINAPI
PkcsGenerateServerExchangeValue(
    PSPContext  pContext,                   // in
    PBYTE       pServerExchangeValue,       // out
    DWORD *     pcbServerExchangeValue)     // in/out
{
    PSPCredential   pCred;
    HCRYPTKEY       hServerKey;
    HCRYPTPROV      hEphemeralProv;
    HCRYPTKEY       hEphemeralKey;
    DWORD           cbData;
    DWORD           cbServerModulus;
    PBYTE           pbBlob = NULL;
    DWORD           cbBlob;
    BLOBHEADER *    pBlobHeader = NULL;
    RSAPUBKEY *     pRsaPubKey = NULL;
    PBYTE           pbModulus = NULL;
    DWORD           cbModulus;
    DWORD           cbExp;
    PBYTE           pbMessage = NULL;
    DWORD           cbSignature;
    HCRYPTHASH      hHash;
    BYTE            rgbHashValue[CB_MD5_DIGEST_LEN + CB_SHA_DIGEST_LEN];
    UINT            i;
    SP_STATUS       pctRet;
    BOOL            fImpersonating = FALSE;
    UNICipherMap *  pCipherSuite;
    DWORD           cbAllowedKeySize;

    pCred = pContext->RipeZombie->pActiveServerCred;
    if(pCred == NULL)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    pContext->fExchKey = FALSE;

    if(pContext->RipeZombie->fProtocol == SP_PROT_SSL2_SERVER ||
       pContext->RipeZombie->fProtocol == SP_PROT_PCT1_SERVER)
    {
        // There is no ServerExchangeValue for SSL2 or PCT1
        *pcbServerExchangeValue = 0;
        return PCT_ERR_OK;
    }

    if(pContext->RipeZombie->fProtocol != SP_PROT_SSL3_SERVER &&
       pContext->RipeZombie->fProtocol != SP_PROT_TLS1_SERVER)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    //
    // Determine if ServerKeyExchange message is necessary.
    //

    pCipherSuite = &UniAvailableCiphers[pContext->dwPendingCipherSuiteIndex];

    if(pCipherSuite->dwFlags & DOMESTIC_CIPHER_SUITE)
    {
        // Message not necessary.
        *pcbServerExchangeValue = 0;
        return PCT_ERR_OK;
    }

    if(pCred->hProv == 0)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    /* Private-key operations may need the client's security context. */
    fImpersonating = SslImpersonateClient();

    if(!CryptGetUserKey(pCred->hProv,
                        pCred->dwKeySpec,
                        &hServerKey))
    {
        DebugLog((DEB_ERROR, "Error 0x%x obtaining handle to server public key\n",
            GetLastError()));
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }

    /* Query the server key size (KP_BLOCKLEN reports it in bits). */
    cbData = sizeof(DWORD);
    if(!CryptGetKeyParam(hServerKey,
                         KP_BLOCKLEN,
                         (PBYTE)&cbServerModulus,
                         &cbData,
                         0))
    {
        SP_LOG_RESULT(GetLastError());
        CryptDestroyKey(hServerKey);
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }
    CryptDestroyKey(hServerKey);

    /* Export ciphers cap the exchange key at 512 bits; 56-bit export at 1024. */
    if(pCipherSuite->dwFlags & EXPORT56_CIPHER_SUITE)
    {
        cbAllowedKeySize = 1024;
    }
    else
    {
        cbAllowedKeySize = 512;
    }

    if(cbServerModulus <= cbAllowedKeySize)
    {
        // Message not necessary.
        *pcbServerExchangeValue = 0;
        pctRet = PCT_ERR_OK;
        goto cleanup;
    }

    // Convert size from bits to bytes.
    cbServerModulus /= 8;

    pContext->fExchKey = TRUE;

    if(fImpersonating)
    {
        RevertToSelf();
        fImpersonating = FALSE;
    }

    //
    // Compute approximate size of ServerKeyExchange message.
    //

    if(pServerExchangeValue == NULL)
    {
        *pcbServerExchangeValue =
            2 + cbAllowedKeySize / 8 +      // modulus
            2 + sizeof(DWORD) +             // exponent
            2 + cbServerModulus;            // signature
        pctRet = PCT_ERR_OK;
        goto cleanup;
    }

    //
    // Get handle to 512-bit ephemeral RSA key. Generate it if
    // we haven't already.
    //

    pctRet = GenerateSsl3KeyPair(pContext,
                                 cbAllowedKeySize,
                                 &hEphemeralProv,
                                 &hEphemeralKey);
    if(pctRet != PCT_ERR_OK)
    {
        SP_LOG_RESULT(pctRet);
        goto cleanup;
    }

    //
    // Export ephemeral key.
    //

    /* First call sizes the blob, second call fills it. */
    if(!CryptExportKey(hEphemeralKey,
                       0,
                       PUBLICKEYBLOB,
                       0,
                       NULL,
                       &cbBlob))
    {
        SP_LOG_RESULT(GetLastError());
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }
    SafeAllocaAllocate(pbBlob, cbBlob);
    if(pbBlob == NULL)
    {
        pctRet = SP_LOG_RESULT(SEC_E_INSUFFICIENT_MEMORY);
        goto cleanup;
    }
    if(!CryptExportKey(hEphemeralKey,
                       0,
                       PUBLICKEYBLOB,
                       0,
                       pbBlob,
                       &cbBlob))
    {
        SP_LOG_RESULT(GetLastError());
        SafeAllocaFree(pbBlob);
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }

    //
    // Destroy handle to ephemeral key. Don't release the ephemeral hProv
    // though--that's owned by the credential.

    CryptDestroyKey(hEphemeralKey);

    //
    // Build message from key blob.
    //

    pBlobHeader = (BLOBHEADER *)pbBlob;
    pRsaPubKey  = (RSAPUBKEY *)(pBlobHeader + 1);
    pbModulus   = (BYTE *)(pRsaPubKey + 1);
    cbModulus   = pRsaPubKey->bitlen / 8;

    pbMessage = pServerExchangeValue;

    /* rsa_modulus: 16-bit length, then big-endian modulus bytes
     * (CAPI blobs are little-endian, hence the reverse copy). */
    pbMessage[0] = MSBOF(cbModulus);
    pbMessage[1] = LSBOF(cbModulus);
    pbMessage += 2;

    ReverseMemCopy(pbMessage, pbModulus, cbModulus);
    pbMessage += cbModulus;

    // Don't laugh, this works - pete
    /* Minimal byte length of the public exponent. */
    cbExp = ((pRsaPubKey->pubexp & 0xff000000) ? 4 :
            ((pRsaPubKey->pubexp & 0x00ff0000) ? 3 :
            ((pRsaPubKey->pubexp & 0x0000ff00) ? 2 : 1)));

    /* rsa_exponent: 16-bit length, then big-endian exponent bytes. */
    pbMessage[0] = MSBOF(cbExp);
    pbMessage[1] = LSBOF(cbExp);
    pbMessage += 2;

    ReverseMemCopy(pbMessage, (PBYTE)&pRsaPubKey->pubexp, cbExp);
    pbMessage += cbExp;

    SafeAllocaFree(pbBlob);
    pbBlob = NULL;

    fImpersonating = SslImpersonateClient();

    // Generate hash values
    ComputeServerExchangeHashes(
                        pContext,
                        pServerExchangeValue,
                        (int)(pbMessage - pServerExchangeValue),
                        rgbHashValue,
                        rgbHashValue + CB_MD5_DIGEST_LEN);

    // Sign hash value.
    if(!CryptCreateHash(pCred->hProv,
                        CALG_SSL3_SHAMD5,
                        0,
                        0,
                        &hHash))
    {
        SP_LOG_RESULT(GetLastError());
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }
    if(!CryptSetHashParam(hHash,
                          HP_HASHVAL,
                          rgbHashValue,
                          0))
    {
        SP_LOG_RESULT(GetLastError());
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }

    DebugLog((DEB_TRACE, "Signing server_key_exchange message.\n"));

    cbSignature = cbServerModulus;
    if(!CryptSignHash(hHash,
                      pCred->dwKeySpec,
                      NULL,
                      0,
                      pbMessage + 2,
                      &cbSignature))
    {
        SP_LOG_RESULT(GetLastError());
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }

    DebugLog((DEB_TRACE, "Server_key_exchange message signed successfully.\n"));

    CryptDestroyHash(hHash);

    /* signature: 16-bit length, then big-endian signature bytes. */
    pbMessage[0] = MSBOF(cbSignature);
    pbMessage[1] = LSBOF(cbSignature);
    pbMessage += 2;

    // Reverse signature (in place, little-endian -> big-endian).
    for(i = 0; i < cbSignature / 2; i++)
    {
        BYTE n = pbMessage[i];
        pbMessage[i] = pbMessage[cbSignature - i -1];
        pbMessage[cbSignature - i -1] = n;
    }
    pbMessage += cbSignature;

    *pcbServerExchangeValue = (DWORD)(pbMessage - pServerExchangeValue);

    // Use ephemeral key for the new connection.
    pContext->RipeZombie->hMasterProv = hEphemeralProv;
    pContext->RipeZombie->dwFlags |= SP_CACHE_FLAG_MASTER_EPHEM;

    pctRet = PCT_ERR_OK;

cleanup:

    if(fImpersonating)
    {
        RevertToSelf();
    }

    return pctRet;
}
SP_STATUS
WINAPI
PkcsGenerateClientExchangeValue(
SPContext * pContext, // in
PUCHAR pServerExchangeValue, // in
DWORD cbServerExchangeValue, // in
PUCHAR pClientClearValue, // out
DWORD * pcbClientClearValue, // in/out
PUCHAR pClientExchangeValue, // out
DWORD * pcbClientExchangeValue) // in/out
{
PSPCredentialGroup pCred;
DWORD cbSecret;
DWORD cbMasterKey;
HCRYPTKEY hServerPublic = 0;
DWORD dwGenFlags = 0;
DWORD dwExportFlags = 0;
SP_STATUS pctRet = PCT_ERR_OK;
BLOBHEADER *pPublicBlob;
DWORD cbPublicBlob;
DWORD cbHeader;
ALG_ID Algid = 0;
DWORD cbData;
DWORD cbEncryptedKey;
DWORD dwEnabledProtocols;
DWORD dwHighestProtocol;
if(pContext->RipeZombie->hMasterProv == 0)
{
return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
}
pCred = pContext->pCredGroup;
if(pCred == NULL)
{
return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
}
// We're doing a full handshake.
pContext->Flags |= CONTEXT_FLAG_FULL_HANDSHAKE;
//
// Determine highest supported protocol.
//
dwEnabledProtocols = pContext->dwClientEnabledProtocols;
if(dwEnabledProtocols & SP_PROT_TLS1_CLIENT)
{
dwHighestProtocol = TLS1_CLIENT_VERSION;
}
else if(dwEnabledProtocols & SP_PROT_SSL3_CLIENT)
{
dwHighestProtocol = SSL3_CLIENT_VERSION;
}
else
{
dwHighestProtocol = SSL2_CLIENT_VERSION;
}
// Get key length.
cbSecret = pContext->pPendingCipherInfo->cbSecret;
//
// Import server's public key.
//
pPublicBlob = pContext->RipeZombie->pRemotePublic->pPublic;
cbPublicBlob = pContext->RipeZombie->pRemotePublic->cbPublic;
cbEncryptedKey = sizeof(BLOBHEADER) + sizeof(ALG_ID) + cbPublicBlob;
if(pClientExchangeValue == NULL)
{
*pcbClientExchangeValue = cbEncryptedKey;
pctRet = PCT_ERR_OK;
goto done;
}
if(*pcbClientExchangeValue < cbEncryptedKey)
{
*pcbClientExchangeValue = cbEncryptedKey;
pctRet = SP_LOG_RESULT(PCT_INT_BUFF_TOO_SMALL);
goto done;
}
if(!CryptImportKey(pContext->RipeZombie->hMasterProv,
(PBYTE)pPublicBlob,
cbPublicBlob,
0,
0,
&hServerPublic))
{
SP_LOG_RESULT(GetLastError());
pctRet = PCT_INT_INTERNAL_ERROR;
goto done;
}
//
// Do protocol specific stuff.
//
switch(pContext->RipeZombie->fProtocol)
{
case SP_PROT_PCT1_CLIENT:
Algid = CALG_PCT1_MASTER;
dwGenFlags = CRYPT_EXPORTABLE;
// Generate the clear key value.
if(cbSecret < PCT1_MASTER_KEY_SIZE)
{
pContext->RipeZombie->cbClearKey = PCT1_MASTER_KEY_SIZE - cbSecret;
pctRet = GenerateRandomBits(pContext->RipeZombie->pClearKey,
pContext->RipeZombie->cbClearKey);
if(!NT_SUCCESS(pctRet))
{
goto done;
}
*pcbClientClearValue = pContext->RipeZombie->cbClearKey;
CopyMemory( pClientClearValue,
pContext->RipeZombie->pClearKey,
pContext->RipeZombie->cbClearKey);
}
else
{
*pcbClientClearValue = pContext->RipeZombie->cbClearKey = 0;
}
break;
case SP_PROT_SSL2_CLIENT:
Algid = CALG_SSL2_MASTER;
dwGenFlags = CRYPT_EXPORTABLE;
cbMasterKey = pContext->pPendingCipherInfo->cbKey;
dwGenFlags |= ((cbSecret << 3) << 16);
// Generate the clear key value.
pContext->RipeZombie->cbClearKey = cbMasterKey - cbSecret;
if(pContext->RipeZombie->cbClearKey > 0)
{
pctRet = GenerateRandomBits(pContext->RipeZombie->pClearKey,
pContext->RipeZombie->cbClearKey);
if(!NT_SUCCESS(pctRet))
{
goto done;
}
CopyMemory(pClientClearValue,
pContext->RipeZombie->pClearKey,
pContext->RipeZombie->cbClearKey);
}
*pcbClientClearValue = pContext->RipeZombie->cbClearKey;
if(dwEnabledProtocols & (SP_PROT_SSL3 | SP_PROT_TLS1))
{
// If we're a client doing SSL2, and
// SSL3 is enabled, then for some reason
// the server requested SSL2. Maybe
// A man in the middle changed the server
// version in the server hello to roll
// back. Pad with 8 0x03's so the server
// can detect this.
dwExportFlags = CRYPT_SSL2_FALLBACK;
}
break;
case SP_PROT_TLS1_CLIENT:
Algid = CALG_TLS1_MASTER;
// drop through to SSL3
case SP_PROT_SSL3_CLIENT:
dwGenFlags = CRYPT_EXPORTABLE;
if(0 == Algid)
{
Algid = CALG_SSL3_MASTER;
}
// Generate the clear key value (always empty).
pContext->RipeZombie->cbClearKey = 0;
if(pcbClientClearValue) *pcbClientClearValue = 0;
if(cbServerExchangeValue && pServerExchangeValue)
{
// In ssl3, we look at the server exchange value.
// It may be a 512-bit public key, signed
// by the server public key. In this case, we need to
// use that as our master_secret encryption key.
HCRYPTKEY hNewServerPublic;
pctRet = Ssl3ParseServerKeyExchange(pContext,
pServerExchangeValue,
cbServerExchangeValue,
hServerPublic,
&hNewServerPublic);
if(pctRet != PCT_ERR_OK)
{
goto done;
}
// Destroy public key from certificate.
CryptDestroyKey(hServerPublic);
// Use public key from ServerKeyExchange instead.
hServerPublic = hNewServerPublic;
}
break;
default:
return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
}
// Generate the master_secret.
if(!CryptGenKey(pContext->RipeZombie->hMasterProv,
Algid,
dwGenFlags,
&pContext->RipeZombie->hMasterKey))
{
SP_LOG_RESULT(GetLastError());
pctRet = PCT_INT_INTERNAL_ERROR;
goto done;
}
#if 1
// This is currently commented out because when connecting to a server running
// an old version of schannel (NT4 SP3 or so), then we will connect using SSL3,
// but the highest supported protocol is 0x0301. This confuses the server and
// it drops the connection.
// Set highest supported protocol. The CSP will place this version number
// in the pre_master_secret.
if(!CryptSetKeyParam(pContext->RipeZombie->hMasterKey,
KP_HIGHEST_VERSION,
(PBYTE)&dwHighestProtocol,
0))
{
SP_LOG_RESULT(GetLastError());
}
#endif
// Encrypt the master_secret.
DebugLog((DEB_TRACE, "Encrypt the master secret.\n"));
if(!CryptExportKey(pContext->RipeZombie->hMasterKey,
hServerPublic,
SIMPLEBLOB,
dwExportFlags,
pClientExchangeValue,
&cbEncryptedKey))
{
SP_LOG_RESULT(GetLastError());
pctRet = PCT_INT_INTERNAL_ERROR;
goto done;
}
DebugLog((DEB_TRACE, "Master secret encrypted successfully.\n"));
// Determine size of key exchange key.
cbData = sizeof(DWORD);
if(!CryptGetKeyParam(hServerPublic,
KP_BLOCKLEN,
(PBYTE)&pContext->RipeZombie->dwExchStrength,
&cbData,
0))
{
SP_LOG_RESULT(GetLastError());
pContext->RipeZombie->dwExchStrength = 0;
}
// Strip off the blob header and copy the encrypted master_secret
// to the output buffer. Note that it is also converted to big endian.
cbHeader = sizeof(BLOBHEADER) + sizeof(ALG_ID);
cbEncryptedKey -= cbHeader;
if(pContext->RipeZombie->fProtocol == SP_PROT_TLS1_CLIENT)
{
MoveMemory(pClientExchangeValue + 2, pClientExchangeValue + cbHeader, cbEncryptedKey);
ReverseInPlace(pClientExchangeValue + 2, cbEncryptedKey);
pClientExchangeValue[0] = MSBOF(cbEncryptedKey);
pClientExchangeValue[1] = LSBOF(cbEncryptedKey);
*pcbClientExchangeValue = 2 + cbEncryptedKey;
}
else
{
MoveMemory(pClientExchangeValue, pClientExchangeValue + cbHeader, cbEncryptedKey);
ReverseInPlace(pClientExchangeValue, cbEncryptedKey);
*pcbClientExchangeValue = cbEncryptedKey;
}
// Build the session keys.
pctRet = MakeSessionKeys(pContext,
pContext->RipeZombie->hMasterProv,
pContext->RipeZombie->hMasterKey);
if(pctRet != PCT_ERR_OK)
{
goto done;
}
// Update perf counter.
InterlockedIncrement(&g_cClientHandshakes);
pctRet = PCT_ERR_OK;
done:
if(hServerPublic) CryptDestroyKey(hServerPublic);
return pctRet;
}
//
// Generate a random master_secret of the flavor required by the
// negotiated protocol. Used as a Bleichenbacher (PKCS#1) countermeasure
// when the ClientKeyExchange fails to decrypt: the handshake proceeds
// with a bogus key instead of revealing the padding failure.
//
SP_STATUS
GenerateRandomMasterKey(
    PSPContext  pContext,       // in
    HCRYPTKEY * phMasterKey)    // out
{
    ALG_ID MasterAlgid;
    DWORD  dwKeyGenFlags   = CRYPT_EXPORTABLE;  // every protocol generates an exportable master
    DWORD  cbCipherSecret  = pContext->pPendingCipherInfo->cbSecret;

    //
    // Map the negotiated protocol onto the CSP's master-secret algorithm.
    //
    switch(pContext->RipeZombie->fProtocol)
    {
        case SP_PROT_PCT1_SERVER:
            MasterAlgid = CALG_PCT1_MASTER;
            break;

        case SP_PROT_SSL2_SERVER:
            MasterAlgid = CALG_SSL2_MASTER;
            // SSL2 master keys are variable length: the CSP expects the
            // secret size in *bits* encoded in the upper word of the flags.
            dwKeyGenFlags |= ((cbCipherSecret << 3) << 16);
            break;

        case SP_PROT_TLS1_SERVER:
            MasterAlgid = CALG_TLS1_MASTER;
            break;

        case SP_PROT_SSL3_SERVER:
            MasterAlgid = CALG_SSL3_MASTER;
            break;

        default:
            // Unknown/unsupported protocol - refuse to generate anything.
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    // Generate the random master_secret inside the CSP.
    if(!CryptGenKey(pContext->RipeZombie->hMasterProv,
                    MasterAlgid,
                    dwKeyGenFlags,
                    phMasterKey))
    {
        SP_LOG_RESULT(GetLastError());
        return PCT_INT_INTERNAL_ERROR;
    }

    return PCT_ERR_OK;
}
//+---------------------------------------------------------------------------
//
// Function: PkcsGenerateServerMasterKey
//
// Synopsis: Decrypt the master secret (from the ClientKeyExchange message)
// and derive the session keys from it.
//
// Arguments: [pContext] -- Schannel context.
// [pClientClearValue] -- Not used.
// [cbClientClearValue] -- Not used.
// [pClientExchangeValue] -- Pointer PKCS #2 block.
// [cbClientExchangeValue] -- Length of block.
//
// History: 10-02-97 jbanes Created.
//
// Notes: This routine is called by the server-side only.
//
//----------------------------------------------------------------------------
SP_STATUS
WINAPI
PkcsGenerateServerMasterKey(
    PSPContext  pContext,               // in, out
    PUCHAR      pClientClearValue,      // in
    DWORD       cbClientClearValue,     // in
    PUCHAR      pClientExchangeValue,   // in
    DWORD       cbClientExchangeValue)  // in
{
    PSPCredentialGroup pCred;
    PBYTE       pbBlob = NULL;
    DWORD       cbBlob;
    ALG_ID      Algid;
    HCRYPTKEY   hMasterKey;
    HCRYPTKEY   hExchKey = 0;
    DWORD       dwFlags = 0;
    SP_STATUS   pctRet;
    DWORD       cbData;
    DWORD       dwEnabledProtocols;
    DWORD       dwHighestProtocol;
    BOOL        fImpersonating = FALSE;
    DWORD       cbExpectedLength;

    pCred = pContext->RipeZombie->pServerCred;
    if(pCred == NULL)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    //
    // Compute the highest protocol version we advertise. The CSP compares
    // this against the version embedded in the decrypted pre_master_secret
    // to detect version rollback attacks.
    //
    dwEnabledProtocols = (g_ProtEnabled & pCred->grbitEnabledProtocols);
    if(dwEnabledProtocols & SP_PROT_TLS1_SERVER)
    {
        dwHighestProtocol = TLS1_CLIENT_VERSION;
    }
    else if(dwEnabledProtocols & SP_PROT_SSL3_SERVER)
    {
        dwHighestProtocol = SSL3_CLIENT_VERSION;
    }
    else
    {
        dwHighestProtocol = SSL2_CLIENT_VERSION;
    }

    // We're doing a full handshake.
    pContext->Flags |= CONTEXT_FLAG_FULL_HANDSHAKE;

    // Determine encryption algid (and, for PCT1/SSL2, stash the clear key
    // portion the client sent in the ClientMasterKey message).
    switch(pContext->RipeZombie->fProtocol)
    {
        case SP_PROT_PCT1_SERVER:
            Algid = CALG_PCT1_MASTER;
            if(cbClientClearValue > sizeof(pContext->RipeZombie->pClearKey))
            {
                return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
            }
            CopyMemory(pContext->RipeZombie->pClearKey,
                       pClientClearValue,
                       cbClientClearValue);
            pContext->RipeZombie->cbClearKey = cbClientClearValue;
            break;

        case SP_PROT_SSL2_SERVER:
            Algid = CALG_SSL2_MASTER;
            if(dwEnabledProtocols & (SP_PROT_SSL3 | SP_PROT_TLS1))
            {
                // We're a server doing SSL2, and we also support SSL3.
                // If the encryption block contains the 8 0x03 padding
                // bytes, then abort the connection (rollback detection).
                dwFlags = CRYPT_SSL2_FALLBACK;
            }
            if(cbClientClearValue > sizeof(pContext->RipeZombie->pClearKey))
            {
                return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
            }
            CopyMemory(pContext->RipeZombie->pClearKey,
                       pClientClearValue,
                       cbClientClearValue);
            pContext->RipeZombie->cbClearKey = cbClientClearValue;
            break;

        case SP_PROT_SSL3_SERVER:
            Algid = CALG_SSL3_MASTER;
            break;

        case SP_PROT_TLS1_SERVER:
            Algid = CALG_TLS1_MASTER;
            break;

        default:
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    // Get handle to key exchange key.
    if(!CryptGetUserKey(pContext->RipeZombie->hMasterProv,
                        AT_KEYEXCHANGE,
                        &hExchKey))
    {
        SP_LOG_RESULT(GetLastError());
        pctRet = PCT_INT_INTERNAL_ERROR;
        goto cleanup;
    }

    // Determine size of key exchange key (best effort - a failure just
    // leaves the exchange strength as zero).
    cbData = sizeof(DWORD);
    if(!CryptGetKeyParam(hExchKey,
                         KP_BLOCKLEN,
                         (PBYTE)&pContext->RipeZombie->dwExchStrength,
                         &cbData,
                         0))
    {
        SP_LOG_RESULT(GetLastError());
        pContext->RipeZombie->dwExchStrength = 0;
    }

    // RSA ciphertext size in bytes.
    cbExpectedLength = (pContext->RipeZombie->dwExchStrength + 7) / 8;

    if(cbClientExchangeValue < 2)
    {
        pctRet = SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
        goto cleanup;
    }

    // Remove (pseudo-optional) vector in front of the encrypted master key.
    // TLS requires a 2-byte length prefix; some SSL3 clients send one too.
    if(pContext->RipeZombie->fProtocol == SP_PROT_SSL3_SERVER ||
       pContext->RipeZombie->fProtocol == SP_PROT_TLS1_SERVER)
    {
        DWORD cbMsg = MAKEWORD(pClientExchangeValue[1], pClientExchangeValue[0]);
        if((cbMsg == cbExpectedLength) && (cbMsg + 2 == cbClientExchangeValue))
        {
            pClientExchangeValue += 2;
            cbClientExchangeValue -= 2;
        }
    }
    if(cbClientExchangeValue < cbExpectedLength)
    {
        pctRet = SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
        goto cleanup;
    }

    // Allocate memory for blob.
    cbBlob = sizeof(BLOBHEADER) + sizeof(ALG_ID) + cbClientExchangeValue;
    SafeAllocaAllocate(pbBlob, cbBlob);
    if(pbBlob == NULL)
    {
        // BUG FIX: previously returned directly here, leaking hExchKey.
        // Route through the common cleanup path instead.
        pctRet = SP_LOG_RESULT(SEC_E_INSUFFICIENT_MEMORY);
        goto cleanup;
    }

    // Build SIMPLEBLOB wrapping the client's (big-endian) RSA ciphertext
    // so that CryptImportKey can decrypt it with the exchange key.
    {
        BLOBHEADER *pBlobHeader = (BLOBHEADER *)pbBlob;
        ALG_ID *pAlgid = (ALG_ID *)(pBlobHeader + 1);
        BYTE *pData = (BYTE *)(pAlgid + 1);

        pBlobHeader->bType    = SIMPLEBLOB;
        pBlobHeader->bVersion = CUR_BLOB_VERSION;
        pBlobHeader->reserved = 0;
        pBlobHeader->aiKeyAlg = Algid;
        *pAlgid = CALG_RSA_KEYX;

        // CryptoAPI expects little-endian ciphertext.
        ReverseMemCopy(pData, pClientExchangeValue, cbClientExchangeValue);
    }

    DebugLog((DEB_TRACE, "Decrypt the master secret.\n"));

    if(!(pContext->RipeZombie->dwFlags & SP_CACHE_FLAG_MASTER_EPHEM))
    {
        // The private key lives in the client's user profile, so act as
        // the caller while using it.
        fImpersonating = SslImpersonateClient();
    }

    // Decrypt the master_secret.
    if(!CryptImportKey(pContext->RipeZombie->hMasterProv,
                       pbBlob,
                       cbBlob,
                       hExchKey,
                       dwFlags,
                       &hMasterKey))
    {
        SP_LOG_RESULT(GetLastError());
        DebugLog((DEB_TRACE, "Master secret did not decrypt correctly.\n"));

        // Guard against the PKCS#1 (Bleichenbacher) attack by generating a
        // random master key and continuing - the handshake will fail later
        // without leaking whether the padding was valid.
        pctRet = GenerateRandomMasterKey(pContext, &hMasterKey);
        if(pctRet != PCT_ERR_OK)
        {
            pctRet = PCT_INT_INTERNAL_ERROR;
            goto cleanup;
        }
    }
    else
    {
        DebugLog((DEB_TRACE, "Master secret decrypted successfully.\n"));

        // Set highest supported protocol. The CSP will use this to check for
        // version fallback attacks.
        if(!CryptSetKeyParam(hMasterKey,
                             KP_HIGHEST_VERSION,
                             (PBYTE)&dwHighestProtocol,
                             CRYPT_SERVER))
        {
            SP_LOG_RESULT(GetLastError());
            if(GetLastError() == NTE_BAD_VER)
            {
                // Rollback detected - abort the handshake.
                pctRet = SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
                CryptDestroyKey(hMasterKey);
                goto cleanup;
            }
        }
    }

    pContext->RipeZombie->hMasterKey = hMasterKey;

    CryptDestroyKey(hExchKey);
    hExchKey = 0;

    // Build the session keys.
    pctRet = MakeSessionKeys(pContext,
                             pContext->RipeZombie->hMasterProv,
                             hMasterKey);
    if(pctRet != PCT_ERR_OK)
    {
        SP_LOG_RESULT(pctRet);
        goto cleanup;
    }

    // Update perf counter.
    InterlockedIncrement(&g_cServerHandshakes);

    pctRet = PCT_ERR_OK;

cleanup:
    if(fImpersonating)
    {
        RevertToSelf();
    }
    if(pbBlob != NULL)
    {
        SafeAllocaFree(pbBlob);
    }
    if(hExchKey)
    {
        CryptDestroyKey(hExchKey);
    }
    return pctRet;
}
//+---------------------------------------------------------------------------
//
// Function: PkcsFinishMasterKey
//
// Synopsis: Complete the derivation of the master key by programming the
// CSP with the (protocol dependent) auxilary plaintext
// information.
//
// Arguments: [pContext] -- Schannel context.
// [hMasterKey] -- Handle to master key.
//
// History: 10-03-97 jbanes Created.
//
// Notes: This routine is called by both the client and server sides
// (via MakeSessionKeys).
//
//----------------------------------------------------------------------------
SP_STATUS
PkcsFinishMasterKey(
    PSPContext  pContext,       // in, out
    HCRYPTKEY   hMasterKey)     // in
{
    PCipherInfo  pCipherInfo = NULL;
    PHashInfo    pHashInfo   = NULL;
    SCHANNEL_ALG Algorithm;
    BOOL         fExportable = TRUE;
    DWORD        dwCipherFlags;

    if(pContext->RipeZombie->hMasterProv == 0)
    {
        return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    // Get pointer to pending cipher system.
    pCipherInfo = pContext->pPendingCipherInfo;

    // Get pointer to pending hash system.
    pHashInfo = pContext->pPendingHashInfo;

    // Determine whether this is an "exportable" cipher.
    if(pContext->dwPendingCipherSuiteIndex)
    {
        // Use cipher suite flags (SSL3 & TLS).
        dwCipherFlags = UniAvailableCiphers[pContext->dwPendingCipherSuiteIndex].dwFlags;
        if(dwCipherFlags & DOMESTIC_CIPHER_SUITE)
        {
            fExportable = FALSE;
        }
    }
    else
    {
        // Use key length (PCT & SSL2): anything over 40 bits is domestic.
        if(pCipherInfo->dwStrength > 40)
        {
            fExportable = FALSE;
        }
    }

    // Specify encryption algorithm (skipped entirely for NULL ciphers).
    if(pCipherInfo->aiCipher != CALG_NULLCIPHER)
    {
        ZeroMemory(&Algorithm, sizeof(Algorithm));
        Algorithm.dwUse = SCHANNEL_ENC_KEY;
        Algorithm.Algid = pCipherInfo->aiCipher;
        Algorithm.cBits = pCipherInfo->cbSecret * 8;
        if(fExportable)
        {
            Algorithm.dwFlags = INTERNATIONAL_USAGE;
        }
        if(!CryptSetKeyParam(hMasterKey,
                             KP_SCHANNEL_ALG,
                             (PBYTE)&Algorithm,
                             0))
        {
            SP_LOG_RESULT(GetLastError());
            return PCT_INT_INTERNAL_ERROR;
        }
    }

    // Specify hash algorithm.
    // NOTE(review): Algorithm is only ZeroMemory'd inside the branch above,
    // so for CALG_NULLCIPHER the fields not assigned below (e.g. dwFlags)
    // are uninitialized stack data here; for non-NULL ciphers dwFlags
    // carries over the INTERNATIONAL_USAGE setting. Confirm whether the
    // CSP ignores dwFlags for SCHANNEL_MAC_KEY.
    Algorithm.dwUse = SCHANNEL_MAC_KEY;
    Algorithm.Algid = pHashInfo->aiHash;
    Algorithm.cBits = pHashInfo->cbCheckSum * 8;
    if(!CryptSetKeyParam(hMasterKey,
                         KP_SCHANNEL_ALG,
                         (PBYTE)&Algorithm,
                         0))
    {
        SP_LOG_RESULT(GetLastError());
        return PCT_INT_INTERNAL_ERROR;
    }

    // Finish creating the master_secret by feeding the CSP the
    // protocol-specific plaintext inputs (clear key, randoms, etc).
    switch(pContext->RipeZombie->fProtocol)
    {
        case SP_PROT_PCT1_CLIENT:
        case SP_PROT_PCT1_SERVER:
        {
            CRYPT_DATA_BLOB Data;

            // Specify clear key value.
            if(pContext->RipeZombie->cbClearKey)
            {
                Data.pbData = pContext->RipeZombie->pClearKey;
                Data.cbData = pContext->RipeZombie->cbClearKey;
                if(!CryptSetKeyParam(hMasterKey,
                                     KP_CLEAR_KEY,
                                     (BYTE*)&Data,
                                     0))
                {
                    SP_LOG_RESULT(GetLastError());
                    return PCT_INT_INTERNAL_ERROR;
                }
            }

            // Specify the CH_CHALLENGE_DATA (plays the client_random role).
            Data.pbData = pContext->pChallenge;
            Data.cbData = pContext->cbChallenge;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_CLIENT_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }

            // Specify the SH_CONNECTION_ID_DATA (plays the server_random role).
            Data.pbData = pContext->pConnectionID;
            Data.cbData = pContext->cbConnectionID;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_SERVER_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }

            // Specify the SH_CERTIFICATE_DATA (PCT1 mixes the server
            // certificate into the key derivation).
            Data.pbData = pContext->RipeZombie->pbServerCertificate;
            Data.cbData = pContext->RipeZombie->cbServerCertificate;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_CERTIFICATE,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }
            break;
        }

        case SP_PROT_SSL2_CLIENT:
        case SP_PROT_SSL2_SERVER:
        {
            CRYPT_DATA_BLOB Data;

            // Specify clear key value.
            if(pContext->RipeZombie->cbClearKey)
            {
                Data.pbData = pContext->RipeZombie->pClearKey;
                Data.cbData = pContext->RipeZombie->cbClearKey;
                if(!CryptSetKeyParam(hMasterKey,
                                     KP_CLEAR_KEY,
                                     (BYTE*)&Data,
                                     0))
                {
                    SP_LOG_RESULT(GetLastError());
                    return PCT_INT_INTERNAL_ERROR;
                }
            }

            // Specify the CH_CHALLENGE_DATA.
            Data.pbData = pContext->pChallenge;
            Data.cbData = pContext->cbChallenge;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_CLIENT_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }

            // Specify the SH_CONNECTION_ID_DATA.
            Data.pbData = pContext->pConnectionID;
            Data.cbData = pContext->cbConnectionID;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_SERVER_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }
            break;
        }

        case SP_PROT_SSL3_CLIENT:
        case SP_PROT_SSL3_SERVER:
        case SP_PROT_TLS1_CLIENT:
        case SP_PROT_TLS1_SERVER:
        {
            CRYPT_DATA_BLOB Data;

            // Specify client_random.
            Data.pbData = pContext->rgbS3CRandom;
            Data.cbData = CB_SSL3_RANDOM;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_CLIENT_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }

            // Specify server_random.
            Data.pbData = pContext->rgbS3SRandom;
            Data.cbData = CB_SSL3_RANDOM;
            if(!CryptSetKeyParam(hMasterKey,
                                 KP_SERVER_RANDOM,
                                 (BYTE*)&Data,
                                 0))
            {
                SP_LOG_RESULT(GetLastError());
                return PCT_INT_INTERNAL_ERROR;
            }
            break;
        }

        default:
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
    }

    return PCT_ERR_OK;
}
//+---------------------------------------------------------------------------
//
// Function: MakeSessionKeys
//
// Synopsis: Derive the session keys from the completed master key.
//
// Arguments: [pContext] -- Schannel context.
// [hProv] --
// [hMasterKey] -- Handle to master key.
//
// History: 10-03-97 jbanes Created.
//
// Notes: This routine is called by both the client side
// (PkcsGenerateClientMasterKey) and the server side
// (PkcsGenerateServerMasterKey).
//
//----------------------------------------------------------------------------
SP_STATUS
MakeSessionKeys(
    PSPContext  pContext,   // in
    HCRYPTPROV  hProv,      // in
    HCRYPTKEY   hMasterKey) // in
{
    HCRYPTHASH hMasterHash = 0;
    HCRYPTKEY  hLocalMasterKey = 0;
    BOOL       fClient;
    SP_STATUS  pctRet;

    //
    // Duplicate the master key if we're doing a reconnect handshake. This
    // will allow us to set the client_random and server_random properties
    // on the key without having to worry about different threads
    // interferring with each other.
    //
    if((pContext->Flags & CONTEXT_FLAG_FULL_HANDSHAKE) == 0)
    {
        if(!CryptDuplicateKey(hMasterKey, NULL, 0, &hLocalMasterKey))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
        hMasterKey = hLocalMasterKey;
    }

    // Finish the master_secret (program the CSP with the randoms etc).
    pctRet = PkcsFinishMasterKey(pContext, hMasterKey);
    if(pctRet != PCT_ERR_OK)
    {
        SP_LOG_RESULT(pctRet);
        goto cleanup;
    }

    // Direction matters below: a client's read key is the server's write
    // key and vice versa, hence the CRYPT_SERVER flag juggling.
    fClient = !(pContext->RipeZombie->fProtocol & SP_PROT_SERVERS);

    // Create the master hash object from the master_secret key.
    if(!CryptCreateHash(hProv,
                        CALG_SCHANNEL_MASTER_HASH,
                        hMasterKey,
                        0,
                        &hMasterHash))
    {
        pctRet = SP_LOG_RESULT(GetLastError());
        goto cleanup;
    }

    // Derive read key from the master hash object (skipped for NULL
    // ciphers, which encrypt nothing).
    if(pContext->hPendingReadKey)
    {
        CryptDestroyKey(pContext->hPendingReadKey);
        pContext->hPendingReadKey = 0;
    }
    if(pContext->pPendingCipherInfo->aiCipher != CALG_NULLCIPHER)
    {
        if(!CryptDeriveKey(hProv,
                           CALG_SCHANNEL_ENC_KEY,
                           hMasterHash,
                           CRYPT_EXPORTABLE | (fClient ? CRYPT_SERVER : 0),
                           &pContext->hPendingReadKey))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
    }

    // Derive write key from the master hash object.
    if(pContext->hPendingWriteKey)
    {
        CryptDestroyKey(pContext->hPendingWriteKey);
        pContext->hPendingWriteKey = 0;
    }
    if(pContext->pPendingCipherInfo->aiCipher != CALG_NULLCIPHER)
    {
        if(!CryptDeriveKey(hProv,
                           CALG_SCHANNEL_ENC_KEY,
                           hMasterHash,
                           CRYPT_EXPORTABLE | (fClient ? 0 : CRYPT_SERVER),
                           &pContext->hPendingWriteKey))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
    }

    if((pContext->RipeZombie->fProtocol & SP_PROT_SSL2) ||
       (pContext->RipeZombie->fProtocol & SP_PROT_PCT1))
    {
        // Set the IV on the client and server encryption keys.
        // NOTE(review): assumes the keys were derived above, i.e. a
        // non-NULL cipher - confirm NULL ciphers never negotiate SSL2/PCT1.
        if(!CryptSetKeyParam(pContext->hPendingReadKey,
                             KP_IV,
                             pContext->RipeZombie->pKeyArgs,
                             0))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
        if(!CryptSetKeyParam(pContext->hPendingWriteKey,
                             KP_IV,
                             pContext->RipeZombie->pKeyArgs,
                             0))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
    }

    if(pContext->RipeZombie->fProtocol & SP_PROT_SSL2)
    {
        // SSL 2.0 uses same set of keys for both encryption and MAC.
        pContext->hPendingReadMAC = 0;
        pContext->hPendingWriteMAC = 0;
    }
    else
    {
        // Derive read MAC from the master hash object.
        if(pContext->hPendingReadMAC)
        {
            CryptDestroyKey(pContext->hPendingReadMAC);
        }
        if(!CryptDeriveKey(hProv,
                           CALG_SCHANNEL_MAC_KEY,
                           hMasterHash,
                           CRYPT_EXPORTABLE | (fClient ? CRYPT_SERVER : 0),
                           &pContext->hPendingReadMAC))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }

        // Derive write MAC from the master hash object.
        if(pContext->hPendingWriteMAC)
        {
            CryptDestroyKey(pContext->hPendingWriteMAC);
        }
        if(!CryptDeriveKey(hProv,
                           CALG_SCHANNEL_MAC_KEY,
                           hMasterHash,
                           CRYPT_EXPORTABLE | (fClient ? 0 : CRYPT_SERVER),
                           &pContext->hPendingWriteMAC))
        {
            pctRet = SP_LOG_RESULT(GetLastError());
            goto cleanup;
        }
    }

    pctRet = PCT_ERR_OK;

cleanup:
    if(hMasterHash)
    {
        CryptDestroyHash(hMasterHash);
    }
    if(hLocalMasterKey)
    {
        CryptDestroyKey(hLocalMasterKey);
    }
    return pctRet;
}
//+---------------------------------------------------------------------------
//
// Function: Ssl3ParseServerKeyExchange
//
// Synopsis: Parse the ServerKeyExchange message and import modulus and
// exponent into a CryptoAPI public key.
//
// Arguments: [pContext] -- Schannel context.
//
// [pbMessage] -- Pointer to message.
//
// [cbMessage] -- Message length.
//
// [hServerPublic] -- Handle to public key from server's
// certificate. This is used to verify
// the message's signature.
//
// [phNewServerPublic] -- (output) Handle to new public key.
//
//
// History: 10-23-97 jbanes Created.
//
// Notes: This routine is called by the client-side only.
//
// The format of the ServerKeyExchange message is:
//
// struct {
// select (KeyExchangeAlgorithm) {
// case diffie_hellman:
// ServerDHParams params;
// Signature signed_params;
// case rsa:
// ServerRSAParams params;
// Signature signed_params;
// case fortezza_dms:
// ServerFortezzaParams params;
// };
// } ServerKeyExchange;
//
// struct {
// opaque rsa_modulus<1..2^16-1>;
// opaque rsa_exponent<1..2^16-1>;
// } ServerRSAParams;
//
//----------------------------------------------------------------------------
SP_STATUS
Ssl3ParseServerKeyExchange(
    PSPContext  pContext,           // in
    PBYTE       pbMessage,          // in
    DWORD       cbMessage,          // in
    HCRYPTKEY   hServerPublic,      // in
    HCRYPTKEY * phNewServerPublic)  // out
{
    PBYTE pbModulus = NULL;
    DWORD cbModulus;
    PBYTE pbExponent = NULL;
    DWORD cbExponent;
    PBYTE pbServerParams = NULL;
    DWORD cbServerParams;
    PBYTE pbMessageEnd;
    DWORD dwExponent;
    DWORD i;

    // No ServerKeyExchange message: the server is using the public key
    // from its certificate directly.
    if(pbMessage == NULL || cbMessage == 0)
    {
        *phNewServerPublic = 0;
        return PCT_ERR_OK;
    }

    // Mark start of ServerRSAParams structure.
    // This is used to build hash values.
    pbServerParams = pbMessage;
    pbMessageEnd   = pbMessage + cbMessage;

    if(cbMessage < 3)
    {
        return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
    }

    // Modulus length
    cbModulus = MAKEWORD(pbMessage[1], pbMessage[0]);
    pbMessage += 2;

    // Since the modulus is encoded as an INTEGER, it is padded with a leading
    // zero if its most significant bit is one. Remove this padding, if
    // present.
    if(pbMessage[0] == 0)
    {
        cbModulus -= 1;
        pbMessage += 1;
    }

    // Only 512..1024-bit ephemeral RSA keys are accepted.
    if(cbModulus < 512/8 || cbModulus > 1024/8)
    {
        return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
    }

    // BUG FIX: validate that the modulus plus the two-byte exponent length
    // actually lie within the message before reading them. Previously a
    // truncated/malformed message caused out-of-bounds reads.
    if(cbModulus + 2 > (DWORD)(pbMessageEnd - pbMessage))
    {
        return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
    }

    // Modulus
    pbModulus = pbMessage;
    pbMessage += cbModulus;

    // Exponent length
    cbExponent = MAKEWORD(pbMessage[1], pbMessage[0]);
    if(cbExponent < 1 || cbExponent > 4)
    {
        return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
    }
    pbMessage += 2;

    // BUG FIX: the exponent plus the two-byte signature length must also
    // fit within the message.
    if(cbExponent + 2 > (DWORD)(pbMessageEnd - pbMessage))
    {
        return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
    }

    // Exponent
    pbExponent = pbMessage;
    pbMessage += cbExponent;

    // Assemble the big-endian exponent bytes into a native DWORD.
    dwExponent = 0;
    for(i = 0; i < cbExponent; i++)
    {
        dwExponent <<= 8;
        dwExponent |= pbExponent[i];
    }

    // Compute length of ServerRSAParams structure.
    cbServerParams = (DWORD)(pbMessage - pbServerParams);

    //
    //  digitally-signed struct {
    //      select(SignatureAlgorithm) {
    //          case anonymous: struct { };
    //          case rsa:
    //              opaque md5_hash[16];
    //              opaque sha_hash[20];
    //          case dsa:
    //              opaque sha_hash[20];
    //      };
    //  } Signature;
    //
    {
        BYTE  rgbHashValue[CB_MD5_DIGEST_LEN + CB_SHA_DIGEST_LEN];
        PBYTE pbSignature;
        DWORD cbSignature;
        HCRYPTHASH hHash;
        PBYTE pbLocalBuffer;
        DWORD cbLocalBuffer;

        // Signature block length
        cbSignature = ((INT)pbMessage[0] << 8) + pbMessage[1];
        pbMessage += 2;
        pbSignature = pbMessage;

        // BUG FIX: the signature must fit in what remains of the message.
        if(cbSignature > (DWORD)(pbMessageEnd - pbMessage))
        {
            return SP_LOG_RESULT(PCT_INT_ILLEGAL_MSG);
        }

        // Allocate buffer for RSA operation.
        cbLocalBuffer = cbSignature;
        SafeAllocaAllocate(pbLocalBuffer, cbLocalBuffer);
        if(pbLocalBuffer == NULL)
        {
            return SP_LOG_RESULT(SEC_E_INSUFFICIENT_MEMORY);
        }

        // Reverse the signature (CryptoAPI wants little-endian).
        ReverseMemCopy(pbLocalBuffer, pbSignature, cbSignature);

        // Compute MD5 and SHA hash values over ClientHello.random +
        // ServerHello.random + ServerRSAParams.
        ComputeServerExchangeHashes(pContext,
                                    pbServerParams,
                                    cbServerParams,
                                    rgbHashValue,
                                    rgbHashValue + CB_MD5_DIGEST_LEN);

        if(!CryptCreateHash(pContext->RipeZombie->hMasterProv,
                            CALG_SSL3_SHAMD5,
                            0,
                            0,
                            &hHash))
        {
            SP_LOG_RESULT(GetLastError());
            SafeAllocaFree(pbLocalBuffer);
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
        }

        // set hash value
        if(!CryptSetHashParam(hHash,
                              HP_HASHVAL,
                              rgbHashValue,
                              0))
        {
            SP_LOG_RESULT(GetLastError());
            CryptDestroyHash(hHash);
            SafeAllocaFree(pbLocalBuffer);
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
        }

        DebugLog((DEB_TRACE, "Verify server_key_exchange message signature.\n"));

        // Verify the signature with the public key from the server's
        // certificate - proves the ephemeral key really came from the server.
        if(!CryptVerifySignature(hHash,
                                 pbLocalBuffer,
                                 cbSignature,
                                 hServerPublic,
                                 NULL,
                                 0))
        {
            DebugLog((DEB_WARN, "Signature Verify Failed: %x\n", GetLastError()));
            CryptDestroyHash(hHash);
            SafeAllocaFree(pbLocalBuffer);
            return SP_LOG_RESULT(PCT_ERR_INTEGRITY_CHECK_FAILED);
        }
        DebugLog((DEB_TRACE, "Server_key_exchange message signature verified okay.\n"));

        CryptDestroyHash(hHash);
        SafeAllocaFree(pbLocalBuffer);
    }

    //
    // Import ephemeral public key into CSP.
    //
    {
        BLOBHEADER *pBlobHeader;
        RSAPUBKEY  *pRsaPubKey;
        PBYTE pbBlob;
        DWORD cbBlob;

        // Allocate memory for PUBLICKEYBLOB.
        cbBlob = sizeof(BLOBHEADER) + sizeof(RSAPUBKEY) + cbModulus;
        SafeAllocaAllocate(pbBlob, cbBlob);
        if(pbBlob == NULL)
        {
            return SP_LOG_RESULT(SEC_E_INSUFFICIENT_MEMORY);
        }

        // Build PUBLICKEYBLOB from modulus and exponent.
        pBlobHeader = (BLOBHEADER *)pbBlob;
        pRsaPubKey  = (RSAPUBKEY *)(pBlobHeader + 1);

        pBlobHeader->bType    = PUBLICKEYBLOB;
        pBlobHeader->bVersion = CUR_BLOB_VERSION;
        pBlobHeader->reserved = 0;
        pBlobHeader->aiKeyAlg = CALG_RSA_KEYX;
        pRsaPubKey->magic     = 0x31415352;     // "RSA1"
        pRsaPubKey->bitlen    = cbModulus * 8;
        pRsaPubKey->pubexp    = dwExponent;
        ReverseMemCopy((PBYTE)(pRsaPubKey + 1), pbModulus, cbModulus);

        if(!CryptImportKey(pContext->RipeZombie->hMasterProv,
                           pbBlob,
                           cbBlob,
                           0,
                           0,
                           phNewServerPublic))
        {
            SP_LOG_RESULT(GetLastError());
            SafeAllocaFree(pbBlob);
            return SP_LOG_RESULT(PCT_INT_INTERNAL_ERROR);
        }

        SafeAllocaFree(pbBlob);
    }

    return PCT_ERR_OK;
}
| 30.356849
| 108
| 0.510746
|
[
"object",
"vector"
] |
f99809eb12d4312138c193d0ad018ab48c2a5150
| 366
|
h
|
C
|
MetroGame/adfdafsdf/Renderable.h
|
Kevintjeb/OpenCV-MetroGame
|
e3a88108576853c2f9f5a5901074b2d4a547cd39
|
[
"MIT"
] | null | null | null |
MetroGame/adfdafsdf/Renderable.h
|
Kevintjeb/OpenCV-MetroGame
|
e3a88108576853c2f9f5a5901074b2d4a547cd39
|
[
"MIT"
] | null | null | null |
MetroGame/adfdafsdf/Renderable.h
|
Kevintjeb/OpenCV-MetroGame
|
e3a88108576853c2f9f5a5901074b2d4a547cd39
|
[
"MIT"
] | null | null | null |
#pragma once
#include "Vect.h"
#include "Model.h"
#include <string>

namespace mg_gameLogic
{
	// A drawable scene object: the name of a model plus its world transform
	// (position, rotation axis + angle, and per-axis scale).
	class Renderable
	{
	public:
		Vec3f position, rotation, scale; // 'rotation' is the rotation axis paired with 'angle'
		float angle;                     // rotation angle around 'rotation' (units not shown here - presumably degrees; TODO confirm)
		std::string model;               // model identifier - presumably a name/path resolved by the renderer; TODO confirm

		Renderable();
		Renderable(const Renderable&);
		// 'scale' defaults to uniform (1,1,1).
		Renderable(std::string model, Vec3f pos, float angle, Vec3f rot, Vec3f scale = Vec3f(1, 1, 1));
		~Renderable();
	};
}
| 17.428571
| 97
| 0.691257
|
[
"model"
] |
f99bdaaac0f89994d85070e6597ff3370e5a4f7d
| 1,656
|
h
|
C
|
Promo_2023_Projet_Mai/Promo_2023_Projet_Mai/TerritoryConquest.h
|
dev1ous/Jelly-War
|
55353c1f111101a0286370236541b087fdf28d17
|
[
"Apache-2.0"
] | null | null | null |
Promo_2023_Projet_Mai/Promo_2023_Projet_Mai/TerritoryConquest.h
|
dev1ous/Jelly-War
|
55353c1f111101a0286370236541b087fdf28d17
|
[
"Apache-2.0"
] | null | null | null |
Promo_2023_Projet_Mai/Promo_2023_Projet_Mai/TerritoryConquest.h
|
dev1ous/Jelly-War
|
55353c1f111101a0286370236541b087fdf28d17
|
[
"Apache-2.0"
] | null | null | null |
#ifndef TERRITORYCONQUEST_H
#define TERRITORYCONQUEST_H
#include "FSM_Manager.h"
#include "TC_Joueur.h"
#include "CasesManager.h"
#include "Animator.h"
#include "SFML/Audio.hpp"

class FSM_Manager;

// "Territory Conquest" mini-game state: players color grid cells, the one
// owning the most cells when the timer runs out wins.
class TerritoryConquest final : public FSM
{
public:
	// Internal phases: explanation screen, gameplay, win screen.
	enum class State_TC {
		Expli, Game, Win
	};

	TerritoryConquest(FSM_Manager& mgr, WindowManager& w, sf::Font& font, const bool change);

	// Returns whether the given player collides - presumably with the arena
	// bounds or another player; TODO confirm.
	bool Collision(TC_Joueur*);
	// Assign/update the player's cell color over time (dt in seconds - TODO confirm).
	void AssigningColor(TC_Joueur*, const float&);
	// Accumulate the given player's score into the results.
	void scores_result(TC_Joueur*);
	// Advance the round countdown by the elapsed time.
	void RunningTimer(const float&);
	// Build a drawable text showing the remaining time.
	sf::Text printTimer();
	void Pause()override {}     // no pause behavior for this mini-game
	void Resume()override {}    // no resume behavior for this mini-game
	void processEvents(const sf::Event&)override;
	void update(const float&)override;
	void draw()override;
	// Draw the HUD elements for the given player.
	void hud_others(TC_Joueur* joueur);
	// Run the victory screen animation ("Victoire" = victory).
	void Victoire(const float& dt);
	// Compute the final ranking ("Classement" = ranking).
	void Set_Classement();

	// Shared background music for this mini-game (inline static: one instance).
	inline static sf::Music m_musicTC;

private:
	Animator anim;
	State_TC m_stateTC{ State_TC::Expli };  // current phase, starts on the explanation screen
	sf::VertexArray m_grid;                 // the playfield grid geometry
	sf::Sprite HUD;
	sf::Vector2f posHUD;
	sf::Sprite m_arene, m_expli;            // arena background / explanation screen
	sf::Sprite Ecran_Victoire;              // victory screen sprite
	sf::Sprite place;
	sf::Sprite Tete;
	sf::Sprite loterie;
	sf::Sprite perso_colo;
	sf::Sprite Ecran;
	sf::Sprite Lueur;
	sf::Font font2;
	Bonus bonuswin;
	Animator anim2;
	TC_Joueur m_joueur;                     // local player ("joueur" = player)
	sf::Text Tclassement;                   // ranking text
	sf::String text_class;
	std::vector <int> tabscore;             // per-player scores
	std::map<int, TC_Joueur*> m_joueurs;    // players keyed by id - TODO confirm key semantics
	CasesManager m_cases;                   // grid-cell ownership manager ("cases" = cells)
	float timer{ 30.f };                    // round duration, seconds
	float timer_expli{15.f };               // explanation screen duration, seconds
	int compteur{ 0 };                      // generic counter ("compteur")
	sf::Text ScoreText;
	std::array<std::array<sf::Texture, 3>, 8> m_arrayTexture;  // 8 players x 3 textures - TODO confirm
	const int min{ 0 };
	const int max{ 2 };
	const int SizeCase{ 100 };              // cell size in pixels
	float m_timer_anim{ 0.f };
	float m_timer_anim_2{ 0.f };
};
#endif
| 21.506494
| 90
| 0.728261
|
[
"vector"
] |
f9be3ae3df8be23cff2feac0ee47c9b56d3f1554
| 2,500
|
h
|
C
|
src/crypto/aes.h
|
utix/lib-common
|
ae09e6174b4f76a1f3d659705ae85cc0e669219e
|
[
"Apache-2.0"
] | 1
|
2020-07-17T15:51:18.000Z
|
2020-07-17T15:51:18.000Z
|
src/crypto/aes.h
|
nicopauss/lib-common
|
f65e33b77b4936898953a0a092029be2fb94a834
|
[
"Apache-2.0"
] | null | null | null |
src/crypto/aes.h
|
nicopauss/lib-common
|
f65e33b77b4936898953a0a092029be2fb94a834
|
[
"Apache-2.0"
] | null | null | null |
/**
 * \file aes.h
 *
 * \brief AES block cipher interface (ECB, CBC and CFB modes).
 */
#ifndef XYSSL_AES_H
#define XYSSL_AES_H

#define AES_ENCRYPT     1
#define AES_DECRYPT     0

/**
 * \brief          AES context structure
 */
typedef struct {
    int nr;                     /*!< number of rounds */
    uint32_t * nonnull rk;      /*!< AES round keys (points into buf - TODO confirm) */
    uint32_t buf[68];           /*!< unaligned data (round-key storage) */
} aes_ctx;

#ifdef __cplusplus
extern "C" {
#endif

/**
 * \brief          AES key schedule (encryption)
 *
 * \param ctx      AES context to be initialized
 * \param key      encryption key
 * \param keysize  must be 128, 192 or 256
 */
void aes_setkey_enc(aes_ctx * nonnull ctx, const byte * nonnull key,
                    int keysize) __leaf;

/**
 * \brief          AES key schedule (decryption)
 *
 * \param ctx      AES context to be initialized
 * \param key      decryption key
 * \param keysize  must be 128, 192 or 256
 */
void aes_setkey_dec(aes_ctx * nonnull ctx, const byte * nonnull key,
                    int keysize) __leaf;

/**
 * \brief          AES-ECB block encryption/decryption
 *
 * \param ctx      AES context
 * \param mode     AES_ENCRYPT or AES_DECRYPT
 * \param input    16-byte input block
 * \param output   16-byte output block
 */
void aes_crypt_ecb(aes_ctx * nonnull ctx, int mode, const byte input[16],
                   byte output[16]) __leaf;

/**
 * \brief          AES-CBC buffer encryption/decryption
 *
 * \param ctx      AES context
 * \param mode     AES_ENCRYPT or AES_DECRYPT
 * \param length   length of the input data (presumably must be a multiple
 *                 of 16 - TODO confirm against the implementation)
 * \param iv       initialization vector (updated after use)
 * \param input    buffer holding the input data
 * \param output   buffer holding the output data
 */
void aes_crypt_cbc(aes_ctx * nonnull ctx, int mode, int length,
                   byte iv[16], const byte * nonnull input,
                   byte * nonnull output) __leaf;

/**
 * \brief          AES-CFB buffer encryption/decryption
 *
 * \param ctx      AES context
 * \param mode     AES_ENCRYPT or AES_DECRYPT
 * \param length   length of the input data
 * \param iv_off   offset in IV (updated after use)
 * \param iv       initialization vector (updated after use)
 * \param input    buffer holding the input data
 * \param output   buffer holding the output data
 */
void aes_crypt_cfb(aes_ctx * nonnull ctx, int mode, int length,
                   int * nonnull iv_off, byte iv[16],
                   const byte * nonnull input, byte * nonnull output) __leaf;

#ifdef __cplusplus
}
#endif

#endif /* aes.h */
| 28.409091
| 77
| 0.6224
|
[
"vector"
] |
f9c09a03b8a1310e08b5df4c0236103a8c18f9f2
| 1,409
|
h
|
C
|
misc/gnuplot/GnuplotSplotElementColorLines.h
|
k-a-z-u/KLib
|
cd4d11fb68921b2ff42a6f90055447f5df9a5b1e
|
[
"Apache-2.0"
] | 2
|
2016-12-17T10:14:18.000Z
|
2019-07-16T09:06:07.000Z
|
misc/gnuplot/GnuplotSplotElementColorLines.h
|
k-a-z-u/KLib
|
cd4d11fb68921b2ff42a6f90055447f5df9a5b1e
|
[
"Apache-2.0"
] | null | null | null |
misc/gnuplot/GnuplotSplotElementColorLines.h
|
k-a-z-u/KLib
|
cd4d11fb68921b2ff42a6f90055447f5df9a5b1e
|
[
"Apache-2.0"
] | 3
|
2015-06-23T09:48:49.000Z
|
2018-03-28T08:28:06.000Z
|
#ifndef GNUPLOTSPLOTELEMENTCOLORLINES_H
#define GNUPLOTSPLOTELEMENTCOLORLINES_H

#include <vector>

#include "GnuplotSplotElementRaw.h"
#include "attributes/GnuplotAttrStroke.h"
#include "misc/GnuplotStroke.h"

namespace K {

	/**
	 * splot element drawing palette-colored 3D lines: each point carries a
	 * scalar value that gnuplot maps through the current palette
	 * ("with lines palette").
	 */
	class GnuplotSplotElementColorLines : public GnuplotSplotElementRaw, public GnuplotAttrStroke {

		/** one 3D point together with its palette color value */
		struct ColorPoint {
			GnuplotPoint3 p;
			const float color;
			ColorPoint(const GnuplotPoint3& p, const float color) : p(p), color(color) {;}
		};

		/** all added points; "empty" sentinels mark line/face splits */
		std::vector<ColorPoint> points;

	public:

		/** emit the plot-command header for this element */
		void addHeaderTo(std::ostream& ss, const GnuplotStringMod* mod) const override {
			if (empty()) {return;}
			ss << "'-' with lines palette ";
			ss << attrCustom << " ";
			ss << stroke.toGP(false);
			ss << " title '" << mod->modEntryTitle(title) << "'";
		}

		/** add an empty line (splits the drawn line at this position) */
		void splitFace() {
			GnuplotPoint3 gp = GnuplotPoint3::getEmpty();
			points.push_back(ColorPoint(gp, 0));
		}

		/** add a new point to output */
		void add(const GnuplotPoint3 p, const float palette) {
			points.push_back(ColorPoint(p,palette));
		}

		/** whether no points have been added */
		bool empty() const override {
			return points.empty();
		}

		/** emit the inline data block; terminated by gnuplot's "e" marker */
		void addDataTo(std::ostream& ss) const override {
			for (const ColorPoint& p : points) {
				// BUG FIX: the "empty" sentinel added by splitFace() was
				// previously skipped without output, so the split never
				// reached gnuplot. Emit the blank line gnuplot uses as a
				// data-block separator instead.
				if (p.p.isEmpty()) {ss << "\n"; continue;}
				ss << p.p.x << ' ' << p.p.y << ' ' << p.p.z << ' ' << p.color << "\n";
			}
			ss << "e\n";
		}

	};

}

#endif // GNUPLOTSPLOTELEMENTCOLORLINES_H
| 23.483333
| 96
| 0.654365
|
[
"vector"
] |
58fc8d027d228c4141c25c12b2fff9c1beb396ed
| 2,762
|
h
|
C
|
apps/DARTS_LENET/layers/inputLayer.h
|
randres2011/DARTS
|
d3a0d28926b15796661783f91451dcd313905582
|
[
"BSD-2-Clause"
] | null | null | null |
apps/DARTS_LENET/layers/inputLayer.h
|
randres2011/DARTS
|
d3a0d28926b15796661783f91451dcd313905582
|
[
"BSD-2-Clause"
] | null | null | null |
apps/DARTS_LENET/layers/inputLayer.h
|
randres2011/DARTS
|
d3a0d28926b15796661783f91451dcd313905582
|
[
"BSD-2-Clause"
] | null | null | null |
/*
* Copyright (c) 2011-2014, University of Delaware
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
// DARTS PLUS
#pragma once
#include "darts.h"
#include "../infrastructure/layerTemplate.h"
#include <vector>
#include "../matrix/matrix.h"
#include "../Parameters.h"
#include "../infrastructure/Framework.h"
using namespace darts;
// Codelet that fills the input matrix of its enclosing InputLayer and then
// signals the next layer's codelets (see LoadInput::fire()).
class LoadInput : public Codelet
{
public:
	// dep/res/stat are forwarded unchanged to the Codelet base class.
	LoadInput(uint32_t dep, uint32_t res, ThreadedProcedure * myTP, uint32_t stat):
	Codelet(dep, res, myTP, stat) { }
	virtual void fire(void);
};
// Layer holding the network input matrix plus the single LoadInput codelet
// that populates it.
class InputLayer : public Layer
{
public:
	matrix *input;       // externally owned matrix, filled by LoadInput::fire()
	LoadInput input_cd;  // the only codelet of this layer (1 dep, 1 res, SHORTWAIT)

	// Registers the load codelet with the Layer framework and immediately
	// marks the layer ready to start.
	InputLayer(trig_cd *prevLayerTrig, trig_cd *nextLayerTrig, uint32_t layerId, uint64_t *startTime, uint64_t *stopTime, matrix *input_) :
	Layer(prevLayerTrig, nextLayerTrig, layerId, startTime, stopTime),
	input(input_), input_cd(1, 1, this, SHORTWAIT)
	{
		Layer::insertElementToMap((Codelet *)&input_cd);
		Layer::registerCodelet((Codelet *)&input_cd);
		Layer::layerReadyToStart();
	}
};
void
LoadInput::fire(void)
{
InputLayer *myTP = static_cast<InputLayer*>(myTP_); // We obtain our TP
initOneMatrix(myTP->input);
for (uint32_t i = 0; i < myTP->Layer::nextLayerMaps->size(); i++)
{
myTP->Layer::nextLayerMaps->at(i)->decDep();
}
//std::cout<<next_map->size()<<" LoadInput\n";
//resetCodelet();
}
| 33.277108
| 143
| 0.700579
|
[
"vector"
] |
58ff064204d21d1e9487fb4846b402dce420bc0a
| 528
|
h
|
C
|
book/CH08/S19_Binding_a_pipeline_object.h
|
THISISAGOODNAME/vkCookBook
|
d022b4151a02c33e5c58534dc53ca39610eee7b5
|
[
"MIT"
] | 5
|
2019-03-02T16:29:15.000Z
|
2021-11-07T11:07:53.000Z
|
book/CH08/S19_Binding_a_pipeline_object.h
|
THISISAGOODNAME/vkCookBook
|
d022b4151a02c33e5c58534dc53ca39610eee7b5
|
[
"MIT"
] | null | null | null |
book/CH08/S19_Binding_a_pipeline_object.h
|
THISISAGOODNAME/vkCookBook
|
d022b4151a02c33e5c58534dc53ca39610eee7b5
|
[
"MIT"
] | 2
|
2018-07-10T18:15:40.000Z
|
2020-01-03T04:02:32.000Z
|
//
// Created by aicdg on 2017/6/22.
//

//
// Chapter: 08 Graphics and Compute Pipelines
// Recipe: 19 Binding a pipeline object

#ifndef VKCOOKBOOK_S19_BINDING_A_PIPELINE_OBJECT_H
#define VKCOOKBOOK_S19_BINDING_A_PIPELINE_OBJECT_H

#include "Common.h"

namespace VKCookbook {

	// Binds <pipeline> to the graphics or compute bind point of the given
	// command buffer (presumably wraps vkCmdBindPipeline — see the matching
	// .cpp for the implementation).
	void BindPipelineObject( VkCommandBuffer command_buffer,
	                         VkPipelineBindPoint pipeline_type,
	                         VkPipeline pipeline );

};

#endif //VKCOOKBOOK_S19_BINDING_A_PIPELINE_OBJECT_H
| 22
| 64
| 0.693182
|
[
"object"
] |
45094c4ebf6c77eb1a622f5b9b37ddaf8a4232c1
| 9,385
|
h
|
C
|
mc/leandro.h
|
Leandro-OBt/scripts
|
ac9847ec1aaea4fe20803a1413a1c9ab819b889f
|
[
"MIT"
] | 2
|
2020-04-08T01:48:34.000Z
|
2020-05-31T23:42:38.000Z
|
mc/leandro.h
|
Leandro-OBt/scripts
|
ac9847ec1aaea4fe20803a1413a1c9ab819b889f
|
[
"MIT"
] | null | null | null |
mc/leandro.h
|
Leandro-OBt/scripts
|
ac9847ec1aaea4fe20803a1413a1c9ab819b889f
|
[
"MIT"
] | null | null | null |
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strcmp() used by read_pdb()/contact_map_ca() */
#include <time.h>   /* time() used by random_seed() */
typedef struct {
char res_name[5];
int res_num;
float x, y, z; // coordinates
char type; // atom type
int atm_num;
char element[2];
char atm_name[5];
} atom ;
// Writes the model number. This must be the first line of each model in the file
void output_pdb_model( FILE *output,int n_model ){
	fprintf(output,"MODEL %d\n",n_model);
}

// Writes the full coordinate line for a given atom "atm" in the file.
// Chain is hard-coded to 'A', occupancy to 1.00 and B-factor to 0.00.
void output_pdb_coordinates( FILE *output , int atm , atom *p ){
	fprintf(output,"ATOM %5d %4s %3s A%4d %8.3f%8.3f%8.3f 1.00 0.00 %s\n", atm , p[atm].atm_name , p[atm].res_name, p[atm].res_num , p[atm].x , p[atm].y , p[atm].z , p[atm].element);
}

// Writes "ENDMDL". This must be the last line of each model in the file
void output_pdb_endmdl( FILE *output ){
	fprintf(output,"ENDMDL\n");
}
/* Writes one complete model to "output": MODEL header, one ATOM line per
 * atom in p[1..total_atm], then ENDMDL.
 *
 * BUG FIX: the MODEL number was previously taken from the uninitialized
 * loop counter "i"; it now uses the "model" parameter as intended.
 */
void output_pdb( FILE *output , atom *p , int total_atm, int model ){
	int i;
	output_pdb_model( output , model );
	for( i=1 ; i<=total_atm ; i++ ) output_pdb_coordinates(output, i, p);
	output_pdb_endmdl(output);
}
// Reads one coordinate line of the file.
// !! This function relies in the full PDB file format, with chain and element information !!
//
// NOTE(review): every "%s" conversion below is unbounded; a malformed input
// line longer than the destination buffers (garbage[20], the 5-byte name
// fields) will overflow them — only feed well-formed, pre-filtered input.
void input_pdb_coordinates(FILE *input, int atm , atom *p){
	char garbage[20];
	fscanf(input,"%s%s%s%s%s%s%f%f%f%s%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0],&p[atm].element[0]);
	//fscanf(input,"%s%s%s%s%s%f%f%f%s%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0],&p[atm].element[0]);
	//fscanf(input,"%s%s%s%s%s%f%f%f%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0]);
	// fscanf(input,"%s%s%f%f%f" , &p[atm].atm_name[0] , &p[atm].res_name[0] , &p[atm].x , &p[atm].y , &p[atm].z );
	// ATOM 22 HD1 HIS 2 39.600 61.520 29.570 1.00 0.00
}
/*
void input_pdb_coordinates(FILE *input, int atm , atom *p){
char garbage[20]; //fscanf(input,"%s%s%s%s%s%s%f%f%f%s%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0],&p[atm].element[0]);
//fscanf(input,"%s%s%s%s%s%f%f%f%s%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0],&p[atm].element[0]);
fscanf(input,"%s%s%s%s%s%f%f%f%s%s",&garbage[0],&garbage[0],&p[atm].atm_name[0],&p[atm].res_name[0],&garbage[0],&p[atm].x,&p[atm].y,&p[atm].z,&garbage[0],&garbage[0]);
// ATOM 22 HD1 HIS 2 39.600 61.520 29.570 1.00 0.00
}
*/
/* Reads the coordinates of "total_atm" atoms from the PDB file "filename"
 * into p[1..total_atm], assigning sequential atom numbers and counting
 * residues by the appearance of backbone "N" atoms.
 *
 * NOTE(review): the file is pre-processed through grep/cut/paste via
 * system(), so a filename containing spaces or shell metacharacters will
 * break — or execute — the command. Use trusted filenames only.
 */
void read_pdb(char filename[200] , int total_atm, atom *p){
	int atm , res; // atom and residue numbers
	char command[500];
	res = 0;
	sprintf( command, "grep ATOM %s > temporary_pdb_atom ; cut -c 12-21 temporary_pdb_atom > temporary_atm ; cut -c 30-55 temporary_pdb_atom > temporary_coord ; paste temporary_atm temporary_coord > temporary_read_PDB_coordinates", filename);
	system(command);
	FILE *input;
	input=fopen("temporary_read_PDB_coordinates","rb");
	for( atm=1 ; atm<=total_atm ; atm++ ){
		input_pdb_coordinates( input , atm, p );
		if ( strcmp(p[atm].atm_name,"N")==0 ) res++; /* each backbone N starts a new residue */
		p[atm].atm_num = atm;
		p[atm].res_num = res;
	}
	fclose(input);
	system("rm temporary_pdb_atom temporary_atm temporary_coord temporary_read_PDB_coordinates");
}
/* Euclidean distance between atoms A and B of the same structure. */
float distance(atom *p, int A, int B){
	const float dx = p[A].x - p[B].x;
	const float dy = p[A].y - p[B].y;
	const float dz = p[A].z - p[B].z;
	return sqrt( dx*dx + dy*dy + dz*dz );
}
/* Euclidean distance between atom A of structure "a" and atom B of
 * structure "b". */
float distance_inter(atom *a, int A, atom *b, int B){
	double sum = 0.0;
	sum += pow((a[A].x-b[B].x),2);
	sum += pow((a[A].y-b[B].y),2);
	sum += pow((a[A].z-b[B].z),2);
	return sqrt(sum);
}
// Returns an integer random number between 0 and "max" (both inclusive).
int random_int(int max){
	return rand() % (max + 1);
}
// Returns a float random number between 0 and "max". The precision (number
// of representable steps) is controlled by the constant SCALE.
float random_float(float max){
	const int SCALE = 10000;              /* four decimal places of resolution */
	const int ticks = (int) (max * SCALE);
	float r = rand() % (ticks + 1);
	return r / SCALE;
}
/* Seeds rand() from the wall clock; call once before using random_int()
 * or random_float(). */
void random_seed(){
	srand(time(NULL));
}
/* Copies only the coordinates (x, y, z) of atoms 1..total_atm from one
 * structure to another; all other fields are left untouched. */
void copy_structure(atom *from , atom *to , int total_atm){
	int k;
	for( k = 1 ; k <= total_atm ; k++ ){
		to[k].x = from[k].x;
		to[k].y = from[k].y;
		to[k].z = from[k].z;
	}
}
// This function computes the contact map from the coordinates of "atom *p". The contact matrix is stored in "int **matrix"
// Only the CA atoms are considered
//
// cutoff is the distance criterion for above which a pair of residues are considered as contacting
// alpha is the number of neighbor residues which must be ignored.
//
// Returns the total number of CA-CA contacts found; matrix[i][j] is
// incremented for each contacting residue pair (i < j in sequence).
//
// March 04, 2014
int contact_map_ca( int total_atm, atom *p, int alpha, float cutoff, int **matrix){
	int i,j;
	int total_contacts;
	total_contacts=0;
	for( i=1 ; i<=total_atm ; i++){
		for( j=(i+1); j<=total_atm ; j++ ){
			/* only alpha-carbon pairs count */
			if( strcmp(p[i].atm_name,"CA")==0 && strcmp(p[j].atm_name,"CA")==0 ){
				/* skip pairs closer than "alpha" residues in sequence */
				if( p[j].res_num > (p[i].res_num + alpha) ){
					if( distance(p, i ,j) <= cutoff ){
						total_contacts++;
						matrix[ p[i].res_num ][ p[j].res_num ]++;
					}
				}
			}
		}
	}
	return total_contacts;
}
// Computes the all-atom contact map from the coordinates of "atom *p"; the
// contact matrix (indexed by residue number) is accumulated in "matrix".
//
// cutoff: distance at or below which a pair of atoms counts as a contact.
// alpha:  number of sequence-neighbor residues to ignore.
//
// Returns the total number of contacts found.
//
// March 04, 2014
int contact_map( int total_atm, atom *p, int alpha, float cutoff, int **matrix){
	int a, b;
	int n_contacts = 0;
	for( a = 1 ; a <= total_atm ; a++ ){
		for( b = a + 1 ; b <= total_atm ; b++ ){
			/* skip pairs closer than "alpha" residues in sequence */
			if( p[b].res_num <= (p[a].res_num + alpha) ) continue;
			if( distance(p, a, b) > cutoff ) continue;
			n_contacts++;
			matrix[ p[a].res_num ][ p[b].res_num ]++;
		}
	}
	return n_contacts;
}
// Function for dynamic memory allocation of an array of the "atom" struct
// (valid indices 1..total_atm; slot 0 is allocated but unused).
// The incoming pointer "p" is ignored and simply overwritten.
//
// March 04, 2014
atom *allocate_atom( atom *p , int total_atm ){
	p = (atom *) malloc( (total_atm+1) * sizeof(atom) );
	return p;
}
// Function for memory liberation of a previously allocated array of the "atom" struct
// ("total_atm" is unused; kept for symmetry with allocate_atom).
//
// March 04, 2014
void free_atom( atom *p , int total_atm){
	free(p);
}
// Function for dynamic memory allocation of a 1D vector of integers
// (size+1 elements, valid indices 0..size).
//
// May 25, 2014
int *allocate_int( int *a , int size){
	a = (int *) malloc( (size+1) * sizeof(int) );
	return a;
}

// Function for memory liberation of a previously allocated 1D vector of integers
// ("size" is unused; kept for symmetry with allocate_int).
//
// May 25, 2014
void free_int( int *a , int size ){
	free( a );
}

// Function for initializing a 1D vector of integers: sets a[0..size] to "value".
//
// May 25, 2014
void initialize_int( int *a , int size , int value){
	int i;
	for( i=0 ; i<=size ; i++ ){
		a[i] = value ;
	}
}
// Function for dynamic memory allocation of a 1D vector of floats
// (size+1 elements, valid indices 0..size).
//
// May 25, 2014
float *allocate_float( float *a , int size){
	a = (float *) malloc( (size+1) * sizeof(float) );
	return a;
}

// Function for memory liberation of a previously allocated 1D vector of floats
//
// May 25, 2014
void free_float( float *a , int size ){
	free( a );
}

// Function for initializing a 1D vector of floats: sets a[0..size] to "value".
//
// May 25, 2014
void initialize_float( float *a , int size , float value){
	int i;
	for( i=0 ; i<=size ; i++ ){
		a[i] = value ;
	}
}
// Function for dynamic memory allocation of a 2D matrix of integers
// (valid indices 0..rows, 0..columns inclusive).
//
// March 04, 2014
int **allocate_int_2d( int **a , int rows, int columns ){
	int i;
	a = (int **) malloc( (rows+1) * sizeof(int *) );
	for( i=0 ; i<=rows ; i++ ) a[i] = (int *) malloc( (columns+1) * sizeof(int) );
	return a;
}

// Function for memory liberation of a previously allocated 2D matrix of integers
// ("columns" is unused; kept for symmetry with allocate_int_2d).
//
// March 04, 2014
void free_int_2d( int **a , int rows, int columns ){
	int i;
	for( i=0 ; i<=rows ; i++ ) free( a[i] );
	free( a );
}

// Function for initializing a 2D matrix of integers: sets every element
// a[0..rows][0..columns] to "value".
//
// March 04, 2014
void initialize_int_2d( int **a , int rows, int columns , int value){
	int i, j;
	for( i=0 ; i<=rows ; i++ ){
		for( j=0 ; j<=columns ; j++ ){
			a[i][j] = value ;
		}
	}
}
// Function for dynamic memory allocation of a 3D matrix of integers
//
// May 02, 2014
int ***allocate_int_3d( int ***a , int x, int y, int z){
int i,j;
a = (int ***) malloc( (x+1) * sizeof(int) );
for( i=0 ; i<=x ; i++ ) a[i] = (int **) malloc( (y+1) * sizeof(int) );
for( i=0 ; i<=x ; i++ ){
for( j=0 ; j<=y ; j++ ) a[i][j] = (int *) malloc( (z+1) * sizeof(int) );
}
return a;
}
// Function for memory liberation of a previously allocated 3D matrix of integers
// ("z" is unused; kept for symmetry with allocate_int_3d).
//
// May 02, 2014
void free_int_3d( int ***a , int x, int y, int z ){
	int i,j;
	for( i=0 ; i<=x ; i++ ){
		for( j=0 ; j<=y ; j++ ) free( a[i][j] );
	}
	for( i=0 ; i<=x ; i++ ) free( a[i] );
	free( a );
}
// Function for initializing a 3D matrix of integers: sets every element
// a[0..x][0..y][0..z] to "value".
//
// BUG FIX: removed a leftover debug printf that wrote every index triple
// to stdout on each call.
//
// May 02, 2014
void initialize_int_3d( int ***a , int x, int y , int z, int value){
	int i,j,k;
	for( i=0 ; i<=x ; i++ ){
		for( j=0 ; j<=y ; j++ ){
			for( k=0 ; k<=z ; k++ ){
				a[i][j][k] = value ;
			}
		}
	}
}
| 22.723971
| 239
| 0.609909
|
[
"vector",
"model",
"3d"
] |
450a4b0bab288ce3bdc0bc7efded57809f7135fc
| 688
|
h
|
C
|
LeetCode/C++/0554._Brick_Wall/solution.h
|
icgw/LeetCode
|
cb70ca87aa4604d1aec83d4224b3489eacebba75
|
[
"MIT"
] | 4
|
2018-09-12T09:32:17.000Z
|
2018-12-06T03:17:38.000Z
|
LeetCode/C++/0554._Brick_Wall/solution.h
|
icgw/algorithm
|
cb70ca87aa4604d1aec83d4224b3489eacebba75
|
[
"MIT"
] | null | null | null |
LeetCode/C++/0554._Brick_Wall/solution.h
|
icgw/algorithm
|
cb70ca87aa4604d1aec83d4224b3489eacebba75
|
[
"MIT"
] | null | null | null |
/*
* solution.h
* Copyright (C) 2020 Guowei Chen <[email protected]>
*
* Distributed under terms of the GPL license.
*/
#ifndef _SOLUTION_H_
#define _SOLUTION_H_
#include <vector>
using std::vector;
#include <map>
using std::map;
#include <algorithm>
using std::min;
/**
 * LeetCode 554 "Brick Wall": find the vertical line crossing the fewest
 * bricks. For every interior brick edge we count how many rows share that
 * x-offset; a line through the most shared edge crosses
 * (rows - edge count) bricks.
 */
class Solution {
public:
	/**
	 * @param wall  one vector per row; each entry is a brick width
	 * @return minimum number of bricks a vertical line must cross
	 */
	int leastBricks(std::vector<std::vector<int>>& wall) {
		std::map<int, int> brickEdges;  // x-offset -> number of rows with an edge there
		const int height = static_cast<int>(wall.size());
		int minCross = height;
		for (const auto& w : wall) {
			int width = 0;
			// Skip the last brick: its right edge is the wall border, where a
			// line would trivially cross nothing.
			// BUG FIX: the signed comparison avoids the size_t wrap-around the
			// old "i < w.size() - 1" suffered when a row was empty.
			for (int i = 0; i + 1 < static_cast<int>(w.size()); ++i) {
				width += w[i];
				minCross = std::min(minCross, height - (++brickEdges[width]));
			}
		}
		return minCross;
	}
};
#endif /* !_SOLUTION_H_ */
| 18.594595
| 65
| 0.601744
|
[
"vector"
] |
4512560cc99f122da13b985b8189f6850e4dad31
| 49,597
|
c
|
C
|
src/cmdio/linux/opu_process.c
|
chisuhua/codrive
|
9b4cd8649004d1bee1fe90a49a41e8f44f2bb5be
|
[
"MIT"
] | null | null | null |
src/cmdio/linux/opu_process.c
|
chisuhua/codrive
|
9b4cd8649004d1bee1fe90a49a41e8f44f2bb5be
|
[
"MIT"
] | null | null | null |
src/cmdio/linux/opu_process.c
|
chisuhua/codrive
|
9b4cd8649004d1bee1fe90a49a41e8f44f2bb5be
|
[
"MIT"
] | null | null | null |
#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdopu.h"
#include "amdgpu.h"
struct mm_struct;
#include "opu_priv.h"
#include "opu_device_queue_manager.h"
#include "opu_dbgmgr.h"
#include "opu_iommu.h"
#include "opu_svm.h"
/*
* List of struct opu_process (field opu_process).
* Unique/indexed by mm_struct*
*/
DEFINE_HASHTABLE(opu_processes_table, OPU_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(opu_processes_mutex);
DEFINE_SRCU(opu_processes_srcu);
/* For process termination handling */
static struct workqueue_struct *opu_process_wq;
/* Ordered, single-threaded workqueue for restoring evicted
* processes. Restoring multiple processes concurrently under memory
* pressure can lead to processes blocking each other from validating
* their BOs and result in a live-lock situation where processes
* remain evicted indefinitely.
*/
static struct workqueue_struct *opu_restore_wq;
static struct opu_process *find_process(const struct task_struct *thread);
static void opu_process_ref_release(struct kref *ref);
static struct opu_process *create_process(const struct task_struct *thread);
static int opu_process_init_cwsr_apu(struct opu_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
struct opu_procfs_tree {
struct kobject *kobj;
};
static struct opu_procfs_tree procfs;
/*
 * Structure for SDMA activity tracking
 */
struct opu_sdma_activity_handler_workarea {
	struct work_struct sdma_activity_work;  /* worker that sums the counters */
	struct opu_process_device *pdd;         /* process/device pair being queried */
	uint64_t sdma_activity_counter;         /* out: total SDMA activity */
};

/* Snapshot of one SDMA queue, taken under dqm_lock, so the user-space read
 * pointer can be dereferenced later without the lock held. */
struct temp_sdma_queue_list {
	uint64_t __user *rptr;     /* user-space read pointer of the queue */
	uint64_t sdma_val;         /* counter value read from *rptr */
	unsigned int queue_id;
	struct list_head list;
};
/*
 * Computes the total SDMA activity for the process/device pair in
 * workarea->pdd and stores it in workarea->sdma_activity_counter. The
 * caller schedules this work and waits on it synchronously (see
 * opu_procfs_show()), so the workarea may live on the caller's stack.
 */
static void opu_sdma_activity_worker(struct work_struct *work)
{
	struct opu_sdma_activity_handler_workarea *workarea;
	struct opu_process_device *pdd;
	uint64_t val;
	struct mm_struct *mm;
	struct queue *q;
	struct qcm_process_device *qpd;
	struct device_queue_manager *dqm;
	int ret = 0;
	struct temp_sdma_queue_list sdma_q_list;
	struct temp_sdma_queue_list *sdma_q, *next;

	workarea = container_of(work, struct opu_sdma_activity_handler_workarea,
				sdma_activity_work);

	pdd = workarea->pdd;
	if (!pdd)
		return;
	dqm = pdd->dev->dqm;
	qpd = &pdd->qpd;
	if (!dqm || !qpd)
		return;
	/*
	 * Total SDMA activity is current SDMA activity + past SDMA activity
	 * Past SDMA count is stored in pdd.
	 * To get the current activity counters for all active SDMA queues,
	 * we loop over all SDMA queues and get their counts from user-space.
	 *
	 * We cannot call get_user() with dqm_lock held as it can cause
	 * a circular lock dependency situation. To read the SDMA stats,
	 * we need to do the following:
	 *
	 * 1. Create a temporary list of SDMA queue nodes from the qpd->queues_list,
	 *    with dqm_lock/dqm_unlock().
	 * 2. Call get_user() for each node in temporary list without dqm_lock.
	 *    Save the SDMA count for each node and also add the count to the total
	 *    SDMA count counter.
	 *    Its possible, during this step, a few SDMA queue nodes got deleted
	 *    from the qpd->queues_list.
	 * 3. Do a second pass over qpd->queues_list to check if any nodes got deleted.
	 *    If any node got deleted, its SDMA count would be captured in the sdma
	 *    past activity counter. So subtract the SDMA counter stored in step 2
	 *    for this node from the total SDMA count.
	 */
	INIT_LIST_HEAD(&sdma_q_list.list);

	/*
	 * Create the temp list of all SDMA queues
	 */
	dqm_lock(dqm);

	list_for_each_entry(q, &qpd->queues_list, list) {
		if ((q->properties.type != OPU_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != OPU_QUEUE_TYPE_SDMA_XGMI))
			continue;

		sdma_q = kzalloc(sizeof(struct temp_sdma_queue_list), GFP_KERNEL);
		if (!sdma_q) {
			dqm_unlock(dqm);
			goto cleanup;
		}

		INIT_LIST_HEAD(&sdma_q->list);
		sdma_q->rptr = (uint64_t __user *)q->properties.read_ptr;
		sdma_q->queue_id = q->properties.queue_id;
		list_add_tail(&sdma_q->list, &sdma_q_list.list);
	}

	/*
	 * If the temp list is empty, then no SDMA queues nodes were found in
	 * qpd->queues_list. Return the past activity count as the total sdma
	 * count
	 */
	if (list_empty(&sdma_q_list.list)) {
		workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;
		dqm_unlock(dqm);
		return;
	}

	dqm_unlock(dqm);

	/*
	 * Get the usage count for each SDMA queue in temp_list.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm)
		goto cleanup;

	/* Adopt the process address space so get_user() on rptr works. */
	kthread_use_mm(mm);

	list_for_each_entry(sdma_q, &sdma_q_list.list, list) {
		val = 0;
		ret = read_sdma_queue_counter(sdma_q->rptr, &val);
		if (ret) {
			pr_debug("Failed to read SDMA queue active counter for queue id: %d",
				 sdma_q->queue_id);
		} else {
			sdma_q->sdma_val = val;
			workarea->sdma_activity_counter += val;
		}
	}

	kthread_unuse_mm(mm);
	mmput(mm);

	/*
	 * Do a second iteration over qpd_queues_list to check if any SDMA
	 * nodes got deleted while fetching SDMA counter.
	 */
	dqm_lock(dqm);

	workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;

	list_for_each_entry(q, &qpd->queues_list, list) {
		if (list_empty(&sdma_q_list.list))
			break;

		if ((q->properties.type != OPU_QUEUE_TYPE_SDMA) &&
		    (q->properties.type != OPU_QUEUE_TYPE_SDMA_XGMI))
			continue;

		/* Queue still alive: drop its node so it is not subtracted below. */
		list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
			if (((uint64_t __user *)q->properties.read_ptr == sdma_q->rptr) &&
			    (sdma_q->queue_id == q->properties.queue_id)) {
				list_del(&sdma_q->list);
				kfree(sdma_q);
				break;
			}
		}
	}

	dqm_unlock(dqm);

	/*
	 * If temp list is not empty, it implies some queues got deleted
	 * from qpd->queues_list during SDMA usage read. Subtract the SDMA
	 * count for each node from the total SDMA count.
	 */
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		workarea->sdma_activity_counter -= sdma_q->sdma_val;
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}

	return;

cleanup:
	list_for_each_entry_safe(sdma_q, next, &sdma_q_list.list, list) {
		list_del(&sdma_q->list);
		kfree(sdma_q);
	}
}
/**
 * @opu_get_cu_occupancy - Collect number of waves in-flight on this device
 * by current process. Translates acquired wave count into number of compute units
 * that are occupied.
 *
 * @atr: Handle of attribute that allows reporting of wave count. The attribute
 * handle encapsulates GPU device it is associated with, thereby allowing collection
 * of waves in flight, etc
 *
 * @buffer: Handle of user provided buffer updated with wave count
 *
 * Return: Number of bytes written to user buffer or an error value
 */
static int opu_get_cu_occupancy(struct attribute *attr, char *buffer)
{
	int cu_cnt;
	int wave_cnt;
	int max_waves_per_cu;
	struct opu_dev *dev = NULL;
	struct opu_process *proc = NULL;
	struct opu_process_device *pdd = NULL;

	pdd = container_of(attr, struct opu_process_device, attr_cu_occupancy);
	dev = pdd->dev;
	/* Not every device backend implements wave-count collection. */
	if (dev->opu2kgd->get_cu_occupancy == NULL)
		return -EINVAL;

	cu_cnt = 0;
	proc = pdd->process;
	/* No active queues means nothing in flight: report 0 occupied CUs. */
	if (pdd->qpd.queue_count == 0) {
		pr_debug("Gpu-Id: %d has no active queues for process %d\n",
			 dev->id, proc->pasid);
		return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
	}

	/* Collect wave count from device if it supports */
	wave_cnt = 0;
	max_waves_per_cu = 0;
	dev->opu2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
			&max_waves_per_cu);

	/* Translate wave count to number of compute units (ceiling division) */
	cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
	return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
}
/* sysfs "show" for the per-process entries: "pasid", "vram_<gpuid>" and
 * "sdma_<gpuid>". The SDMA value is computed synchronously by scheduling
 * and flushing opu_sdma_activity_worker(). */
static ssize_t opu_procfs_show(struct kobject *kobj, struct attribute *attr,
			       char *buffer)
{
	if (strcmp(attr->name, "pasid") == 0) {
		struct opu_process *p = container_of(attr, struct opu_process,
						     attr_pasid);

		return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
	} else if (strncmp(attr->name, "vram_", 5) == 0) {
		struct opu_process_device *pdd = container_of(attr, struct opu_process_device,
							      attr_vram);

		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
		struct opu_process_device *pdd = container_of(attr, struct opu_process_device,
							      attr_sdma);
		struct opu_sdma_activity_handler_workarea sdma_activity_work_handler;

		INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
			  opu_sdma_activity_worker);

		sdma_activity_work_handler.pdd = pdd;
		sdma_activity_work_handler.sdma_activity_counter = 0;

		schedule_work(&sdma_activity_work_handler.sdma_activity_work);

		/* Wait here: the workarea lives on this stack frame. */
		flush_work(&sdma_activity_work_handler.sdma_activity_work);

		return snprintf(buffer, PAGE_SIZE, "%llu\n",
				(sdma_activity_work_handler.sdma_activity_counter)/
				 SDMA_ACTIVITY_DIVISOR);
	} else {
		pr_err("Invalid attribute");
		return -EINVAL;
	}

	return 0;
}
/* Release callback for procfs kobjects allocated with opu_alloc_struct(). */
static void opu_procfs_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

/* Read-only ops for the per-process entries handled by opu_procfs_show(). */
static const struct sysfs_ops opu_procfs_ops = {
	.show = opu_procfs_show,
};

static struct kobj_type procfs_type = {
	.release = opu_procfs_kobj_release,
	.sysfs_ops = &opu_procfs_ops,
};
/* Create the procfs root folder ("proc") under the opu device kobject.
 * On failure, any partially created state is torn down again. */
void opu_procfs_init(void)
{
	int ret = 0;

	procfs.kobj = opu_alloc_struct(procfs.kobj);
	if (!procfs.kobj)
		return;

	ret = kobject_init_and_add(procfs.kobj, &procfs_type,
				   &opu_device->kobj, "proc");
	if (ret) {
		pr_warn("Could not create procfs proc folder");
		/* If we fail to create the procfs, clean up */
		opu_procfs_shutdown();
	}
}
/* Tear down the procfs root kobject created by opu_procfs_init();
 * a no-op when it was never created. */
void opu_procfs_shutdown(void)
{
	struct kobject *kobj = procfs.kobj;

	if (!kobj)
		return;

	procfs.kobj = NULL;
	kobject_del(kobj);
	kobject_put(kobj);
}
/* sysfs "show" for per-queue attributes: "size", "type" and "gpuid".
 * (Values are emitted without a trailing newline.) */
static ssize_t opu_procfs_queue_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	struct queue *q = container_of(kobj, struct queue, kobj);

	if (!strcmp(attr->name, "size"))
		return snprintf(buffer, PAGE_SIZE, "%llu",
				q->properties.queue_size);
	else if (!strcmp(attr->name, "type"))
		return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
	else if (!strcmp(attr->name, "gpuid"))
		return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
	else
		pr_err("Invalid attribute");

	return 0;
}
/* sysfs "show" for proc/<pid>/stats_<gpuid>/ attributes:
 * "evicted_ms" (total eviction time) and "cu_occupancy". */
static ssize_t opu_procfs_stats_show(struct kobject *kobj,
				     struct attribute *attr, char *buffer)
{
	if (strcmp(attr->name, "evicted_ms") == 0) {
		struct opu_process_device *pdd = container_of(attr,
				struct opu_process_device,
				attr_evict);
		uint64_t evict_jiffies;

		evict_jiffies = atomic64_read(&pdd->evict_duration_counter);

		return snprintf(buffer,
				PAGE_SIZE,
				"%llu\n",
				jiffies64_to_msecs(evict_jiffies));

	/* Sysfs handle that gets CU occupancy is per device */
	} else if (strcmp(attr->name, "cu_occupancy") == 0) {
		return opu_get_cu_occupancy(attr, buffer);
	} else {
		pr_err("Invalid attribute");
	}

	return 0;
}
/* sysfs "show" for proc/<pid>/counters_<gpuid>/ SVM counters:
 * "faults", "page_in" and "page_out". */
static ssize_t opu_sysfs_counters_show(struct kobject *kobj,
				       struct attribute *attr, char *buf)
{
	struct opu_process_device *pdd;

	if (!strcmp(attr->name, "faults")) {
		pdd = container_of(attr, struct opu_process_device,
				   attr_faults);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
	}
	if (!strcmp(attr->name, "page_in")) {
		pdd = container_of(attr, struct opu_process_device,
				   attr_page_in);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
	}
	if (!strcmp(attr->name, "page_out")) {
		pdd = container_of(attr, struct opu_process_device,
				   attr_page_out);
		return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
	}
	return 0;
}
/* Static attributes published under proc/<pid>/queues/<qid>/. */
static struct attribute attr_queue_size = {
	.name = "size",
	.mode = OPU_SYSFS_FILE_MODE
};

static struct attribute attr_queue_type = {
	.name = "type",
	.mode = OPU_SYSFS_FILE_MODE
};

static struct attribute attr_queue_gpuid = {
	.name = "gpuid",
	.mode = OPU_SYSFS_FILE_MODE
};

static struct attribute *procfs_queue_attrs[] = {
	&attr_queue_size,
	&attr_queue_type,
	&attr_queue_gpuid,
	NULL
};

static const struct sysfs_ops procfs_queue_ops = {
	.show = opu_procfs_queue_show,
};

/* No .release here: the queue kobject is embedded in struct queue
 * (see container_of in opu_procfs_queue_show). */
static struct kobj_type procfs_queue_type = {
	.sysfs_ops = &procfs_queue_ops,
	.default_attrs = procfs_queue_attrs,
};

static const struct sysfs_ops procfs_stats_ops = {
	.show = opu_procfs_stats_show,
};

static struct kobj_type procfs_stats_type = {
	.sysfs_ops = &procfs_stats_ops,
	.release = opu_procfs_kobj_release,
};

static const struct sysfs_ops sysfs_counters_ops = {
	.show = opu_sysfs_counters_show,
};

static struct kobj_type sysfs_counters_type = {
	.sysfs_ops = &sysfs_counters_ops,
	.release = opu_procfs_kobj_release,
};
/* Create the proc/<pid>/queues/<queue id>/ folder for a queue.
 * Returns 0 on success or a negative errno. */
int opu_procfs_add_queue(struct queue *q)
{
	struct opu_process *proc;
	int ret;

	if (!q || !q->process)
		return -EINVAL;
	proc = q->process;

	/* Create proc/<pid>/queues/<queue id> folder */
	if (!proc->kobj_queues)
		return -EFAULT;
	ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
			proc->kobj_queues, "%u", q->properties.queue_id);
	if (ret < 0) {
		pr_warn("Creating proc/<pid>/queues/%u failed",
			q->properties.queue_id);
		/* Drop the reference taken by kobject_init_and_add(). */
		kobject_put(&q->kobj);
		return ret;
	}

	return 0;
}
/* Initialize "attr" with the given name/mode and create the corresponding
 * sysfs file under "kobj". "name" is not copied and must outlive attr.
 * Failures are logged but not propagated (best effort). */
static void opu_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
				  char *name)
{
	int ret;

	if (!kobj || !attr || !name)
		return;

	attr->name = name;
	attr->mode = OPU_SYSFS_FILE_MODE;
	sysfs_attr_init(attr);
	ret = sysfs_create_file(kobj, attr);
	if (ret)
		pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
/* Create the per-GPU stats folders and files for process "p" (best effort;
 * stops at the first failing GPU). */
static void opu_procfs_add_sysfs_stats(struct opu_process *p)
{
	int ret;
	int i;
	char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/stats_<gpuid>/
	 * - proc/<pid>/stats_<gpuid>/evicted_ms
	 * - proc/<pid>/stats_<gpuid>/cu_occupancy
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct opu_process_device *pdd = p->pdds[i];

		snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
				"stats_%u", pdd->dev->id);
		pdd->kobj_stats = opu_alloc_struct(pdd->kobj_stats);
		if (!pdd->kobj_stats)
			return;

		/* NOTE(review): the directory name is passed as the kobject
		 * format string; safe only while it cannot contain '%'
		 * (it is always "stats_<id>" here). */
		ret = kobject_init_and_add(pdd->kobj_stats,
				&procfs_stats_type,
				p->kobj,
				stats_dir_filename);

		if (ret) {
			pr_warn("Creating OPU proc/stats_%s folder failed",
				stats_dir_filename);
			kobject_put(pdd->kobj_stats);
			pdd->kobj_stats = NULL;
			return;
		}

		opu_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
				      "evicted_ms");
		/* Add sysfs file to report compute unit occupancy */
		if (pdd->dev->opu2kgd->get_cu_occupancy)
			opu_sysfs_create_file(pdd->kobj_stats,
					      &pdd->attr_cu_occupancy,
					      "cu_occupancy");
	}
}
/* Create the per-GPU SVM counters folders and files for process "p"
 * (only for GPUs flagged in p->svms.bitmap_supported; best effort). */
static void opu_procfs_add_sysfs_counters(struct opu_process *p)
{
	int ret = 0;
	int i;
	char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU which supports SVM
	 * - proc/<pid>/counters_<gpuid>/
	 * - proc/<pid>/counters_<gpuid>/faults
	 * - proc/<pid>/counters_<gpuid>/page_in
	 * - proc/<pid>/counters_<gpuid>/page_out
	 */
	for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
		struct opu_process_device *pdd = p->pdds[i];
		struct kobject *kobj_counters;

		snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
			"counters_%u", pdd->dev->id);
		kobj_counters = opu_alloc_struct(kobj_counters);
		if (!kobj_counters)
			return;
		ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
					   p->kobj, counters_dir_filename);
		if (ret) {
			pr_warn("Creating OPU proc/%s folder failed",
				counters_dir_filename);
			kobject_put(kobj_counters);
			return;
		}

		pdd->kobj_counters = kobj_counters;
		opu_sysfs_create_file(kobj_counters, &pdd->attr_faults,
				      "faults");
		opu_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
				      "page_in");
		opu_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
				      "page_out");
	}
}
/* Create the flat per-GPU usage files directly under proc/<pid>/. */
static void opu_procfs_add_sysfs_files(struct opu_process *p)
{
	int i;

	if (!p || !p->kobj)
		return;

	/*
	 * Create sysfs files for each GPU:
	 * - proc/<pid>/vram_<gpuid>
	 * - proc/<pid>/sdma_<gpuid>
	 */
	for (i = 0; i < p->n_pdds; i++) {
		struct opu_process_device *pdd = p->pdds[i];

		/* Filenames are stored in the pdd so the attribute name
		 * pointers stay valid for the lifetime of the files. */
		snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
			 pdd->dev->id);
		opu_sysfs_create_file(p->kobj, &pdd->attr_vram,
				      pdd->vram_filename);
		snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
			 pdd->dev->id);
		opu_sysfs_create_file(p->kobj, &pdd->attr_sdma,
				      pdd->sdma_filename);
	}
}
/* Remove a queue's sysfs folder; safe to call with q == NULL. */
void opu_procfs_del_queue(struct queue *q)
{
	if (q) {
		kobject_del(&q->kobj);
		kobject_put(&q->kobj);
	}
}
int opu_process_create_wq(void)
{
if (!opu_process_wq)
opu_process_wq = alloc_workqueue("opu_process_wq", 0, 0);
if (!opu_restore_wq)
opu_restore_wq = alloc_ordered_workqueue("opu_restore_wq", 0);
if (!opu_process_wq || !opu_restore_wq) {
opu_process_destroy_wq();
return -ENOMEM;
}
return 0;
}
void opu_process_destroy_wq(void)
{
if (opu_process_wq) {
destroy_workqueue(opu_process_wq);
opu_process_wq = NULL;
}
if (opu_restore_wq) {
destroy_workqueue(opu_restore_wq);
opu_restore_wq = NULL;
}
}
static void opu_process_free_gpuvm(struct kgd_mem *mem,
struct opu_process_device *pdd)
{
struct opu_dev *dev = pdd->dev;
amdgpu_amdopu_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
amdgpu_amdopu_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
NULL);
}
/* opu_process_alloc_gpuvm - Allocate GPU VM for the OPU process
* This function should be only called right after the process
* is created and when opu_processes_mutex is still being held
* to avoid concurrency. Because of that exclusiveness, we do
* not need to take p->mutex.
*/
static int opu_process_alloc_gpuvm(struct opu_process_device *pdd,
uint64_t gpu_va, uint32_t size,
uint32_t flags, void **kptr)
{
struct opu_dev *kdev = pdd->dev;
struct kgd_mem *mem = NULL;
int handle;
int err;
err = amdgpu_amdopu_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
pdd->drm_priv, &mem, NULL, flags);
if (err)
goto err_alloc_mem;
err = amdgpu_amdopu_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
pdd->drm_priv, NULL);
if (err)
goto err_map_mem;
err = amdgpu_amdopu_gpuvm_sync_memory(kdev->kgd, mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
/* Create an obj handle so opu_process_device_remove_obj_handle
* will take care of the bo removal when the process finishes.
* We do not need to take p->mutex, because the process is just
* created and the ioctls have not had the chance to run.
*/
handle = opu_process_device_create_obj_handle(pdd, mem);
if (handle < 0) {
err = handle;
goto free_gpuvm;
}
if (kptr) {
err = amdgpu_amdopu_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
(struct kgd_mem *)mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
goto free_obj_handle;
}
}
return err;
free_obj_handle:
opu_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
opu_process_free_gpuvm(mem, pdd);
return err;
err_map_mem:
amdgpu_amdopu_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
NULL);
err_alloc_mem:
*kptr = NULL;
return err;
}
/* opu_process_device_reserve_ib_mem - Reserve memory inside the
* process for IB usage The memory reserved is for OPU to submit
* IB to AMDGPU from kernel. If the memory is reserved
* successfully, ib_kaddr will have the CPU/kernel
* address. Check ib_kaddr before accessing the memory.
*/
static int opu_process_device_reserve_ib_mem(struct opu_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = OPU_IOC_ALLOC_MEM_FLAGS_GTT |
OPU_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
OPU_IOC_ALLOC_MEM_FLAGS_WRITABLE |
OPU_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
if (qpd->ib_kaddr || !qpd->ib_base)
return 0;
/* ib_base is only set for dGPU */
ret = opu_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
&kaddr);
if (ret)
return ret;
qpd->ib_kaddr = kaddr;
return 0;
}
struct opu_process *opu_create_process(struct file *filep)
{
struct opu_process *process;
struct task_struct *thread = current;
int ret;
if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
/*
* take opu processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
* create two opu_process structures
*/
mutex_lock(&opu_processes_mutex);
/* A prior open of /dev/opu could have already created the process. */
process = find_process(thread);
if (process) {
pr_debug("Process already found\n");
} else {
process = create_process(thread);
if (IS_ERR(process))
goto out;
ret = opu_process_init_cwsr_apu(process, filep);
if (ret)
goto out_destroy;
if (!procfs.kobj)
goto out;
process->kobj = opu_alloc_struct(process->kobj);
if (!process->kobj) {
pr_warn("Creating procfs kobject failed");
goto out;
}
ret = kobject_init_and_add(process->kobj, &procfs_type,
procfs.kobj, "%d",
(int)process->lead_thread->pid);
if (ret) {
pr_warn("Creating procfs pid directory failed");
kobject_put(process->kobj);
goto out;
}
opu_sysfs_create_file(process->kobj, &process->attr_pasid,
"pasid");
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating OPU proc/queues folder failed");
opu_procfs_add_sysfs_stats(process);
opu_procfs_add_sysfs_files(process);
opu_procfs_add_sysfs_counters(process);
}
out:
if (!IS_ERR(process))
kref_get(&process->ref);
mutex_unlock(&opu_processes_mutex);
return process;
out_destroy:
hash_del_rcu(&process->opu_processes);
mutex_unlock(&opu_processes_mutex);
synchronize_srcu(&opu_processes_srcu);
/* opu_process_free_notifier will trigger the cleanup */
mmu_notifier_put(&process->mmu_notifier);
return ERR_PTR(ret);
}
struct opu_process *opu_get_process(const struct task_struct *thread)
{
struct opu_process *process;
if (!thread->mm)
return ERR_PTR(-EINVAL);
/* Only the pthreads threading model is supported. */
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
process = find_process(thread);
if (!process)
return ERR_PTR(-EINVAL);
return process;
}
static struct opu_process *find_process_by_mm(const struct mm_struct *mm)
{
struct opu_process *process;
hash_for_each_possible_rcu(opu_processes_table, process,
opu_processes, (uintptr_t)mm)
if (process->mm == mm)
return process;
return NULL;
}
static struct opu_process *find_process(const struct task_struct *thread)
{
struct opu_process *p;
int idx;
idx = srcu_read_lock(&opu_processes_srcu);
p = find_process_by_mm(thread->mm);
srcu_read_unlock(&opu_processes_srcu, idx);
return p;
}
void opu_unref_process(struct opu_process *p)
{
kref_put(&p->ref, opu_process_ref_release);
}
static void opu_process_device_free_bos(struct opu_process_device *pdd)
{
struct opu_process *p = pdd->process;
void *mem;
int id;
int i;
/*
* Remove all handles from idr and release appropriate
* local memory object
*/
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
for (i = 0; i < p->n_pdds; i++) {
struct opu_process_device *peer_pdd = p->pdds[i];
if (!peer_pdd->drm_priv)
continue;
amdgpu_amdopu_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
}
amdgpu_amdopu_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
pdd->drm_priv, NULL);
opu_process_device_remove_obj_handle(pdd, id);
}
}
static void opu_process_free_outstanding_opu_bos(struct opu_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
opu_process_device_free_bos(p->pdds[i]);
}
static void opu_process_destroy_pdds(struct opu_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++) {
struct opu_process_device *pdd = p->pdds[i];
pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
amdgpu_amdopu_gpuvm_release_process_vm(
pdd->dev->kgd, pdd->drm_priv);
fput(pdd->drm_file);
}
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
get_order(OPU_CWSR_TBA_TMA_SIZE));
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
opu_free_process_doorbells(pdd->dev, pdd->doorbell_index);
/*
* before destroying pdd, make sure to report availability
* for auto suspend
*/
if (pdd->runtime_inuse) {
pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
pdd->runtime_inuse = false;
}
kfree(pdd);
p->pdds[i] = NULL;
}
p->n_pdds = 0;
}
static void opu_process_remove_sysfs(struct opu_process *p)
{
struct opu_process_device *pdd;
int i;
if (!p->kobj)
return;
sysfs_remove_file(p->kobj, &p->attr_pasid);
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
for (i = 0; i < p->n_pdds; i++) {
pdd = p->pdds[i];
sysfs_remove_file(p->kobj, &pdd->attr_vram);
sysfs_remove_file(p->kobj, &pdd->attr_sdma);
sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
if (pdd->dev->opu2kgd->get_cu_occupancy)
sysfs_remove_file(pdd->kobj_stats,
&pdd->attr_cu_occupancy);
kobject_del(pdd->kobj_stats);
kobject_put(pdd->kobj_stats);
pdd->kobj_stats = NULL;
}
for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
kobject_del(pdd->kobj_counters);
kobject_put(pdd->kobj_counters);
pdd->kobj_counters = NULL;
}
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
}
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
* structure in the end.
*/
static void opu_process_wq_release(struct work_struct *work)
{
struct opu_process *p = container_of(work, struct opu_process,
release_work);
opu_process_remove_sysfs(p);
opu_iommu_unbind_process(p);
opu_process_free_outstanding_opu_bos(p);
svm_range_list_fini(p);
opu_process_destroy_pdds(p);
dma_fence_put(p->ef);
opu_event_free_process(p);
opu_pasid_free(p->pasid);
mutex_destroy(&p->mutex);
put_task_struct(p->lead_thread);
kfree(p);
}
static void opu_process_ref_release(struct kref *ref)
{
struct opu_process *p = container_of(ref, struct opu_process, ref);
INIT_WORK(&p->release_work, opu_process_wq_release);
queue_work(opu_process_wq, &p->release_work);
}
static struct mmu_notifier *opu_process_alloc_notifier(struct mm_struct *mm)
{
int idx = srcu_read_lock(&opu_processes_srcu);
struct opu_process *p = find_process_by_mm(mm);
srcu_read_unlock(&opu_processes_srcu, idx);
return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
}
static void opu_process_free_notifier(struct mmu_notifier *mn)
{
opu_unref_process(container_of(mn, struct opu_process, mmu_notifier));
}
static void opu_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct opu_process *p;
int i;
/*
* The opu_process structure can not be free because the
* mmu_notifier srcu is read locked
*/
p = container_of(mn, struct opu_process, mmu_notifier);
if (WARN_ON(p->mm != mm))
return;
mutex_lock(&opu_processes_mutex);
hash_del_rcu(&p->opu_processes);
mutex_unlock(&opu_processes_mutex);
synchronize_srcu(&opu_processes_srcu);
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
cancel_delayed_work_sync(&p->svms.restore_work);
mutex_lock(&p->mutex);
/* Iterate over all process device data structures and if the
* pdd is in debug mode, we should first force unregistration,
* then we will be able to destroy the queues
*/
for (i = 0; i < p->n_pdds; i++) {
struct opu_dev *dev = p->pdds[i]->dev;
mutex_lock(opu_get_dbgmgr_mutex());
if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
if (!opu_dbgmgr_unregister(dev->dbgmgr, p)) {
opu_dbgmgr_destroy(dev->dbgmgr);
dev->dbgmgr = NULL;
}
}
mutex_unlock(opu_get_dbgmgr_mutex());
}
opu_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
/* Signal the eviction fence after user mode queues are
* destroyed. This allows any BOs to be freed without
* triggering pointless evictions or waiting for fences.
*/
dma_fence_signal(p->ef);
mutex_unlock(&p->mutex);
mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops opu_process_mmu_notifier_ops = {
.release = opu_process_notifier_release,
.alloc_notifier = opu_process_alloc_notifier,
.free_notifier = opu_process_free_notifier,
};
static int opu_process_init_cwsr_apu(struct opu_process *p, struct file *filep)
{
unsigned long offset;
int i;
for (i = 0; i < p->n_pdds; i++) {
struct opu_dev *dev = p->pdds[i]->dev;
struct qcm_process_device *qpd = &p->pdds[i]->qpd;
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
offset = OPU_MMAP_TYPE_RESERVED_MEM | OPU_MMAP_GPU_ID(dev->id);
qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
OPU_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
MAP_SHARED, offset);
if (IS_ERR_VALUE(qpd->tba_addr)) {
int err = qpd->tba_addr;
pr_err("Failure to set tba address. error %d.\n", err);
qpd->tba_addr = 0;
qpd->cwsr_kaddr = NULL;
return err;
}
memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
qpd->tma_addr = qpd->tba_addr + OPU_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
}
return 0;
}
static int opu_process_device_init_cwsr_dgpu(struct opu_process_device *pdd)
{
struct opu_dev *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
uint32_t flags = OPU_IOC_ALLOC_MEM_FLAGS_GTT
| OPU_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
| OPU_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
return 0;
/* cwsr_base is only set for dGPU */
ret = opu_process_alloc_gpuvm(pdd, qpd->cwsr_base,
OPU_CWSR_TBA_TMA_SIZE, flags, &kaddr);
if (ret)
return ret;
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
qpd->tma_addr = qpd->tba_addr + OPU_CWSR_TMA_OFFSET;
pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
return 0;
}
void opu_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr)
{
if (qpd->cwsr_kaddr) {
/* OPU trap handler is bound, record as second-level TBA/TMA
* in first-level TMA. First-level trap will jump to second.
*/
uint64_t *tma =
(uint64_t *)(qpd->cwsr_kaddr + OPU_CWSR_TMA_OFFSET);
tma[0] = tba_addr;
tma[1] = tma_addr;
} else {
/* No trap handler bound, bind as first-level TBA/TMA. */
qpd->tba_addr = tba_addr;
qpd->tma_addr = tma_addr;
}
}
bool opu_process_xnack_mode(struct opu_process *p, bool supported)
{
int i;
/* On most GFXv9 GPUs, the retry mode in the SQ must match the
* boot time retry setting. Mixing processes with different
* XNACK/retry settings can hang the GPU.
*
* Different GPUs can have different noretry settings depending
* on HW bugs or limitations. We need to find at least one
* XNACK mode for this process that's compatible with all GPUs.
* Fortunately GPUs with retry enabled (noretry=0) can run code
* built for XNACK-off. On GFXv9 it may perform slower.
*
* Therefore applications built for XNACK-off can always be
* supported and will be our fallback if any GPU does not
* support retry.
*/
for (i = 0; i < p->n_pdds; i++) {
struct opu_dev *dev = p->pdds[i]->dev;
/* Only consider GFXv9 and higher GPUs. Older GPUs don't
* support the SVM APIs and don't need to be considered
* for the XNACK mode selection.
*/
if (dev->device_info->asic_family < CHIP_VEGA10)
continue;
/* Aldebaran can always support XNACK because it can support
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
if (supported &&
dev->device_info->asic_family == CHIP_ALDEBARAN)
continue;
/* GFXv10 and later GPUs do not support shader preemption
* during page faults. This can lead to poor QoS for queue
* management and memory-manager-related preemptions or
* even deadlocks.
*/
if (dev->device_info->asic_family >= CHIP_NAVI10)
return false;
if (dev->noretry)
return false;
}
return true;
}
/*
* On return the opu_process is fully operational and will be freed when the
* mm is released
*/
static struct opu_process *create_process(const struct task_struct *thread)
{
struct opu_process *process;
struct mmu_notifier *mn;
int err = -ENOMEM;
process = kzalloc(sizeof(*process), GFP_KERNEL);
if (!process)
goto err_alloc_process;
kref_init(&process->ref);
mutex_init(&process->mutex);
process->mm = thread->mm;
process->lead_thread = thread->group_leader;
process->n_pdds = 0;
INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
process->last_restore_timestamp = get_jiffies_64();
opu_event_init_process(process);
process->is_32bit_user_mode = in_compat_syscall();
process->pasid = opu_pasid_alloc();
if (process->pasid == 0)
goto err_alloc_pasid;
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
err = opu_init_apertures(process);
if (err != 0)
goto err_init_apertures;
/* Check XNACK support after PDDs are created in opu_init_apertures */
process->xnack_enabled = opu_process_xnack_mode(process, false);
err = svm_range_list_init(process);
if (err)
goto err_init_svm_range_list;
/* alloc_notifier needs to find the process in the hash table */
hash_add_rcu(opu_processes_table, &process->opu_processes,
(uintptr_t)process->mm);
/* MMU notifier registration must be the last call that can fail
* because after this point we cannot unwind the process creation.
* After this point, mmu_notifier_put will trigger the cleanup by
* dropping the last process reference in the free_notifier.
*/
mn = mmu_notifier_get(&opu_process_mmu_notifier_ops, process->mm);
if (IS_ERR(mn)) {
err = PTR_ERR(mn);
goto err_register_notifier;
}
BUG_ON(mn != &process->mmu_notifier);
get_task_struct(process->lead_thread);
return process;
err_register_notifier:
hash_del_rcu(&process->opu_processes);
svm_range_list_fini(process);
err_init_svm_range_list:
opu_process_free_outstanding_opu_bos(process);
opu_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
opu_pasid_free(process->pasid);
err_alloc_pasid:
mutex_destroy(&process->mutex);
kfree(process);
err_alloc_process:
return ERR_PTR(err);
}
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct opu_dev *dev)
{
unsigned int i;
int range_start = dev->shared_resources.non_cp_doorbells_start;
int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!OPU_IS_SOC15(dev->device_info->asic_family))
return 0;
qpd->doorbell_bitmap =
kzalloc(DIV_ROUND_UP(OPU_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (!qpd->doorbell_bitmap)
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
range_start + OPU_QUEUE_DOORBELL_MIRROR_OFFSET,
range_end + OPU_QUEUE_DOORBELL_MIRROR_OFFSET);
for (i = 0; i < OPU_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + OPU_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
}
}
return 0;
}
struct opu_process_device *opu_get_process_device_data(struct opu_dev *dev,
struct opu_process *p)
{
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i]->dev == dev)
return p->pdds[i];
return NULL;
}
struct opu_process_device *opu_create_process_device_data(struct opu_dev *dev,
struct opu_process *p)
{
struct opu_process_device *pdd = NULL;
if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
return NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
if (!pdd)
return NULL;
if (opu_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
pr_err("Failed to alloc doorbell for pdd\n");
goto err_free_pdd;
}
if (init_doorbell_bitmap(&pdd->qpd, dev)) {
pr_err("Failed to init doorbell for process\n");
goto err_free_pdd;
}
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
pdd->vram_usage = 0;
pdd->sdma_past_activity_counter = 0;
atomic64_set(&pdd->evict_duration_counter, 0);
p->pdds[p->n_pdds++] = pdd;
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
return pdd;
err_free_pdd:
kfree(pdd);
return NULL;
}
/**
* opu_process_device_init_vm - Initialize a VM for a process-device
*
* @pdd: The process-device
* @drm_file: Optional pointer to a DRM file descriptor
*
* If @drm_file is specified, it will be used to acquire the VM from
* that file descriptor. If successful, the @pdd takes ownership of
* the file descriptor.
*
* If @drm_file is NULL, a new VM is created.
*
* Returns 0 on success, -errno on failure.
*/
int opu_process_device_init_vm(struct opu_process_device *pdd,
struct file *drm_file)
{
struct opu_process *p;
struct opu_dev *dev;
int ret;
if (!drm_file)
return -EINVAL;
if (pdd->drm_priv)
return -EBUSY;
p = pdd->process;
dev = pdd->dev;
ret = amdgpu_amdopu_gpuvm_acquire_process_vm(
dev->kgd, drm_file, p->pasid,
&p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
}
pdd->drm_priv = drm_file->private_data;
ret = opu_process_device_reserve_ib_mem(pdd);
if (ret)
goto err_reserve_ib_mem;
ret = opu_process_device_init_cwsr_dgpu(pdd);
if (ret)
goto err_init_cwsr;
pdd->drm_file = drm_file;
return 0;
err_init_cwsr:
err_reserve_ib_mem:
opu_process_device_free_bos(pdd);
pdd->drm_priv = NULL;
return ret;
}
/*
* Direct the IOMMU to bind the process (specifically the pasid->mm)
* to the device.
* Unbinding occurs when the process dies or the device is removed.
*
* Assumes that the process lock is held.
*/
struct opu_process_device *opu_bind_process_to_device(struct opu_dev *dev,
struct opu_process *p)
{
struct opu_process_device *pdd;
int err;
pdd = opu_get_process_device_data(dev, p);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
}
if (!pdd->drm_priv)
return ERR_PTR(-ENODEV);
/*
* signal runtime-pm system to auto resume and prevent
* further runtime suspend once device pdd is created until
* pdd is destroyed.
*/
if (!pdd->runtime_inuse) {
err = pm_runtime_get_sync(dev->ddev->dev);
if (err < 0) {
pm_runtime_put_autosuspend(dev->ddev->dev);
return ERR_PTR(err);
}
}
err = opu_iommu_bind_process_to_device(pdd);
if (err)
goto out;
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
*/
pdd->runtime_inuse = true;
return pdd;
out:
/* balance runpm reference count and exit with error */
if (!pdd->runtime_inuse) {
pm_runtime_mark_last_busy(dev->ddev->dev);
pm_runtime_put_autosuspend(dev->ddev->dev);
}
return ERR_PTR(err);
}
/* Create specific handle mapped to mem from process local memory idr
* Assumes that the process lock is held.
*/
int opu_process_device_create_obj_handle(struct opu_process_device *pdd,
void *mem)
{
return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}
/* Translate specific handle from process local memory idr
* Assumes that the process lock is held.
*/
void *opu_process_device_translate_handle(struct opu_process_device *pdd,
int handle)
{
if (handle < 0)
return NULL;
return idr_find(&pdd->alloc_idr, handle);
}
/* Remove specific handle from process local memory idr
* Assumes that the process lock is held.
*/
void opu_process_device_remove_obj_handle(struct opu_process_device *pdd,
int handle)
{
if (handle >= 0)
idr_remove(&pdd->alloc_idr, handle);
}
/* This increments the process->ref counter. */
struct opu_process *opu_lookup_process_by_pasid(u32 pasid)
{
struct opu_process *p, *ret_p = NULL;
unsigned int temp;
int idx = srcu_read_lock(&opu_processes_srcu);
hash_for_each_rcu(opu_processes_table, temp, p, opu_processes) {
if (p->pasid == pasid) {
kref_get(&p->ref);
ret_p = p;
break;
}
}
srcu_read_unlock(&opu_processes_srcu, idx);
return ret_p;
}
/* This increments the process->ref counter. */
struct opu_process *opu_lookup_process_by_mm(const struct mm_struct *mm)
{
struct opu_process *p;
int idx = srcu_read_lock(&opu_processes_srcu);
p = find_process_by_mm(mm);
if (p)
kref_get(&p->ref);
srcu_read_unlock(&opu_processes_srcu, idx);
return p;
}
/* opu_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
*/
int opu_process_evict_queues(struct opu_process *p)
{
int r = 0;
int i;
unsigned int n_evicted = 0;
for (i = 0; i < p->n_pdds; i++) {
struct opu_process_device *pdd = p->pdds[i];
r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
pr_err("Failed to evict process queues\n");
goto fail;
}
n_evicted++;
}
return r;
fail:
/* To keep state consistent, roll back partial eviction by
* restoring queues
*/
for (i = 0; i < p->n_pdds; i++) {
struct opu_process_device *pdd = p->pdds[i];
if (n_evicted == 0)
break;
if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd))
pr_err("Failed to restore queues\n");
n_evicted--;
}
return r;
}
/* opu_process_restore_queues - Restore all user queues of a process */
int opu_process_restore_queues(struct opu_process *p)
{
int r, ret = 0;
int i;
for (i = 0; i < p->n_pdds; i++) {
struct opu_process_device *pdd = p->pdds[i];
r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
pr_err("Failed to restore process queues\n");
if (!ret)
ret = r;
}
}
return ret;
}
int opu_process_gpuidx_from_gpuid(struct opu_process *p, uint32_t gpu_id)
{
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i] && gpu_id == p->pdds[i]->dev->id)
return i;
return -EINVAL;
}
int
opu_process_gpuid_from_kgd(struct opu_process *p, struct amdgpu_device *adev,
uint32_t *gpuid, uint32_t *gpuidx)
{
struct kgd_dev *kgd = (struct kgd_dev *)adev;
int i;
for (i = 0; i < p->n_pdds; i++)
if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
*gpuid = p->pdds[i]->dev->id;
*gpuidx = i;
return 0;
}
return -EINVAL;
}
static void evict_process_worker(struct work_struct *work)
{
int ret;
struct opu_process *p;
struct delayed_work *dwork;
dwork = to_delayed_work(work);
/* Process termination destroys this worker thread. So during the
* lifetime of this thread, opu_process p will be valid
*/
p = container_of(dwork, struct opu_process, eviction_work);
WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
"Eviction fence mismatch\n");
/* Narrow window of overlap between restore and evict work
* item is possible. Once amdgpu_amdopu_gpuvm_restore_process_bos
* unreserves OPU BOs, it is possible to evicted again. But
* restore has few more steps of finish. So lets wait for any
* previous restore work to complete
*/
flush_delayed_work(&p->restore_work);
pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = opu_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
queue_delayed_work(opu_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
struct delayed_work *dwork;
struct opu_process *p;
int ret = 0;
dwork = to_delayed_work(work);
/* Process termination destroys this worker thread. So during the
* lifetime of this thread, opu_process p will be valid
*/
p = container_of(dwork, struct opu_process, restore_work);
pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
* before OPU BOs are unreserved. If not, the process can be evicted
* again before the timestamp is set.
* If restore fails, the timestamp will be set again in the next
* attempt. This would mean that the minimum GPU quanta would be
* PROCESS_ACTIVE_TIME_MS - (time to execute the following two
* functions)
*/
p->last_restore_timestamp = get_jiffies_64();
ret = amdgpu_amdopu_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(opu_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
WARN(!ret, "reschedule restore work failed\n");
return;
}
ret = opu_process_restore_queues(p);
if (!ret)
pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void opu_suspend_all_processes(void)
{
struct opu_process *p;
unsigned int temp;
int idx = srcu_read_lock(&opu_processes_srcu);
WARN(debug_evictions, "Evicting all processes");
hash_for_each_rcu(opu_processes_table, temp, p, opu_processes) {
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
if (opu_process_evict_queues(p))
pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
}
srcu_read_unlock(&opu_processes_srcu, idx);
}
int opu_resume_all_processes(void)
{
struct opu_process *p;
unsigned int temp;
int ret = 0, idx = srcu_read_lock(&opu_processes_srcu);
hash_for_each_rcu(opu_processes_table, temp, p, opu_processes) {
if (!queue_delayed_work(opu_restore_wq, &p->restore_work, 0)) {
pr_err("Restore process %d failed during resume\n",
p->pasid);
ret = -EFAULT;
}
}
srcu_read_unlock(&opu_processes_srcu, idx);
return ret;
}
int opu_reserved_mem_mmap(struct opu_dev *dev, struct opu_process *process,
struct vm_area_struct *vma)
{
struct opu_process_device *pdd;
struct qcm_process_device *qpd;
if ((vma->vm_end - vma->vm_start) != OPU_CWSR_TBA_TMA_SIZE) {
pr_err("Incorrect CWSR mapping size.\n");
return -EINVAL;
}
pdd = opu_get_process_device_data(dev, process);
if (!pdd)
return -EINVAL;
qpd = &pdd->qpd;
qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(OPU_CWSR_TBA_TMA_SIZE));
if (!qpd->cwsr_kaddr) {
pr_err("Error allocating per process CWSR buffer.\n");
return -ENOMEM;
}
vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
OPU_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
void opu_flush_tlb(struct opu_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
struct opu_dev *dev = pdd->dev;
if (dev->dqm->sched_policy == OPU_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
amdgpu_amdopu_flush_gpu_tlb_vmid(dev->kgd,
pdd->qpd.vmid);
} else {
amdgpu_amdopu_flush_gpu_tlb_pasid(dev->kgd,
pdd->process->pasid, type);
}
}
#if defined(CONFIG_DEBUG_FS)
int opu_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
struct opu_process *p;
unsigned int temp;
int r = 0;
int idx = srcu_read_lock(&opu_processes_srcu);
hash_for_each_rcu(opu_processes_table, temp, p, opu_processes) {
seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
r = pqm_debugfs_mqds(m, &p->pqm);
mutex_unlock(&p->mutex);
if (r)
break;
}
srcu_read_unlock(&opu_processes_srcu, idx);
return r;
}
#endif
| 25.899217
| 84
| 0.716354
|
[
"object",
"model"
] |
451931fa2d8c92283447b0cd35b69dcfc8ab4ace
| 466
|
h
|
C
|
src/annealing/include/module.h
|
ctlab/rmwcs
|
354d47304e0d605906037fb6c6285363e1b34fba
|
[
"Apache-2.0"
] | 7
|
2017-09-06T09:55:00.000Z
|
2022-02-25T03:57:13.000Z
|
src/annealing/include/module.h
|
ctlab/rmwcs
|
354d47304e0d605906037fb6c6285363e1b34fba
|
[
"Apache-2.0"
] | 2
|
2021-11-14T00:58:01.000Z
|
2021-11-21T14:46:17.000Z
|
src/annealing/include/module.h
|
ctlab/rmwcs
|
354d47304e0d605906037fb6c6285363e1b34fba
|
[
"Apache-2.0"
] | 2
|
2020-10-18T02:23:30.000Z
|
2021-12-21T10:55:05.000Z
|
#ifndef ANNEALING_MODULE_H
#define ANNEALING_MODULE_H
#include "../../include/graph.h"
using namespace mwcsr;
namespace annealing {
class Module {
std::vector<size_t> vs;
std::vector<Edge> es;
public:
Module();
Module(const Graph& g, const std::vector<size_t>& vertices, const std::vector<size_t>& edges);
std::vector<size_t> vertices();
std::vector<Edge> edges();
};
}
#endif //ANNEALING_MODULE_H
| 18.64
| 102
| 0.630901
|
[
"vector"
] |
451b422b2a89bfb6374f0efebb3563eccd5a2fbc
| 1,112
|
h
|
C
|
GitThreadHelper.h
|
mooming/gitlfslockhelper
|
303331a191693b47d2f147e56df59e19d86e45b7
|
[
"MIT"
] | null | null | null |
GitThreadHelper.h
|
mooming/gitlfslockhelper
|
303331a191693b47d2f147e56df59e19d86e45b7
|
[
"MIT"
] | null | null | null |
GitThreadHelper.h
|
mooming/gitlfslockhelper
|
303331a191693b47d2f147e56df59e19d86e45b7
|
[
"MIT"
] | null | null | null |
#pragma once
#include <functional>
#include <mutex>
#include <string>
#include <thread>
#include <vector>
namespace Git
{
using namespace std;
class MultiThreadTask
{
mutex lockObj;
size_t capacity;
vector<thread> pool;
public:
MultiThreadTask(size_t capacity)
: lockObj()
, capacity(capacity)
{
pool.reserve(capacity);
}
void Add(std::function<bool(const string& str)> func, const string& str)
{
lock_guard<mutex> lock(lockObj);
if (pool.size() >= capacity)
{
WaitForComplete_NeedLock();
}
pool.emplace_back([func](const string& str) { func(str); }, str);
}
void WaitForComplete()
{
lock_guard<mutex> lock(lockObj);
WaitForComplete_NeedLock();
}
private:
void WaitForComplete_NeedLock()
{
for (auto& thread : pool)
{
thread.join();
}
pool.clear();
}
};
}
| 18.847458
| 80
| 0.496403
|
[
"vector"
] |
451fae40b56ea1bd67714a604ae5e7a2ed930687
| 1,888
|
h
|
C
|
src/Sparrow/Sparrow/Implementations/Dftb/Utils/SkfParser.h
|
qcscine/sparrow
|
387e56ed8da78e10d96861758c509f7c375dcf07
|
[
"BSD-3-Clause"
] | 45
|
2019-06-12T20:04:00.000Z
|
2022-02-28T21:43:54.000Z
|
src/Sparrow/Sparrow/Implementations/Dftb/Utils/SkfParser.h
|
qcscine/sparrow
|
387e56ed8da78e10d96861758c509f7c375dcf07
|
[
"BSD-3-Clause"
] | 12
|
2019-06-12T23:53:57.000Z
|
2022-03-28T18:35:57.000Z
|
src/Sparrow/Sparrow/Implementations/Dftb/Utils/SkfParser.h
|
qcscine/sparrow
|
387e56ed8da78e10d96861758c509f7c375dcf07
|
[
"BSD-3-Clause"
] | 11
|
2019-06-22T22:52:51.000Z
|
2022-03-11T16:59:59.000Z
|
/**
* @file
* @copyright This code is licensed under the 3-clause BSD license.\n
* Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.\n
* See LICENSE.txt for details.
*/
#ifndef INCLUDE_SPARROW_IMPLEMENTATIONS_DFTB_UTILS_SKF_PARSER_H
#define INCLUDE_SPARROW_IMPLEMENTATIONS_DFTB_UTILS_SKF_PARSER_H
#include "Sparrow/Implementations/Dftb/Utils/RepulsionParameters.h"
#include "boost/optional.hpp"
#include <array>
#include <string>
#include <unordered_map>
namespace Scine {
namespace Sparrow {
namespace dftb {
struct SkfData {
struct SameElementLine {
// On-site energies for angular momenta d, p, s
double Ed;
double Ep;
double Es;
// spin polarisation error for calculating formation energies
double SPE;
// Hubbard U values for appropriate angular momenta
double Ud;
double Up;
double Us;
// Occupations for the neutral atom
unsigned fd;
unsigned fp;
unsigned fs;
};
using DoublesList = std::vector<double>;
using IntegralTable = std::array<DoublesList, 28>;
/*! @brief Parses a SKF file
*
* Resolves forwarding.
*/
static SkfData read(const std::string& filename);
double gridDistance;
boost::optional<SameElementLine> atomicParameters;
IntegralTable integralTable;
RepulsionParameters repulsion;
};
struct SkfSpinConstants {
using MatrixType = std::array<std::array<double, 3>, 3>;
using MapType = std::unordered_map<int, MatrixType>;
MapType map;
static SkfSpinConstants read(const std::string& filename);
void patch(SkfSpinConstants other);
};
struct SkfHubbardDerivatives {
using MapType = std::unordered_map<int, double>;
MapType map;
static SkfHubbardDerivatives read(const std::string& filename);
void patch(SkfHubbardDerivatives other);
};
} // namespace dftb
} // namespace Sparrow
} // namespace Scine
#endif
| 24.205128
| 85
| 0.725106
|
[
"vector"
] |
45240537fce102d3ddbc87ba545059ed5a8d858a
| 2,192
|
h
|
C
|
include/al/controller/PadRumbleDirector.h
|
3096/starlight
|
7056a81cbf9756b68392be0cadfab5c27570de54
|
[
"MIT"
] | 385
|
2019-05-09T21:46:48.000Z
|
2022-02-18T18:19:40.000Z
|
include/al/controller/PadRumbleDirector.h
|
3096/starlight
|
7056a81cbf9756b68392be0cadfab5c27570de54
|
[
"MIT"
] | 11
|
2019-05-20T01:17:38.000Z
|
2022-01-19T18:03:18.000Z
|
include/al/controller/PadRumbleDirector.h
|
3096/starlight
|
7056a81cbf9756b68392be0cadfab5c27570de54
|
[
"MIT"
] | 36
|
2019-05-09T22:29:29.000Z
|
2021-12-27T18:20:32.000Z
|
/**
* @file PadRumbleDirector.h
* @brief Manager for pad rumble.
*/
#pragma once
#include "types.h"
#include "PadRumbleParam.h"
#include "sead/vector.h"
namespace al
{
class CameraDirector;
class PlayerHolder;
class WaveVibrationHolder;
class PadRumbleDirector
{
public:
PadRumbleDirector(al::PlayerHolder const *, al::CameraDirector const *);
void setWaveVibrationHolder(al::WaveVibrationHolder *);
void update();
void updateInfoListAll();
void startRumble(char const *, sead::Vector3<f32> const &, al::PadRumbleParam const &, s32);
void findDeadInfoOneTime();
void startRumbleNo3D(char const *, al::PadRumbleParam const &, s32);
void stopPadRumbleOneTime(char const *, s32);
void startRumbleLoop(char const *, sead::Vector3<f32> const *, al::PadRumbleParam const &, s32);
void findDeadInfo();
void startRumbleLoopNo3D(char const *, sead::Vector3<f32> const *, al::PadRumbleParam const &, s32);
void stopRumbleLoop(char const *, sead::Vector3<f32> const *, s32);
u64* findInfo(char const *, sead::Vector3<f32> const *, s32);
bool checkIsAliveRumbleLoop(char const *, sead::Vector3<f32> const *, s32);
void stopAllRumble();
void clearAllInfoList();
void pause();
void endPause();
void changeRumbleLoopVolume(char const *, sead::Vector3<f32> const *, f32, f32, s32);
void updateInfoListLoop();
void changeRumbleLoopPitch(char const *, sead::Vector3<f32> const *, f32, f32, s32);
void startRumbleWithVolume(char const *, f32, f32, s32);
void startRumbleDirectValue(f32, f32, f32, f32, f32, f32, s32);
void stopRumbleDirectValue(s32);
void updateInfoListOneTime();
void testStartPadRumbleWithVolumeNoActor(char const *, f32, f32);
al::WaveVibrationHolder* mVibrationHolder; // _0
al::PlayerHolder* mPlayerHolder; // _ 8
al::CameraDirector* mCameraDirector; // _10
u64 _18;
u64 _20;
u64 _28;
u32 _30;
u32 _34;
u8 _38;
u8 mIsPaused; // _39
u8 _3A[0x70-0x3A];
};
};
| 35.934426
| 108
| 0.637318
|
[
"vector"
] |
4530c97c0268cb4108915a29b4fec12dc84f5283
| 1,603
|
h
|
C
|
export/windows/cpp/obj/include/flixel/util/FlxSort.h
|
TinyPlanetStudios/Project-Crash-Land
|
365f196be4212602d32251566f26b53fb70693f6
|
[
"MIT"
] | null | null | null |
export/windows/cpp/obj/include/flixel/util/FlxSort.h
|
TinyPlanetStudios/Project-Crash-Land
|
365f196be4212602d32251566f26b53fb70693f6
|
[
"MIT"
] | null | null | null |
export/windows/cpp/obj/include/flixel/util/FlxSort.h
|
TinyPlanetStudios/Project-Crash-Land
|
365f196be4212602d32251566f26b53fb70693f6
|
[
"MIT"
] | null | null | null |
// Generated by Haxe 3.3.0
#ifndef INCLUDED_flixel_util_FlxSort
#define INCLUDED_flixel_util_FlxSort
#ifndef HXCPP_H
#include <hxcpp.h>
#endif
HX_DECLARE_CLASS1(flixel,FlxBasic)
HX_DECLARE_CLASS1(flixel,FlxObject)
HX_DECLARE_CLASS2(flixel,util,FlxSort)
HX_DECLARE_CLASS2(flixel,util,IFlxDestroyable)
namespace flixel{
namespace util{
class HXCPP_CLASS_ATTRIBUTES FlxSort_obj : public hx::Object
{
public:
typedef hx::Object super;
typedef FlxSort_obj OBJ_;
FlxSort_obj();
public:
void __construct();
inline void *operator new(size_t inSize, bool inContainer=false,const char *inName="flixel.util.FlxSort")
{ return hx::Object::operator new(inSize,inContainer,inName); }
inline void *operator new(size_t inSize, int extra)
{ return hx::Object::operator new(inSize+extra,false,"flixel.util.FlxSort"); }
static hx::ObjectPtr< FlxSort_obj > __new();
static Dynamic __CreateEmpty();
static Dynamic __Create(hx::DynamicArray inArgs);
//~FlxSort_obj();
HX_DO_RTTI_ALL;
static bool __GetStatic(const ::String &inString, Dynamic &outValue, hx::PropertyAccess inCallProp);
static void __register();
::String __ToString() const { return HX_HCSTRING("FlxSort","\xd0","\x60","\xb0","\xdc"); }
static void __boot();
static Int ASCENDING;
static Int DESCENDING;
static Int byY(Int Order, ::flixel::FlxObject Obj1, ::flixel::FlxObject Obj2);
static ::Dynamic byY_dyn();
static Int byValues(Int Order,Float Value1,Float Value2);
static ::Dynamic byValues_dyn();
};
} // end namespace flixel
} // end namespace util
#endif /* INCLUDED_flixel_util_FlxSort */
| 28.625
| 107
| 0.74922
|
[
"object"
] |
453212e034885d6f4cbec83c74b8530aed1f9eed
| 2,675
|
h
|
C
|
test/performance-regression/full-apps/qmcpack/src/Numerics/RadialOrbitalBase.h
|
JKChenFZ/hclib
|
50970656ac133477c0fbe80bb674fe88a19d7177
|
[
"BSD-3-Clause"
] | 55
|
2015-07-28T01:32:58.000Z
|
2022-02-27T16:27:46.000Z
|
test/performance-regression/full-apps/qmcpack/src/Numerics/RadialOrbitalBase.h
|
JKChenFZ/hclib
|
50970656ac133477c0fbe80bb674fe88a19d7177
|
[
"BSD-3-Clause"
] | 66
|
2015-06-15T20:38:19.000Z
|
2020-08-26T00:11:43.000Z
|
test/performance-regression/full-apps/qmcpack/src/Numerics/RadialOrbitalBase.h
|
JKChenFZ/hclib
|
50970656ac133477c0fbe80bb674fe88a19d7177
|
[
"BSD-3-Clause"
] | 26
|
2015-10-26T22:11:51.000Z
|
2021-03-02T22:09:15.000Z
|
//////////////////////////////////////////////////////////////////
// (c) Copyright 2005- by Jeongnim Kim
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
// National Center for Supercomputing Applications &
// Materials Computation Center
// University of Illinois, Urbana-Champaign
// Urbana, IL 61801
// e-mail: [email protected]
// Tel: 217-244-6319 (NCSA) 217-333-3324 (MCC)
//
// Supported by
// National Center for Supercomputing Applications, UIUC
// Materials Computation Center, UIUC
//////////////////////////////////////////////////////////////////
// -*- C++ -*-
/**@file RadialOrbitalBase.h
* @brief declare/define the base class for RadialOrbital
*/
#ifndef QMCPLUSPLUS_RADIALORBITAL_BASE_H
#define QMCPLUSPLUS_RADIALORBITAL_BASE_H
/** base class for RadialOrbital to facilitate maping between a group of radial functors to a numerical functor
*/
template<class T>
struct RadialOrbitalBase
{
inline RadialOrbitalBase() {}
virtual ~RadialOrbitalBase() {}
virtual T f(T r) const = 0;
virtual T df(T r) const = 0;
};
/** composite class that contains a number of radial functions that belong to a group.
*/
template<class T>
struct RadialOrbitalSet: public RadialOrbitalBase<T>
{
std::vector<RadialOrbitalBase<T>*> InFunc;
~RadialOrbitalSet()
{
typename std::vector<RadialOrbitalBase<T>*>::iterator it(InFunc.begin());
typename std::vector<RadialOrbitalBase<T>*>::iterator it_end(InFunc.end());
while(it != it_end)
{
delete *it;
++it;
}
}
inline
void addRadialOrbital(RadialOrbitalBase<T>* arad)
{
InFunc.push_back(arad);
}
inline T f(T r) const
{
typename std::vector<RadialOrbitalBase<T>*>::const_iterator it(InFunc.begin());
typename std::vector<RadialOrbitalBase<T>*>::const_iterator it_end(InFunc.end());
T res(0.0);
while(it != it_end)
{
res += (*it)->f(r);
++it;
}
return res;
}
inline T df(T r) const
{
typename std::vector<RadialOrbitalBase<T>*>::const_iterator it(InFunc.begin());
typename std::vector<RadialOrbitalBase<T>*>::const_iterator it_end(InFunc.end());
T res(0.0);
while(it != it_end)
{
res += (*it)->df(r);
++it;
}
return res;
}
};
#endif
/***************************************************************************
* $RCSfile$ $Author: jmcminis $
* $Revision: 5794 $ $Date: 2013-04-25 20:14:53 -0400 (Thu, 25 Apr 2013) $
* $Id: RadialOrbitalBase.h 5794 2013-04-26 00:14:53Z jmcminis $
***************************************************************************/
| 29.076087
| 111
| 0.559252
|
[
"vector"
] |
453d1fcd68534f4f05a119a92781bdb681008993
| 8,548
|
h
|
C
|
TT_CORE_SDK/interface/order.h
|
shatteringlass/TT_Samples
|
8b8a39f3495f07f6175d84fcf9828a5a54fa7100
|
[
"BSD-3-Clause"
] | 23
|
2019-05-24T18:20:42.000Z
|
2022-03-08T04:21:00.000Z
|
TT_CORE_SDK/interface/order.h
|
shatteringlass/TT_Samples
|
8b8a39f3495f07f6175d84fcf9828a5a54fa7100
|
[
"BSD-3-Clause"
] | 4
|
2019-11-18T13:27:48.000Z
|
2021-05-03T17:47:18.000Z
|
TT_CORE_SDK/interface/order.h
|
shatteringlass/TT_Samples
|
8b8a39f3495f07f6175d84fcf9828a5a54fa7100
|
[
"BSD-3-Clause"
] | 24
|
2019-02-21T16:58:10.000Z
|
2022-03-30T09:27:28.000Z
|
/***************************************************************************
*
* Unpublished Work Copyright (c) 2018-2020
* Trading Technologies International, Inc.
* All Rights Reserved Worldwide
*
* * * * S T R I C T L Y P R O P R I E T A R Y * * *
*
* WARNING: This program (or document) is unpublished, proprietary property
* of Trading Technologies International, Inc. and is to be maintained in
* strict confidence. Unauthorized reproduction, distribution or disclosure
* of this program (or document), or any program (or document) derived from
* it is prohibited by State and Federal law, and by local law outside of
* the U.S.
*
***************************************************************************/
#pragma once
#include "consts.h"
#include "enums/OrderType.h"
#include "enums/OrderSide.h"
#include "enums/TimeInForce.h"
#include "instrument.h"
#include "execution_report.h"
#include "reject_response.h"
#include "position.h"
#include "shared_ptr.h"
// Example computing the expire date in C++ for December 19, 2026.
// timespec now;
// clock_gettime(CLOCK_REALTIME, &now);
// tm utc, local;
// gmtime_r(&now.tv_sec, &utc);
// localtime_r(&now.tv_sec, &local);
// utc.tm_isdst = -1; // This assumes time_t unit is seconds. True
// time_t bias = now.tv_sec - mktime(&utc); // on all known systems.
// Bias is the offset in seconds from local time to UTC time.
// tm gtdate;
// gtdate.tm_isdst = -1; // Let local system decide.
// gtdate.tm_sec = gtdate.tm_min = gtdate.tm_hour = 0;
// gtdate.tm_mday = 19; // Day of the month (1-31).
// gtdate.tm_mon = 11; // Month of the year (0-11).
// gtdate.tm_year = 2026 - 1900; // Year since 1900.
// time_t seconds = mktime(>date) + bias; // Seconds since epoch UTC.
// The mktime function will take the local time into account.
// In order to convert to UTC add the bias.
// new_order.set_expire_date(seconds * 1000000000ULL); // Seconds to nanoseconds.
namespace ttsdk
{
//! \struct OrderProfile
//! \brief Definition of an order to provide when submitting
struct OrderProfile
{
uint64_t request_id = 0;
uint64_t price_subscription_id = 0;
OrderType type = OrderType::NotSet;
uint64_t expire_date = 0; // nanoseconds since the epoch UTC
OrderSide side = OrderSide::NotSet;
TimeInForce tif = TimeInForce::NotSet;
uint64_t account_id = 0;
char clearing_acct_override[128] = {0};
double price = NAN;
double trigger_price = NAN;
double quantity = NAN;
double display_quantity = NAN;
double minimum_quantity = NAN;
char text[128] = { 0 };
char text_a[128] = { 0 };
char text_b[128] = { 0 };
char text_c[128] = { 0 };
char text_tt[128] = { 0 };
char sender_sub_id[128] = { 0 };
//!< Time variables which can be populated to store on the outbound order
//! for performance measuring of order reactions to price updates
uint64_t server_price_time = 0; //!< Price Server timestamp (mdrc_recv_time);
uint64_t order_stimulus_received = 0; //!< User listener timestamp (order_stimulus_receieved_oc)
};
//! \struct OrderPrcQtyProfile
//! \brief Definition of an order price/qty change
struct OrderPrcQtyProfile
{
uint64_t request_id = 0;
uint64_t price_subscription_id = 0;
double price = NAN;
double quantity = NAN;
//!< Time variables which can be populated to store on the outbound order
//! for performance measuring of order reactions to price updates
uint64_t server_price_time = 0; //!< Price Server timestamp (mdrc_recv_time);
uint64_t order_stimulus_received = 0; //!< User listener timestamp (order_stimulus_receieved_oc)
};
class Order;
using OrderPtr = shared_ptr<Order>;
//! \class IOrderEventHandler
//! \brief Interface for listening to order events
//! \warning Events are delivered on a thread managed by the SDK. The number of
//! order delivery threads is set in the SDK options when initializing
//! the SDK. Users can do work on this thread since they control how many
//! threads there are and the threads are only for order events.
class IOrderEventHandler
{
public:
enum SendCode
{
SUCCESS,
UNKNOWN,
TIMEOUT,
INVALID_ACCOUNT_FOR_MARKET,
INVALID_INSTRUMENT,
NETWORK_FAILURE,
SDK_NOT_INITIALIZED,
ALREADY_IN_MARKET,
CHANGE_AFTER_CANCEL,
ORDER_NOT_WORKING,
PRICE_SUBSCRIPTION_REQUIRED,
ORDER_MISSING_REQUIRED_DATA,
MALFORMED,
ORDERBOOK_NOT_SYNCHRONIZED,
RISK_NOT_READY,
PRICE_SUBSCRIPTION_NOT_READY,
SYNTHETIC_ROUTING_NOT_AVAILABLE,
ORDER_MISSING_MARKET_DATA,
INVALID_ACCOUNT_ID,
RESTRICTED_ACCOUNT,
};
virtual ~IOrderEventHandler() noexcept = default;
//! \brief Callback delivering execution report messages
virtual void OnExecutionReport(OrderPtr order, ExecutionReportPtr execRpt) = 0;
//! \brief Callback delivering order reject messages
virtual void OnReject(OrderPtr order, RejectResponsePtr rejResp) = 0;
//! \brief Callback fired when a request delivery surpasses the timeout threshold
virtual void OnSendFailed(OrderPtr order, const OrderProfile& profile, const SendCode code) = 0;
//! \brief Callback fired when the unsubscribe request is complete and it is safe to
// destroy the handler object (sdk will no longer use it)
// orderId is null when unsubscribed from all order events
virtual void OnUnsubscribed(const char* orderId) = 0;
};
using IOrderEventHandlerPtr = IOrderEventHandler*;
//! \class Order
//! \brief an interface to interact with the order.
class Order : public shared_base
{
public:
explicit Order() {};
~Order() {};
virtual const char* GetOrderId() const noexcept = 0;
virtual void Subscribe(IOrderEventHandler& listener) noexcept = 0;
virtual void Unsubscribe() noexcept = 0;
virtual ExecutionReportPtr GetCurrentState() const noexcept = 0;
virtual InstrumentPtr GetInstrument() const noexcept = 0;
virtual void SendNew(const OrderProfile& profile) noexcept = 0;
virtual void SendChange(const OrderProfile& profile) noexcept = 0;
virtual void SendChange(const OrderPrcQtyProfile& profile) noexcept = 0;
virtual void SendCancel(const OrderProfile& profile) noexcept = 0;
private:
Order(const Order&) = delete;
Order& operator=(Order&) = delete;
Order(Order&&) = delete;
Order& operator=(Order&&) = delete;
};
//! \class IOrderBookEventHandler
//! \brief Interface for listening to order and position events. There is
//! one OrderBookEventHandler set in the SDK. All order events
//! will go to this handler unless a specific handler is set for
//! specific orders on an order by order basis.
//! \warning It is assumed this object will exist for the life of the SDK.
class IOrderBookEventHandler : public IOrderEventHandler
{
public:
virtual ~IOrderBookEventHandler() noexcept = default;
//! \brief Callback delivering position updates
virtual void OnPositionUpdate(const Position& updatedPosition) {};
//! \brief Indicates the given account have been synchronized with the realtime
//! streams, orders and positions have been downloaded.
virtual void OnAccountDownloadEnd(const uint64_t accountId) {};
//! \brief Indicates the given account orders and positions downloads
//! have failed and the account if not usable. This is a critical
//! failure.
virtual void OnAccountDownloadFailed(const uint64_t accountId, const char* message) {};
//! \brief Indicates all orders and positions for all accounts are
//! downloaded and synchronized with the realtime stream.
virtual void OnOrderBookDownloadEnd() {};
};
using IOrderBookEventHandlerPtr = IOrderBookEventHandler*;
}
| 40.899522
| 104
| 0.637108
|
[
"object"
] |
45411a280e7d16076328a520f4c73d80a3ea23a5
| 2,785
|
c
|
C
|
sparse_khatrirao_c.c
|
OsmanMalik/sparse-khatri-rao
|
e9965855ec20298e2c64f6c0af6dd62943113f0d
|
[
"MIT"
] | 1
|
2021-06-01T03:49:07.000Z
|
2021-06-01T03:49:07.000Z
|
sparse_khatrirao_c.c
|
OsmanMalik/sparse-khatri-rao
|
e9965855ec20298e2c64f6c0af6dd62943113f0d
|
[
"MIT"
] | null | null | null |
sparse_khatrirao_c.c
|
OsmanMalik/sparse-khatri-rao
|
e9965855ec20298e2c64f6c0af6dd62943113f0d
|
[
"MIT"
] | 1
|
2020-01-06T10:42:36.000Z
|
2020-01-06T10:42:36.000Z
|
/*
* SPARSE_KHATRIRAO_C.C
*
* Compute the Khatri-Rao product of a cell containing sparse matrices.
*
* C = sparse_khatrirao_c(A) returns the Khatri-Rao product C of the sparse
* matrices stored in the cell A.
*
* The latest version of this code is provided at
* https://github.com/OsmanMalik/sparse-khatri-rao
*
* There are no safety checks in this C code. Consider using the Matlab
* wrapper function provided in the link above.
*
* Please compile by running "mex sparse_khatrirao_c.c" in Matlab.
*
* */
/*
* Author: Osman Asif Malik
* Email: [email protected]
* Date: January 5, 2019
*
* */
#include <stdio.h>
#include "mex.h"
/* Declare global variables */
double **a, *b;
mwIndex **a_ir, **a_jc, *b_ir, *b_jc, *a_no_rows, b_no_rows, no_cols, cnt;
mwSize N;
/* Define function which recursively computes column in output matrix */
void compute_output_column(mwIndex c, mwIndex n, double x, mwIndex ind) {
double x_new;
mwIndex i, ind_new;
for(i = a_jc[n][c]; i < a_jc[n][c+1]; ++i) {
x_new = x*a[n][i];
ind_new = ind*a_no_rows[n] + a_ir[n][i];
if(n < N-1) {
compute_output_column(c, n+1, x_new, ind_new);
} else {
b[cnt] = x_new;
b_ir[cnt] = ind_new;
++cnt;
}
}
}
/* mex interface */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs,
const mxArray *prhs[]) {
/* Declare other variables */
mwSize c, n, b_nnz;
/* Get input variables */
N = mxGetDimensions(prhs[0])[1];
a = malloc(N*sizeof(double *));
a_ir = malloc(N*sizeof(mwIndex *));
a_jc = malloc(N*sizeof(mwIndex *));
a_no_rows = malloc(N*sizeof(mwIndex));
for(n = 0; n < N; ++n) {
a[n] = mxGetPr(mxGetCell(prhs[0], n));
a_ir[n] = mxGetIr(mxGetCell(prhs[0], n));
a_jc[n] = mxGetJc(mxGetCell(prhs[0], n));
a_no_rows[n] = mxGetM(mxGetCell(prhs[0], n));
}
no_cols = mxGetN(mxGetCell(prhs[0], 1));
/* Compute no rows in output matrix */
b_no_rows = 1;
for(n = 0; n < N; ++n) {
b_no_rows *= a_no_rows[n];
}
/* Compute nnz in output matrix */
b_nnz = 1;
for(c = 0; c < no_cols; ++c) {
mwIndex prod = 1;
for(n = 0; n < N; ++n){
prod *= a_jc[n][c+1] - a_jc[n][c];
}
b_nnz += prod;
}
/* Create sparse output matrix */
plhs[0] = mxCreateSparse(b_no_rows, no_cols, b_nnz, mxREAL);
b = mxGetPr(plhs[0]);
b_ir = mxGetIr(plhs[0]);
b_jc = mxGetJc(plhs[0]);
/* Compute jc for output matrix */
b_jc[0] = 0;
for(c = 0; c < no_cols; ++c) {
mwIndex prod = 1;
for(n = 0; n < N; ++n) {
prod *= a_jc[n][c+1] - a_jc[n][c];
}
b_jc[c+1] = b_jc[c] + prod;
}
/* Compute non-zero elements and ir vector for output matrix */
cnt = 0;
for(c = 0; c < no_cols; ++c) {
compute_output_column(c, 0, 1.0, 0);
}
/* Free dynamically allocated memory */
free(a_no_rows);
free(a_jc);
free(a_ir);
free(a);
}
| 24.429825
| 76
| 0.619031
|
[
"vector"
] |
454579dcacb16d81f01872cd186881e166ddcfbf
| 208
|
h
|
C
|
src/ai/weed/balrog_boss_flying.h
|
sodomon2/Cavestory-nx
|
a65ce948c820b3c60b5a5252e5baba6b918d9ebd
|
[
"BSD-2-Clause"
] | 8
|
2018-04-03T23:06:33.000Z
|
2021-12-28T18:04:19.000Z
|
src/ai/weed/balrog_boss_flying.h
|
sodomon2/Cavestory-nx
|
a65ce948c820b3c60b5a5252e5baba6b918d9ebd
|
[
"BSD-2-Clause"
] | null | null | null |
src/ai/weed/balrog_boss_flying.h
|
sodomon2/Cavestory-nx
|
a65ce948c820b3c60b5a5252e5baba6b918d9ebd
|
[
"BSD-2-Clause"
] | 1
|
2020-07-31T00:23:27.000Z
|
2020-07-31T00:23:27.000Z
|
#ifndef __AIBALLROGFLY_H_
#define __AIBALLROGFLY_H_
#include "../../object.h"
void ai_balrog_boss_flying(Object *o);
void ondeath_balrog_boss_flying(Object *o);
void ai_balrog_shot_bounce(Object *o);
#endif
| 23.111111
| 43
| 0.798077
|
[
"object"
] |
454dd78a50a7b8e1d47083a3be176614b6253c42
| 5,255
|
h
|
C
|
update_engine/payload_consumer/install_plan.h
|
Keneral/asystem
|
df12381b72ef3d629c8efc61100cc8c714195320
|
[
"Unlicense"
] | null | null | null |
update_engine/payload_consumer/install_plan.h
|
Keneral/asystem
|
df12381b72ef3d629c8efc61100cc8c714195320
|
[
"Unlicense"
] | null | null | null |
update_engine/payload_consumer/install_plan.h
|
Keneral/asystem
|
df12381b72ef3d629c8efc61100cc8c714195320
|
[
"Unlicense"
] | null | null | null |
//
// Copyright (C) 2011 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef UPDATE_ENGINE_PAYLOAD_CONSUMER_INSTALL_PLAN_H_
#define UPDATE_ENGINE_PAYLOAD_CONSUMER_INSTALL_PLAN_H_
#include <string>
#include <vector>
#include <base/macros.h>
#include <brillo/secure_blob.h>
#include "update_engine/common/action.h"
#include "update_engine/common/boot_control_interface.h"
// InstallPlan is a simple struct that contains relevant info for many
// parts of the update system about the install that should happen.
namespace chromeos_update_engine {
enum class InstallPayloadType {
kUnknown,
kFull,
kDelta,
};
std::string InstallPayloadTypeToString(InstallPayloadType type);
struct InstallPlan {
InstallPlan() = default;
bool operator==(const InstallPlan& that) const;
bool operator!=(const InstallPlan& that) const;
void Dump() const;
// Load the |source_path| and |target_path| of all |partitions| based on the
// |source_slot| and |target_slot| if available. Returns whether it succeeded
// to load all the partitions for the valid slots.
bool LoadPartitionsFromSlots(BootControlInterface* boot_control);
bool is_resume{false};
InstallPayloadType payload_type{InstallPayloadType::kUnknown};
std::string download_url; // url to download from
std::string version; // version we are installing.
uint64_t payload_size{0}; // size of the payload
std::string payload_hash; // SHA256 hash of the payload
uint64_t metadata_size{0}; // size of the metadata
std::string metadata_signature; // signature of the metadata
// The partition slots used for the update.
BootControlInterface::Slot source_slot{BootControlInterface::kInvalidSlot};
BootControlInterface::Slot target_slot{BootControlInterface::kInvalidSlot};
// The vector below is used for partition verification. The flow is:
//
// 1. FilesystemVerifierAction computes and fills in the source partition
// hash based on the guessed source size for delta major version 1 updates.
//
// 2. DownloadAction verifies the source partition sizes and hashes against
// the expected values transmitted in the update manifest. It fills in the
// expected target partition sizes and hashes based on the manifest.
//
// 3. FilesystemVerifierAction computes and verifies the applied partition
// sizes and hashes against the expected values in target_partition_hashes.
struct Partition {
bool operator==(const Partition& that) const;
// The name of the partition.
std::string name;
std::string source_path;
uint64_t source_size{0};
brillo::Blob source_hash;
std::string target_path;
uint64_t target_size{0};
brillo::Blob target_hash;
// Whether we should run the postinstall script from this partition and the
// postinstall parameters.
bool run_postinstall{false};
std::string postinstall_path;
std::string filesystem_type;
};
std::vector<Partition> partitions;
// True if payload hash checks are mandatory based on the system state and
// the Omaha response.
bool hash_checks_mandatory{false};
// True if Powerwash is required on reboot after applying the payload.
// False otherwise.
bool powerwash_required{false};
// If not blank, a base-64 encoded representation of the PEM-encoded
// public key in the response.
std::string public_key_rsa;
};
class InstallPlanAction;
template<>
class ActionTraits<InstallPlanAction> {
public:
// Takes the install plan as input
typedef InstallPlan InputObjectType;
// Passes the install plan as output
typedef InstallPlan OutputObjectType;
};
// Basic action that only receives and sends Install Plans.
// Can be used to construct an Install Plan to send to any other Action that
// accept an InstallPlan.
class InstallPlanAction : public Action<InstallPlanAction> {
public:
InstallPlanAction() {}
explicit InstallPlanAction(const InstallPlan& install_plan):
install_plan_(install_plan) {}
void PerformAction() override {
if (HasOutputPipe()) {
SetOutputObject(install_plan_);
}
processor_->ActionComplete(this, ErrorCode::kSuccess);
}
InstallPlan* install_plan() { return &install_plan_; }
static std::string StaticType() { return "InstallPlanAction"; }
std::string Type() const override { return StaticType(); }
typedef ActionTraits<InstallPlanAction>::InputObjectType InputObjectType;
typedef ActionTraits<InstallPlanAction>::OutputObjectType OutputObjectType;
private:
InstallPlan install_plan_;
DISALLOW_COPY_AND_ASSIGN(InstallPlanAction);
};
} // namespace chromeos_update_engine
#endif // UPDATE_ENGINE_PAYLOAD_CONSUMER_INSTALL_PLAN_H_
| 33.259494
| 79
| 0.750333
|
[
"vector"
] |
bfa239224b10d994e1b532e80868717770289f62
| 605
|
h
|
C
|
Legend_of_Yve/SpriteComponent.h
|
nikitabm/Legend_of_Yve
|
7feb224f5a36a645e5e4c37a10b940aefdf42708
|
[
"MIT"
] | null | null | null |
Legend_of_Yve/SpriteComponent.h
|
nikitabm/Legend_of_Yve
|
7feb224f5a36a645e5e4c37a10b940aefdf42708
|
[
"MIT"
] | null | null | null |
Legend_of_Yve/SpriteComponent.h
|
nikitabm/Legend_of_Yve
|
7feb224f5a36a645e5e4c37a10b940aefdf42708
|
[
"MIT"
] | null | null | null |
#pragma once
#include "Component.h"
#include <SFML/Graphics.hpp>
#include <iostream>
#include "RenderComponent.h"
class SpriteComponent : public RenderComponent
{
private:
sf::Sprite _sprite;
sf::Texture _texture;
bool _CenterAllignment = true;
public:
SpriteComponent();
~SpriteComponent();
sf::Sprite& Sprite();
sf::Vector2f Position() const;
void Sprite(std::string t_imagePath);
void Start() override;
void Update() override;
void SetPosition(const sf::Vector2f t_newPosition);
void SetPosition(sf::Vector2f* t_newPosition);
void Render(sf::RenderWindow& t_window) const override;
};
| 21.607143
| 56
| 0.753719
|
[
"render"
] |
bfa23a97b029af4cdd6dc8a9f3b09f31b511afac
| 13,742
|
h
|
C
|
binminheap.h
|
vvhitedog/BinaryMinHeap
|
5ea79ffa53a9985b1021db21e2d89e3fcfb6d986
|
[
"MIT"
] | null | null | null |
binminheap.h
|
vvhitedog/BinaryMinHeap
|
5ea79ffa53a9985b1021db21e2d89e3fcfb6d986
|
[
"MIT"
] | null | null | null |
binminheap.h
|
vvhitedog/BinaryMinHeap
|
5ea79ffa53a9985b1021db21e2d89e3fcfb6d986
|
[
"MIT"
] | null | null | null |
// Copyright 2019 Matt Gara
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/*
* Binary Minimum Heap
*/
#ifndef BINMINHEAP_H_
#define BINMINHEAP_H_
#include <cassert>
#include <iostream>
#include <iomanip>
#include <boost/unordered_map.hpp>
#include <boost/static_assert.hpp>
namespace binminheap {
/** Adapts a map-like container (e.g. std::map or boost::unordered_map) to
    the heapscratch interface that BinaryMinHeap expects.

    XXX: as with any map-backed heapscratch, entries are never erased from
    the wrapped map by BinaryMinHeap. Lookups of absent keys yield the
    sentinel value std::numeric_limits<std::size_t>::max() ("infinity"),
    which BinaryMinHeap interprets as "key not in heap". Runtime cost is
    comparable to (or better than) erasing; memory use is not optimal.
*/
template <typename map_type> class HeapscratchConverter {
private:
  map_type &wrapped_;    // underlying associative container (not owned)
  size_t not_present_;   // sentinel returned for keys absent from the map

public:
  /** Wrap an existing map; the map must outlive this adapter.
      @param map The underlying map used internally for storage.
  */
  HeapscratchConverter(map_type &map)
      : wrapped_(map), not_present_(std::numeric_limits<std::size_t>::max()) {
    BOOST_STATIC_ASSERT(
        (boost::is_same<typename map_type::mapped_type, size_t>::value));
  }

  /** Read-only lookup. Missing keys map to the "infinity" sentinel, which
      is the behaviour BinaryMinHeap relies on for correctness.
      @param key The key to look up in the map.
      @return Const reference to the stored index, or to the sentinel.
  */
  template <typename K> const size_t &operator[](const K &key) const {
    typename map_type::const_iterator pos = wrapped_.find(key);
    if (pos != wrapped_.end()) {
      return pos->second;
    }
    return not_present_;
  }

  /** Mutable lookup, forwarding to the wrapped map's own operator[]
      (inserts a default-constructed entry for absent keys).
      @param key The key to look up in the map.
      @return Reference to the stored index.
  */
  template <typename K> size_t &operator[](const K &key) {
    return wrapped_[key];
  }
};
/** A binary minimum heap templated implementation.
    Improves upon std::priority_queue by supplying updating of priorities in
    heap dynamically. For efficiency, this implementation requires an external
    buffer (refered to as a heapscratch) to maintain a mapping of keys to position
    in heap. If an upperbound on the keys is known, an array (std::vector) is
    suggested as testing shows it to be the most efficient. If no upperbound is
    known a-priori an unordered map (hash table) is suggested, mapping key type to
    std::size_t.
    XXX: Note that due to interface differences of the access operator [] in a
    map like std:map or boost::unordered_map and an array-like data structure
    std::vector, some ground-work must be done to consolidate the use of the
    map structure for a heapscratch (see HeapscratchConverter). Note that
    std::numeric_limits<size_t>::max() is a special value (infinity) indicating
    the lack of presence of an element from the heap.
    XXX: Note that the key type must be convertable to the std::size_t datatype
    in order for the default heapscratch type (std::vector) to make sense. This
    can be seemlessly accomplished by implementing the cast operator to size_t
    for the key data type supplied.
*/
template <typename P, typename K,
          typename heapscratch_type = std::vector<size_t> >
class BinaryMinHeap {
private:
  std::vector<std::pair<P, K> > data;  // (priority, key) pairs, heap-ordered
  heapscratch_type &key2idx; /* Assume that K is convertable to size_t */

  // Index of the left child of node k: 2*k + 1.
  inline size_t leftChildIdx(size_t k) const {
    return (k << 1) + 1;
  }

  // Index of the right child of node k: 2*k + 2.
  inline size_t rightChildIdx(size_t k) const {
    return (k + 1) << 1;
  }

  // Index of the parent of node k: (k - 1) / 2.
  inline size_t parentIdx(size_t k) const {
    return (k - 1) >> 1;
  }

  // Priority comparison that treats any out-of-range index as +infinity,
  // so child indices past the end of the array compare as "never smaller".
  // FIX: dropped the `register` qualifier on the local — it is ill-formed
  // since C++17 and was a no-op on modern compilers anyway.
  inline bool lessThan(size_t idx1, size_t idx2) const {
    const size_t dataSize = data.size();
    if (idx1 >= dataSize) {
      // idx1 is "infinity": never strictly smaller (covers both-invalid too).
      return false;
    }
    if (idx2 >= dataSize) {
      // idx1 valid, idx2 "infinity".
      return true;
    }
    return data[idx1].first < data[idx2].first;
  }

  // Swap two heap slots and keep the key -> index mapping consistent.
  void swapInHeap(size_t idx1, size_t idx2) {
    const K &s1 = data[idx1].second;
    const K &s2 = data[idx2].second;
    assert((size_t)s1 != std::numeric_limits<size_t>::max() &&
           (size_t)s2 != std::numeric_limits<size_t>::max() &&
           data[idx1].second == s1 && data[idx2].second == s2);
    key2idx[s1] = idx2;
    key2idx[s2] = idx1;
    std::swap(data[idx1], data[idx2]);
  }

  // Restore the heap invariant upwards from idx (after a priority decrease).
  void bubbleUp(size_t idx) {
    if (!idx) {
      return;
    }
    size_t parent = parentIdx(idx);
    if (lessThan(idx, parent)) {
      swapInHeap(idx, parent);
      bubbleUp(parent);
    }
  }

  // Restore the heap invariant downwards from idx (after a priority increase).
  void bubbleDown(size_t idx) {
    if (idx >= data.size()) {
      return;
    }
    size_t left = leftChildIdx(idx);
    size_t right = rightChildIdx(idx);
    size_t minIdx = lessThan(left, right) ? left : right;
    if (lessThan(minIdx, idx)) {
      swapInHeap(idx, minIdx);
      bubbleDown(minIdx);
    }
  }

public:
  /**
    Does nothing to populate heap. Simply sets the underlying heapscratch data
    structure.
    @param heapscratch The heapscratch to use internally.
    @param sizehint Optional expected element count; reserves storage up front.
  */
  BinaryMinHeap(heapscratch_type &heapscratch, const size_t sizehint = 0)
      : key2idx(heapscratch) {
    if (sizehint) {
      data.reserve(sizehint);
    }
  }

  /**
    Construct a heap from a list described by two iterators. Keys in range
    are assumed to be unique, if not behaviour is undefined (checked by
    assertion in debug builds only).
    This construction allows for filling a heap in O(n) time
    as is described in The Algorithm Design Manual (Steven Skiena).
    @param heapscratch The heapscratch to use internally.
    @param begin The starting iterator of the list to populate with.
    @param end The ending iterator of the list to populate with.
  */
  template <typename Iterator>
  BinaryMinHeap(heapscratch_type &heapscratch, Iterator begin, Iterator end)
      : key2idx(heapscratch) {
#ifndef NDEBUG
    std::vector<K> uniqkeys;
    for (Iterator it = begin; it != end; ++it) {
      uniqkeys.push_back(it->second);
    }
    std::sort(uniqkeys.begin(), uniqkeys.end());
    typename std::vector<K>::const_iterator ait =
        adjacent_find(uniqkeys.begin(), uniqkeys.end());
    assert(ait == uniqkeys.end() &&
           "Can not have duplicate keys in range when populating heap.");
#endif
    data.resize(std::distance(begin, end));
    std::copy(begin, end, data.begin());
    size_t k = 0;
    for (Iterator it = begin; it != end; ++it, ++k) {
      key2idx[it->second] = k;
    }
    // Bottom-up heapify: O(n) total work.
    for (int k = data.size() - 1; k >= 0; k--) {
      bubbleDown(k);
    }
  }

  /** Release the heap's backing storage (capacity included) via swap-trick. */
  void pare() {
    std::vector<std::pair<P, K> > tmp;
    data.swap(tmp);
  }

  /** Empty the heap; capacity is retained for reuse. */
  void clear() { data.clear(); }

  /**
    Get pointer to underlying array implementation.
    This method is only provided as a convenience function to iterate over
    the underlying data in the heap efficiently. It is strongly
    suggested that the underlying data is *not* modified unless
    it is well understood what is to be done will not break the heap
    condition.
    @return A pointer to the start of the internal data array.
  */
  const std::pair<P, K> *values() const { return &data[0]; }

  /**
    Get the size of the heap.
    @return The size of the heap.
  */
  size_t size() { return data.size(); }

  /**
    Find key in the heap.
    Finds the requested key in the heap if it exists. If so,
    the (current) priority of the key in the heap is also
    returned.
    @param key The key to find in the heap.
    @param priority The priority of the found key, only if key is found,
    otherwise this is untouched.
    @return True if key is found, false otherwise.
  */
  bool findKey(const K &key, P &priority) const {
    size_t idx = key2idx[key];
    if (idx == std::numeric_limits<size_t>::max()) {
      return false;
    }
    assert(data[idx].second == key);
    priority = data[idx].first;
    return true;
  }

  /**
    Push priority, key pair onto heap. Keys pushed must be unique, this is not
    checked for if debugging is disabled.
    @param keyval The pair of priority and key values to be pushed onto heap.
  */
  void push(const std::pair<P, K> &keyval) {
#ifndef NDEBUG
    // Go through the const interface so absent keys read as "infinity"
    // instead of being default-inserted by a mutable operator[].
    const heapscratch_type &hs = key2idx;
    size_t idx = hs[keyval.second];
    assert(idx == std::numeric_limits<size_t>::max() &&
           "Non unique key attempted to be inserted.");
#endif
    data.push_back(keyval);
    size_t idxInserted = data.size() - 1;
    key2idx[keyval.second] = idxInserted;
    bubbleUp(idxInserted);
  }

  /**
    Get top priority, key pair in heap.
    @return The top (minimum) priority, key pair in heap.
  */
  const std::pair<P, K> &top() const { return data[0]; }

  /**
    Delete the top priority, key pair in heap.
  */
  void pop() {
    assert(data.size() > 0 && "Can not pop an empty heap.");
    // Mark the removed key absent, move the last element to the root,
    // then sift it down to restore the heap invariant.
    key2idx[data[0].second] = std::numeric_limits<size_t>::max();
    data[0] = data[data.size() - 1];
    if (data.size() > 1) {
      key2idx[data[data.size() - 1].second] = 0;
    }
    // FIX: pop_back() is the idiomatic O(1) form of erase(end() - 1).
    data.pop_back();
    bubbleDown(0);
  }

  /**
    Update the priority of a given key in the heap.
    Updating keys through this interface guarantees that the heap
    condition will not be violated. A key absent from the heap is ignored.
    @param key The key to update.
    @param newpriority The new priority to use when updating.
  */
  void updateKey(const K &key, const P &newpriority) {
    size_t idx = key2idx[key];
    if (idx == std::numeric_limits<size_t>::max()) {
      return;
    }
    assert(data[idx].second == key);
    P oldpriority = data[idx].first;
    data[idx].first = newpriority;
    if (oldpriority < newpriority) {
      bubbleDown(idx);
    } else if (newpriority < oldpriority) {
      bubbleUp(idx);
    } else {
      // oldpriority == newpriority NOTHING TO DO
    }
  }

  template <typename Pp, typename Kk, typename Ss>
  friend std::ostream &operator<<(std::ostream &os,
                                  const BinaryMinHeap<Pp, Kk, Ss> &mh);
};
// Pretty-prints the heap as a sideways ASCII tree: leaves in column 0,
// internal nodes in later columns, each node shown as "(priority, key)".
// Layout is computed in two passes: first a (row, column) grid position is
// assigned to every node index (leaves spaced two rows apart, parents
// centred between their children), then the grid is scanned row-major and
// printed with fixed-width fields.
template <typename Pp, typename Kk, typename Ss>
std::ostream &operator<<(std::ostream &os,
const BinaryMinHeap<Pp, Kk, Ss> &mh) {
// Bidirectional maps between grid position (row, column) and heap index.
boost::unordered_map<std::pair<size_t, size_t>, size_t> rc2idx;
boost::unordered_map<size_t, std::pair<size_t, size_t> > idx2rc;
// Number of levels in a complete binary tree of data.size() nodes.
size_t numlevels = std::log(mh.data.size()) / std::log(2) + 1;
// Walk levels deepest-first so children positions exist before parents.
for (int level = int(numlevels) - 1; level >= 0; level--) {
size_t start = (1 << level) - 1;
size_t num2print = 1 << level;
if (level == (int)numlevels - 1) {
// Deepest level: leaves occupy every second row of column 0.
for (size_t k = 0; k < num2print; k++) {
size_t idx = start + k;
std::pair<size_t, size_t> rc = std::make_pair(2 * k, 0);
idx2rc[idx] = rc;
rc2idx[rc] = idx;
}
} else {
// Interior levels: a node sits midway between its two children,
// one column to the right of them.
for (size_t k = 0; k < num2print; k++) {
size_t idx = start + k;
if (idx >= mh.data.size()) {
break;
}
size_t lcidx = mh.leftChildIdx(idx);
size_t rcidx = mh.rightChildIdx(idx);
std::pair<size_t, size_t> lftrc = idx2rc[lcidx];
std::pair<size_t, size_t> rgtrc = idx2rc[rcidx];
std::pair<size_t, size_t> rc =
std::make_pair((lftrc.first + rgtrc.first) / 2, lftrc.second + 1);
idx2rc[idx] = rc;
rc2idx[rc] = idx;
}
}
}
// Find the extent of the occupied grid.
boost::unordered_map<std::pair<size_t, size_t>, size_t>::iterator it;
size_t maxCol, maxRow;
maxCol = 0;
maxRow = 0;
for (it = rc2idx.begin(); it != rc2idx.end(); it++) {
size_t r = it->first.first;
size_t c = it->first.second;
if (r > maxRow) {
maxRow = r;
}
if (c > maxCol) {
maxCol = c;
}
}
// Print the grid row-major; empty or out-of-range cells become blanks of
// the same fixed width so columns stay aligned.
const size_t padsize = 3;
for (size_t r = 0; r <= maxRow; r++) {
for (size_t c = 0; c <= maxCol; c++) {
boost::unordered_map<std::pair<size_t, size_t>, size_t>::iterator fit;
fit = rc2idx.find(std::make_pair(r, c));
bool found = fit != rc2idx.end();
size_t idx = 0;
if (found) {
idx = fit->second;
}
// Positions exist for the full complete tree; skip those past size().
bool withinRange = found && idx < mh.data.size();
if (!withinRange) {
os << std::setfill(' ') << " " << std::setw(padsize) << ' ' << " "
<< std::setw(padsize) << ' ' << " ";
} else {
os << std::setfill(' ') << "(" << std::setw(padsize)
<< mh.data[idx].first << ", " << std::setw(padsize)
<< mh.data[idx].second << ") ";
}
}
os << std::endl;
}
return os;
}
}
#endif
| 30.470067
| 80
| 0.634624
|
[
"vector"
] |
bfb1b54c37b88411316397c327606ea0ddb5eda7
| 212
|
h
|
C
|
src/ecs/components/Transform.h
|
ZaOniRinku/NeigeEngine
|
8dfe06e428ec1751ba0b5003ccdf5162474f002b
|
[
"MIT"
] | 9
|
2020-10-06T11:17:07.000Z
|
2022-03-29T20:28:07.000Z
|
src/ecs/components/Transform.h
|
ZaOniRinku/NeigeEngine
|
8dfe06e428ec1751ba0b5003ccdf5162474f002b
|
[
"MIT"
] | 1
|
2020-10-06T11:55:45.000Z
|
2020-10-06T12:07:46.000Z
|
src/ecs/components/Transform.h
|
ZaOniRinku/NeigeEngine
|
8dfe06e428ec1751ba0b5003ccdf5162474f002b
|
[
"MIT"
] | null | null | null |
#pragma once
#define GLM_FORCE_RADIANS
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
#include "../../external/glm/glm/glm.hpp"
// Spatial state component for an ECS entity.
// NOTE(review): members are intentionally left without initializers here,
// matching the original aggregate; callers appear responsible for setting
// all three fields.
struct Transform {
// Position (presumably world-space — confirm against the systems using it).
glm::vec3 position;
// Euler angles; units (degrees vs radians) are not established by this
// header — confirm at use sites.
glm::vec3 rotation;
// Per-axis scale factors.
glm::vec3 scale;
};
| 17.666667
| 42
| 0.707547
|
[
"transform"
] |
bfc00074e05abd887dc8f1df08e888798e57764f
| 936
|
c
|
C
|
lib/wizards/aarrgh/nyxi/pool2.c
|
vlehtola/questmud
|
8bc3099b5ad00a9e0261faeb6637c76b521b6dbe
|
[
"MIT"
] | null | null | null |
lib/wizards/aarrgh/nyxi/pool2.c
|
vlehtola/questmud
|
8bc3099b5ad00a9e0261faeb6637c76b521b6dbe
|
[
"MIT"
] | null | null | null |
lib/wizards/aarrgh/nyxi/pool2.c
|
vlehtola/questmud
|
8bc3099b5ad00a9e0261faeb6637c76b521b6dbe
|
[
"MIT"
] | null | null | null |
inherit "room/room";
object monster;
/* Room reset: (re)creates the guardian monster and configures the room on
   first load (arg == 0); subsequent resets only re-clone the monster if it
   is gone. */
reset(arg) {
    if(arg) return;
    /* BUGFIX: the original guard tested `morfeus`, an identifier never
       declared in this file; the clone handle is stored in the global
       `monster`. Guard on `monster` so the mob is only cloned when absent. */
    if (!monster) {
        monster = clone_object("/wizards/aarrgh/nyx/mon/morfeus.c");
        move_object(monster, this_object());
    }
    set_light(1);
    set_not_out(1);
    add_exit("pool", "/wizards/aarrgh/nyxi/down1");
    add_exit("north", "/wizards/aarrgh/nyxi/volcano8");
    add_exit("east", "/wizards/aarrgh/nyxi/volcano6");
    add_exit("south", "/wizards/aarrgh/nyxi/volcano1");
    add_exit("west", "/wizards/aarrgh/nyxi/volcano4");
    short_desc = "A Sacrificing pool";
    long_desc = "A sacrificing pool has been built here. The pool is made of a pure marble and is\n"
    "decorated with human skulls. The pool is not deep and there is something strange in the center\n"
    "of the pool.\n";
    /* items holds name/description pairs. It is sized for two pairs but the
       original only fills the first; items[2]/items[3] are left unset as
       before. TODO(review): shrink to allocate(2) or add the second item. */
    items = allocate(4);
    items[0] = "pool";
    items[1] = "This pool is filled with blood";
}
| 34.666667
| 111
| 0.61859
|
[
"object"
] |
bfc2ca10c73474239e36f3e8f8cc2ff06ecd1345
| 5,980
|
h
|
C
|
src/vt/collective/collective_alg.h
|
rbuch/vt
|
74c2e0cae3201dfbcbfda7644c354703ddaed6bb
|
[
"BSD-3-Clause"
] | 26
|
2019-11-26T08:36:15.000Z
|
2022-02-15T17:13:21.000Z
|
src/vt/collective/collective_alg.h
|
rbuch/vt
|
74c2e0cae3201dfbcbfda7644c354703ddaed6bb
|
[
"BSD-3-Clause"
] | 1,215
|
2019-09-09T14:31:33.000Z
|
2022-03-30T20:20:14.000Z
|
src/vt/collective/collective_alg.h
|
rbuch/vt
|
74c2e0cae3201dfbcbfda7644c354703ddaed6bb
|
[
"BSD-3-Clause"
] | 12
|
2019-09-08T00:03:05.000Z
|
2022-02-23T21:28:35.000Z
|
/*
//@HEADER
// *****************************************************************************
//
// collective_alg.h
// DARMA/vt => Virtual Transport
//
// Copyright 2019-2021 National Technology & Engineering Solutions of Sandia, LLC
// (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
// Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact [email protected]
//
// *****************************************************************************
//@HEADER
*/
#if !defined INCLUDED_VT_COLLECTIVE_COLLECTIVE_ALG_H
#define INCLUDED_VT_COLLECTIVE_COLLECTIVE_ALG_H
#include "vt/config.h"
#include "vt/collective/tree/tree.h"
#include "vt/activefn/activefn.h"
#include "vt/messaging/message.h"
#include "vt/collective/barrier/barrier.h"
#include "vt/collective/reduce/reduce_manager.h"
#include "vt/collective/reduce/operators/default_msg.h"
#include "vt/collective/scatter/scatter.h"
#include "vt/utils/hash/hash_tuple.h"
#include "vt/runtime/component/component_pack.h"
#include "vt/collective/collective_scope.h"
#include <memory>
#include <unordered_map>
namespace vt { namespace collective {
constexpr CollectiveAlgType const fst_collective_alg = 1;
/**
* \struct CollectiveAlg
*
* \brief Perform asynchronous collectives within VT
*
* CollectiveAlg is a core VT component that provides the ability to perform
* reductions, scatters, barriers, and safe MPI (collective) operations while
* inside a VT handler.
*/
struct CollectiveAlg :
runtime::component::Component<CollectiveAlg>,
virtual reduce::ReduceManager,
virtual barrier::Barrier,
virtual scatter::Scatter
{
/*----------------------------------------------------------------------------
*
* CollectiveAlg class implements all collective operations:
* 1) Barrier
* 2) One to all: broadcast, scatter
* 3) All to one: reduce, gather
* 4) All to all: allreduce, allgather, alltoall, reduce_scatter
* 5) Scan etc.
*
*------------------------------------------------------------------------------
*/
CollectiveAlg();
//----------------------------------------------------------------------------
//----------------------------------------------------------------------------
// Component name used by the runtime's component registry/diagnostics.
std::string name() override { return "Collective"; }
public:
/**
* \brief Create a new scope for sequenced MPI operations. Each scope has a
* distinct, independent collective sequence of operations.
*
* \param[in] tag integer identifier (default value means allocate a new
* system scope)
*
* \return a new collective scope with sequenced operations
*/
CollectiveScope makeCollectiveScope(TagType scope_tag = no_tag);
private:
friend struct CollectiveScope;
// Internal control message that identifies one sequenced collective:
// which scope it belongs to (user- or system-tagged), its sequence number
// within that scope, and the root node. Derives from ReduceNoneMsg so it
// can travel through the reduction machinery.
struct CollectiveMsg : vt::collective::ReduceNoneMsg {
CollectiveMsg(
bool in_is_user_tag, TagType in_scope, TagType in_seq, NodeType in_root
) : is_user_tag_(in_is_user_tag),
scope_(in_scope),
seq_(in_seq),
root_(in_root)
{ }
bool is_user_tag_ = false; // true if scope_ is a user-allocated tag
TagType scope_ = no_tag; // scope the collective belongs to
TagType seq_ = no_tag; // sequence number within the scope
NodeType root_ = uninitialized_destination; // root node of the collective
};
// Handler invoked when a collective is ready to run for the given message.
static void runCollective(CollectiveMsg* msg);
public:
/**
* \internal \brief Check if a scope has been deallocated
*
* \note Used for testing purposes
*
* \param[in] is_user_tag whether it's a user-tagged scope
* \param[in] scope_bits the scope bits
*
* \return whether it is deallocated
*/
bool isDeallocated(bool is_user_tag, TagType scope_bits) const;
// Serialize the component's state (checkpoint/restart support).
template <typename SerializerT>
void serialize(SerializerT& s) {
s | next_system_scope_
| user_scope_
| system_scope_
| postponed_collectives_;
}
private:
using ScopeMapType = std::unordered_map<TagType, std::unique_ptr<detail::ScopeImpl>>;
TagType next_system_scope_ = 1; /**< The next system allocated scope */
ScopeMapType user_scope_; /**< Live scopes with user tag */
ScopeMapType system_scope_; /**< Live scopes with system tag */
// Collectives that arrived but could not run yet; held until runnable.
std::vector<MsgSharedPtr<CollectiveMsg>> postponed_collectives_;
};
using ReduceMsg = reduce::ReduceMsg;
}} // end namespace vt::collective
namespace vt {
extern collective::CollectiveAlg *theCollective();
} //end namespace vt
#include "vt/collective/reduce/reduce_manager.impl.h"
#include "vt/collective/scatter/scatter.impl.h"
#endif /*INCLUDED_VT_COLLECTIVE_COLLECTIVE_ALG_H*/
| 33.785311
| 87
| 0.670569
|
[
"vector"
] |
bfc46431978e5f362c6824da66cbbb2872ecc6e7
| 5,758
|
h
|
C
|
include/o3d/engine/object/meshdata.h
|
dream-overflow/o3d
|
087ab870cc0fd9091974bb826e25c23903a1dde0
|
[
"FSFAP"
] | 2
|
2019-06-22T23:29:44.000Z
|
2019-07-07T18:34:04.000Z
|
include/o3d/engine/object/meshdata.h
|
dream-overflow/o3d
|
087ab870cc0fd9091974bb826e25c23903a1dde0
|
[
"FSFAP"
] | null | null | null |
include/o3d/engine/object/meshdata.h
|
dream-overflow/o3d
|
087ab870cc0fd9091974bb826e25c23903a1dde0
|
[
"FSFAP"
] | null | null | null |
/**
* @file meshdata.h
* @brief MeshData object that contain any kind of geometry.
* @author Frederic SCHERMA ([email protected])
* @date 2005-10-22
* @copyright Copyright (c) 2001-2017 Dream Overflow. All rights reserved.
* @details
*/
#ifndef _O3D_MESHDATA_H
#define _O3D_MESHDATA_H
#include <vector>
#include "geometrydata.h"
#include "../scene/sceneentity.h"
#include "o3d/core/task.h"
#include "o3d/core/memorydbg.h"
namespace o3d {
class MeshDataManager;
/**
* @brief A mesh data object contain any geometry data, and skinning weighting/bones id
* informations.
* @details It contain a GeometryData object and its geometry object contain one or many
* FaceArray objects.
* @note Important consideration: For save memory and increase OpenGL performance,
* use face arrays in 16bits (UInt16) for meshdata for vertices lesser
* than 65536. Otherwise use a 32bits (UInt32) face array. So take care of using the
* optimal format. You can process an optimize method to process many optimization for
* you, but this cost some CPU time, and only recommended at authoring state.
*/
// Scene resource wrapping a GeometryData (and, per the file header, any
// skinning weight/bone data that geometry carries). Most accessors forward
// to the contained geometry and are safe no-ops when none is set.
class O3D_API MeshData : public SceneResource
{
public:
O3D_DECLARE_DYNAMIC_CLASS(MeshData)
//! Default constructor
MeshData(BaseObject *parent);
//! Compute the size of the geometry and all faces array.
//! @param countOptional count the size of additional arrays of MeshData
UInt32 getCapacity(Bool countOptional) const;
//-----------------------------------------------------------------------------------
// Global data
//-----------------------------------------------------------------------------------
//! Get the number of vertices contained
UInt32 getNumVertices() const;
//! Compute the total number of faces
UInt32 getNumFaces() const;
//-----------------------------------------------------------------------------------
// Generation methods
//-----------------------------------------------------------------------------------
//! Compute tangent and bi-tangent
void genTangentSpace();
//! Compute normals
void genNormals();
//! Compute the bounding volume given the mode
void computeBounding(GeometryData::BoundingMode mode);
//-----------------------------------------------------------------------------------
// General settings
//-----------------------------------------------------------------------------------
//! Set the .o3dms mesh data file name.
inline void setFileName(const String &filename) { m_filename = filename; }
//! Get the .o3dms mesh data file name.
inline const String& getFileName() const { return m_filename; }
//-----------------------------------------------------------------------------------
// Geometry data methods
//-----------------------------------------------------------------------------------
//! Define the geometry object. Ownership transfers to the SmartObject
//! member; presumably the previously held geometry is released by it —
//! confirm against SmartObject's assignment semantics.
inline void setGeometry(GeometryData *geometry) { m_geometry = geometry; }
//! Get the geometry object, or null if none.
inline GeometryData* getGeometry() const { return m_geometry.get(); }
//! Get the geometry object in way to process a specified face array
//! @param arrayId Identifier of the FaceArray into the geometry to bind
//! @note 0 always mean the first face array. No-op when no geometry is set.
inline void bindGeometry(UInt32 arrayId)
{
if (m_geometry.isValid()) {
m_geometry->bindFaceArray(arrayId);
}
}
//! Create/validate geometry data. Create VBO if necessary or validate for VertexArray.
//! @param setPersistant @see Geometry::create
void createGeometry();
//! Destroy the contained geometry and clear the filename
void destroy();
//-----------------------------------------------------------------------------------
// LOD management
//-----------------------------------------------------------------------------------
//! Set the current LOD percent. No-op when no geometry is set.
//! (The `return` of a void expression below is legal C++ and simply
//! forwards to setLodLvl.)
inline void setCurrentLod(UInt32 lvl)
{
if (m_geometry.isValid()) {
return m_geometry->setLodLvl(lvl);
}
}
//! Get the current LOD percent (100 if not supported)
inline UInt32 getCurrentLod()const
{
if (m_geometry.isValid()) {
return m_geometry->getLodLvl();
} else {
return 100;
}
}
//! Is progressive mesh supported (False when no geometry is set)
inline Bool isProgressive() const
{
if (m_geometry.isValid()) {
return m_geometry->isProgressive();
} else {
return False;
}
}
//! Compute the the progressive mesh data. No-op when no geometry is set.
inline void genProgressiveMesh()
{
if (m_geometry.isValid()) {
return m_geometry->genProgressiveMesh();
}
}
//-----------------------------------------------------------------------------------
// Serialization
//-----------------------------------------------------------------------------------
//! Load a geometry file (*.o3dms) using a scene relative path filename or absolute.
Bool load(const String &filename);
//! Save the geometry file (*.o3dms) into the defined filename.
Bool save();
virtual Bool writeToFile(OutStream &os) override;
virtual Bool readFromFile(InStream &is) override;
protected:
String m_filename; //!< filename for retrieve the meshdata
SmartObject<GeometryData> m_geometry; //!< Geometry data (may be null)
};
/**
* @brief Task responsible of the loading of a mesh-data object.
*/
// Task responsible of the loading of a mesh-data object.
// NOTE(review): given the Task base class, execute() presumably performs
// the file load (possibly off the main thread) and finalize() applies the
// loaded geometry to the target MeshData — confirm in the .cpp.
class O3D_API MeshDataTask : public Task
{
public:
//! Default constructor.
//! @param meshData Mesh-data target.
//! @param filename Filename of the mesh-data to load.
MeshDataTask(MeshData *meshData, const String &filename);
//! Perform the task work. @return True on success.
virtual Bool execute() override;
//! Complete the task after execute(). @return True on success.
virtual Bool finalize() override;
private:
String m_filename; //!< source file to load
MeshData *m_meshData; //!< target mesh-data (not owned)
GeometryData *m_geometry; //!< geometry produced by execute()
};
} // namespace o3d
#endif // _O3D_MESHDATA_H
| 30.146597
| 88
| 0.58371
|
[
"mesh",
"geometry",
"object",
"vector"
] |
bfc9b28d9664b557e24c15982dd0b7d35f7e6a66
| 1,276
|
h
|
C
|
src/utils/xrLC_Light/global_calculation_data.h
|
clayne/xray-16
|
32ebf81a252c7179e2824b2874f911a91e822ad1
|
[
"OML",
"Linux-OpenIB"
] | 2
|
2015-02-23T10:43:02.000Z
|
2015-06-11T14:45:08.000Z
|
src/utils/xrLC_Light/global_calculation_data.h
|
clayne/xray-16
|
32ebf81a252c7179e2824b2874f911a91e822ad1
|
[
"OML",
"Linux-OpenIB"
] | 17
|
2022-01-25T08:58:23.000Z
|
2022-03-28T17:18:28.000Z
|
src/utils/xrLC_Light/global_calculation_data.h
|
clayne/xray-16
|
32ebf81a252c7179e2824b2874f911a91e822ad1
|
[
"OML",
"Linux-OpenIB"
] | 1
|
2015-06-05T20:04:00.000Z
|
2015-06-05T20:04:00.000Z
|
#pragma once
#include "utils/communicate.h"
#include "base_lighting.h"
#include "global_slots_data.h"
#include "b_build_texture.h"
#include "global_slots_data.h"
#include "xrCDB/xrCDB.h"
class Shader_xrLC_LIB;
//-----------------------------------------------------------------
// Aggregate of global lighting-compiler (xrLC) state shared across the
// light-calculation pipeline; serialized whole via read()/write().
// NOTE(review): the misspelled name "claculation" is part of the public
// interface (extern gl_data below and external callers) and must not be
// renamed here.
struct global_claculation_data
{
base_lighting g_lights; /////////////////////lc — light sources
Shader_xrLC_LIB* g_shaders_xrlc; ////////////////lc — shader library (not owned here; null until xrLoad)
b_params g_params; //////////////////////lc — build parameters
xr_vector<b_material> g_materials; ///////////////////lc — material table
xr_vector<b_BuildTexture> g_textures; ////////////////////lc — texture table
CDB::MODEL RCAST_Model; ///////////////////lc — collision model for ray casts
Fbox LevelBB; //-----------============ level bounding box
global_slots_data slots_data; //-------============= terrain slot data
xr_vector<b_shader> g_shader_compile; //-----========== shader compile entries
xr_vector<b_rc_face> g_rc_faces; //---------=============== ray-cast face table
///////////////////////////////////////////////////////////////////////
// Deserialize/serialize the whole aggregate (network/checkpoint format).
void read(INetReader& r);
void write(IWriter& w) const;
//////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////
global_claculation_data() : g_shaders_xrlc(0) {}
// Populate this structure from level build files.
void xrLoad();
};
extern global_claculation_data gl_data;
| 38.666667
| 78
| 0.453762
|
[
"model"
] |
bfd9ec282dd9ef003274bd0e3f0377c2caa5f415
| 2,607
|
h
|
C
|
Include/10.0.19041.0/km/crt/yvals.h
|
sezero/windows-sdk-headers
|
e8e9d4d50769ded01a2df905c6bf4355eb3fa8b5
|
[
"MIT"
] | 5
|
2020-05-29T06:22:17.000Z
|
2021-11-28T08:21:38.000Z
|
Include/10.0.19041.0/km/crt/yvals.h
|
sezero/windows-sdk-headers
|
e8e9d4d50769ded01a2df905c6bf4355eb3fa8b5
|
[
"MIT"
] | null | null | null |
Include/10.0.19041.0/km/crt/yvals.h
|
sezero/windows-sdk-headers
|
e8e9d4d50769ded01a2df905c6bf4355eb3fa8b5
|
[
"MIT"
] | 5
|
2020-05-30T04:15:11.000Z
|
2021-11-28T08:48:56.000Z
|
#if defined _STL70_
#include "_70_yvals.h"
#elif defined _STL100_
#include "_100_yvals.h"
#elif defined _STL110_
#include "_110_yvals.h"
#else
/* yvals.h values header for Microsoft C/C++ */
#ifndef _YVALS
#define _YVALS
#include <use_ansi.h>
/* Define _CRTIMP2 */
#ifndef _CRTIMP2
#if defined(_DLL) && !defined(_STATIC_CPPLIB)
#define _CRTIMP2 __declspec(dllimport)
#else /* ndef _DLL */
#define _CRTIMP2
#endif /* _DLL */
#endif /* _CRTIMP2 */
#ifdef _MSC_VER
#pragma pack(push,8)
#endif /* _MSC_VER */
#include <stlshared.h>
#pragma warning(disable: 4018 4114 4146 4244 4245) /* NOTE: These leak out into user code. */
#pragma warning(disable: 4663 4664 4665) /* NOTE: These leak out into user code. */
#pragma warning(disable: 4237 4284 4290 4514) /* NOTE: These leak out into user code. */
/* NAMESPACE */
#if defined(__cplusplus)
#define _STD std::
#define _STD_BEGIN namespace std {
#define _STD_END };
#define _STD_USING
#else
#define _STD ::
#define _STD_BEGIN
#define _STD_END
#endif /* __cplusplus */
_STD_BEGIN
/* TYPE bool */
#if defined(__cplusplus)
typedef bool _Bool;
#endif /* __cplusplus */
/* INTEGER PROPERTIES */
#define _MAX_EXP_DIG 8 /* for parsing numerics */
#define _MAX_INT_DIG 32
#define _MAX_SIG_DIG 36
/* STDIO PROPERTIES */
#define _Filet _iobuf
#ifndef _FPOS_T_DEFINED
#define _FPOSOFF(fp) ((long)(fp))
#endif /* _FPOS_T_DEFINED */
/* NAMING PROPERTIES */
#if defined(__cplusplus)
#define _C_LIB_DECL extern "C" {
#define _END_C_LIB_DECL }
#else
#define _C_LIB_DECL
#define _END_C_LIB_DECL
#endif /* __cplusplus */
#define _CDECL
// CLASS _Lockit
#if defined(__cplusplus)
// RAII guard serializing access to CRT/STL internals: in multithreaded
// builds (_MT) the out-of-line ctor/dtor acquire and release the library
// lock for the object's lifetime; in single-threaded builds both are
// inline no-ops. The _LOCKIT(x) macro follows the same split.
class _CRTIMP2 _Lockit
	{	// lock while object in existence
public:
 #ifdef _MT
  #define _LOCKIT(x)	lockit x
	_Lockit();
	~_Lockit();
 #else
  #define _LOCKIT(x)
	_Lockit()
		{}
	~_Lockit()
		{}
 #endif /* _MT */
	};
#endif /* __cplusplus */
/* MISCELLANEOUS MACROS */
#define _L(c) L##c
#define _Mbstinit(x) mbstate_t x = {0}
#define _MAX _cpp_max
#define _MIN _cpp_min
#if defined(_DLL) && !defined(_MANAGED)
#define _DLL_CPPLIB_STDHPP64
#endif
_STD_END
#ifdef _MSC_VER
#pragma pack(pop)
#endif /* _MSC_VER */
#endif /* _YVALS */
/*
* Copyright (c) 1996 by P.J. Plauger. ALL RIGHTS RESERVED.
* Consult your license regarding permissions and restrictions.
*/
// The file \sdpublic\sdk\inc\crt\yvals.h was reviewed by LCA in June 2011 and per license is
// acceptable for Microsoft use under Dealpoint ID 46582, 201971
#endif // _STL70_ or _STL100_ or _STL110_
/* 88bf0570-3001-4e78-a5f2-be5765546192 */
| 23.486486
| 94
| 0.698888
|
[
"object"
] |
bfe0839c4b6faccf3880b17b916e445d3c91b341
| 824
|
h
|
C
|
src/staticObject.h
|
pavol6999/ppgso-project
|
781e23fce3fe504b300aa07296df517023537a72
|
[
"MIT"
] | null | null | null |
src/staticObject.h
|
pavol6999/ppgso-project
|
781e23fce3fe504b300aa07296df517023537a72
|
[
"MIT"
] | null | null | null |
src/staticObject.h
|
pavol6999/ppgso-project
|
781e23fce3fe504b300aa07296df517023537a72
|
[
"MIT"
] | null | null | null |
#ifndef PPGSO_STATICOBJECT_H
#define PPGSO_STATICOBJECT_H
#include "scene.h"
#include "object.h"
#include "ppgso.h"
// Non-moving scene object selected from a fixed catalogue of num_obj
// mesh/texture pairs by obj_id. Rendering resources are shared statics,
// loaded once for all instances.
class StaticObject : public Object {
private:
// Index into the static mesh/texture/name arrays below.
int obj_id;
// Number of distinct static-object variants available.
const static int num_obj = 4;
static std::unique_ptr<ppgso::Mesh> mesh[num_obj];
static std::unique_ptr<ppgso::Shader> shader;
static std::unique_ptr<ppgso::Texture> texture[num_obj];
// Axis-aligned bounds per variant, stored as {min, max} corner pairs —
// presumably; confirm against where bounding_boxes is filled.
static std::vector<std::array<glm::vec3,2>> bounding_boxes;
// Resource file names, indexed by obj_id.
static std::string texNames[num_obj];
static std::string meshNames[num_obj];
// Shadow-map resolution used by this object's render path.
const unsigned int SHADOW_WIDTH = 1024, SHADOW_HEIGHT = 1024;
public:
// Create variant `id` at the given position/rotation/scale.
StaticObject(int id, glm::vec3 position, glm::vec3 rotation, glm::vec3 scale);
// Per-frame update; returns false when the object should be removed.
bool update(Scene &scene, float dt) override;
void render(Scene &scene) override;
};
#endif //PPGSO_STATICOBJECT_H
| 20.6
| 82
| 0.712379
|
[
"mesh",
"render",
"object",
"vector"
] |
3ad735d51952a5d984413a5825c7f6e0eaa5b5f0
| 1,762
|
h
|
C
|
model/ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload.h
|
ezmaxinc/eZmax-SDK-c
|
725eab79d6311127a2d5bd731b978bce94142d69
|
[
"curl",
"MIT"
] | null | null | null |
model/ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload.h
|
ezmaxinc/eZmax-SDK-c
|
725eab79d6311127a2d5bd731b978bce94142d69
|
[
"curl",
"MIT"
] | null | null | null |
model/ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload.h
|
ezmaxinc/eZmax-SDK-c
|
725eab79d6311127a2d5bd731b978bce94142d69
|
[
"curl",
"MIT"
] | null | null | null |
/*
* ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload.h
*
* Payload for PUT /1/object/ezsigndocument/{pkiEzsigndocumentID}/editEzsignformfieldgroups
*/
#ifndef _ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_H_
#define _ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_H_
#include <string.h>
#include "../external/cJSON.h"
#include "../include/list.h"
#include "../include/keyValuePair.h"
#include "../include/binary.h"
// Forward typedef so the struct can be named before its definition.
typedef struct ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t;
// Response payload: the list of Ezsignformfieldgroup IDs affected by the edit.
typedef struct ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t {
    list_t *a_pki_ezsignformfieldgroup_id; //primitive container
} ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t;
// Allocate a payload taking ownership of the given ID list.
ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_create(
    list_t *a_pki_ezsignformfieldgroup_id
);
// Free the payload and the resources it owns.
void ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_free(ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload);
// Deserialize a payload from its cJSON representation (NULL on failure,
// per the generated-SDK convention — confirm in the .c implementation).
ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_parseFromJSON(cJSON *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payloadJSON);
// Serialize the payload to a newly allocated cJSON object.
cJSON *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_convertToJSON(ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_t *ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload);
#endif /* _ezsigndocument_edit_ezsignformfieldgroups_v1_response_m_payload_H_ */
| 46.368421
| 221
| 0.905221
|
[
"object"
] |
3ada26177ddf587d950fc41d5533ac4ec43eff5f
| 2,094
|
h
|
C
|
apps/my_app.h
|
kanavkhanna/Battleship
|
d7859da32ecf5a871403f9b09f33b0e319cfb8b0
|
[
"MIT"
] | null | null | null |
apps/my_app.h
|
kanavkhanna/Battleship
|
d7859da32ecf5a871403f9b09f33b0e319cfb8b0
|
[
"MIT"
] | null | null | null |
apps/my_app.h
|
kanavkhanna/Battleship
|
d7859da32ecf5a871403f9b09f33b0e319cfb8b0
|
[
"MIT"
] | null | null | null |
// Copyright (c) 2020 CS126SP20. All rights reserved.
#ifndef FINALPROJECT_APPS_MYAPP_H_
#define FINALPROJECT_APPS_MYAPP_H_
#include <battleship/Location.h>
#include <battleship/board.h>
#include <battleship/direction.h>
#include <battleship/engine.h>
#include <battleship/leaderboard.h>
#include <battleship/player.h>
#include <choreograph/Choreograph.h>
#include <cinder/app/App.h>
#include <magicwindow.h>
namespace myapp {
// Top-level screen/flow state of the application.
enum class GameState {
  kWelcome,   // start screen, before play begins
  kPlaying,   // game in progress
  kGameOver,  // end screen / results
};
// Cinder application driving a two-player Battleship game: owns both
// player engines, the shared board, UI drawing, and the leaderboard.
class MyApp : public cinder::app::App {
 private:
  // Game engines for the two players.
  battleship::Engine player1_;
  battleship::Engine player2_;
  // Shared board state rendered each frame.
  battleship::Board gameBoard_;
  // Staged user input for ship placement and attacks.
  vector<battleship::Direction> shipDirections_;
  vector<battleship::Location> shipLocations_;
  vector<battleship::Location> attackLocations_;
  // Wall-clock timestamps — names suggest they track the last un-paused
  // moment and the last pause; confirm usage in my_app.cc.
  std::chrono::time_point<std::chrono::system_clock> last_intact_time_;
  std::chrono::time_point<std::chrono::system_clock> last_pause_time_;
  // Persistent high-score storage.
  battleship::LeaderBoard leaderboard_;
  bool paused_;
  // Whose turn it is; the two flags are presumably kept mutually
  // exclusive by the update logic — verify in my_app.cc.
  bool isPlayer1Turn = true;
  bool isPlayer2Turn = false;
  std::string winner_name_;
  // Guards against printing the game-over screen more than once.
  bool printed_game_over_;
  // Board size (cells) and on-screen tile size (pixels).
  const size_t size_;
  GameState state_;
  const size_t tile_size_;
  // Leaderboard rows shown on the score screen.
  std::vector<battleship::Player> top_players_;
  std::vector<battleship::Player> current_player_info_;
 public:
  MyApp();
  // cinder::app::App lifecycle hooks.
  void setup() override;
  void update() override;
  void draw() override;
  void keyDown(cinder::app::KeyEvent) override;
  void mouseDown(cinder::app::MouseEvent) override;
  // Restore all state to a fresh game.
  void ResetGame();
  void DrawBackground();
  // Render 'text' in 'color' within a box of 'size' centered at 'loc'.
  template <typename C>
  void PrintText(const string& text, const C& color, const glm::ivec2& size,
                 const glm::vec2& loc);
  void DrawGameOver();
  void DrawScoreBoard();
  void DrawWelcomeScreen();
  //executes the task of a player: create ships, or attack
  void PlayerTask(battleship::Engine&, battleship::Engine&);
  //executes the drawing part of the game, depending on the user commands (i.e. for drawing ships or missiles)
  void PlayerDraw(battleship::Engine&,battleship::Engine&);
};
} // namespace myapp
#endif // FINALPROJECT_APPS_MYAPP_H_
| 26.175
| 110
| 0.745941
|
[
"vector"
] |
3ae590dd42a858074b3e2ec48572653c9e303d0a
| 3,219
|
h
|
C
|
pyop2/vecset.h
|
chromy/PyOP2
|
8a1955c628b795019485c9771709c338a806e661
|
[
"BSD-3-Clause"
] | null | null | null |
pyop2/vecset.h
|
chromy/PyOP2
|
8a1955c628b795019485c9771709c338a806e661
|
[
"BSD-3-Clause"
] | null | null | null |
pyop2/vecset.h
|
chromy/PyOP2
|
8a1955c628b795019485c9771709c338a806e661
|
[
"BSD-3-Clause"
] | null | null | null |
// Copyright (C) 2009-2014 Garth N. Wells, Florian Rathgeber
//
// This file is part of DOLFIN.
//
// DOLFIN is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// DOLFIN is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
//
// First added: 2009-08-09
// Last changed: 2014-05-12
#ifndef __VEC_SET_H
#define __VEC_SET_H
#include <algorithm>
#include <vector>
// This is a set-like data structure. It is not ordered and it is based
// a std::vector. It uses linear search, and can be faster than std::set
// and boost::unordered_set in some cases.
// vecset<T>: an unordered, set-like container backed by std::vector<T>.
//
// Membership tests use linear search (std::find), so lookup/insert/erase
// are O(n); the contiguous storage still makes it faster than std::set or
// boost::unordered_set for small element counts.
template<typename T>
class vecset {
public:
  typedef typename std::vector<T>::iterator iterator;
  typedef typename std::vector<T>::const_iterator const_iterator;
  typedef typename std::vector<T>::size_type size_type;

  /// Create empty set
  vecset() {}

  /// Create empty set but reserve capacity for n values
  vecset(size_type n) {
    _x.reserve(n);
  }

  /// Copy constructor
  vecset(const vecset<T>& x) : _x(x._x) {}

  /// Destructor
  ~vecset() {}

  /// Find entry in set and return an iterator to the entry
  iterator find(const T& x) {
    return std::find(_x.begin(), _x.end(), x);
  }

  /// Find entry in set and return an iterator to the entry (const)
  const_iterator find(const T& x) const {
    return std::find(_x.begin(), _x.end(), x);
  }

  /// Insert entry; returns true if it was added, false if already present
  bool insert(const T& x) {
    if( find(x) == this->end() ) {
      _x.push_back(x);
      return true;
    } else {
      return false;
    }
  }

  /// Insert entries from [first, last); duplicates are skipped.
  /// Delegates to the single-element insert so the membership logic
  /// lives in one place.
  template <typename InputIt>
  void insert(const InputIt first, const InputIt last) {
    for (InputIt position = first; position != last; ++position)
      insert(*position);
  }

  const_iterator begin() const {
    return _x.begin();
  }

  const_iterator end() const {
    return _x.end();
  }

  /// vecset size
  size_type size() const {
    return _x.size();
  }

  /// Erase an entry (no-op if absent)
  void erase(const T& x) {
    iterator p = find(x);
    if (p != _x.end())
      _x.erase(p);
  }

  /// Sort set
  void sort() {
    std::sort(_x.begin(), _x.end());
  }

  /// Clear set
  void clear() {
    _x.clear();
  }

  /// Reserve space for a given number of set members
  void reserve(size_type n) {
    _x.reserve(n);
  }

  /// Set capacity (now const: it does not modify the set)
  size_type capacity() const {
    return _x.capacity();
  }

  /// Index the nth entry in the set (no bounds checking).
  /// Returns a const reference to avoid copying T; callers that
  /// previously received a value still compile unchanged.
  const T& operator[](size_type n) const {
    return _x[n];
  }

private:
  std::vector<T> _x;
};
#endif
| 24.022388
| 78
| 0.613234
|
[
"vector"
] |
3ae821d15c46efa002c828e74b7029ea82e56e92
| 32,715
|
c
|
C
|
net/rras/netsh/if/repair.c
|
npocmaka/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 17
|
2020-11-13T13:42:52.000Z
|
2021-09-16T09:13:13.000Z
|
net/rras/netsh/if/repair.c
|
sancho1952007/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 2
|
2020-10-19T08:02:06.000Z
|
2020-10-19T08:23:18.000Z
|
net/rras/netsh/if/repair.c
|
sancho1952007/Windows-Server-2003
|
5c6fe3db626b63a384230a1aa6b92ac416b0765f
|
[
"Unlicense"
] | 14
|
2020-11-14T09:43:20.000Z
|
2021-08-28T08:59:57.000Z
|
#include "precomp.h"
EXTERN_C
VOID
WINAPI
NetCfgDiagRepairRegistryBindings (
IN FILE* pLogFile);
#define REG_DELETE 100
CONST CHAR Empty[] = "";
typedef union _TR_VALUE_DATA {
ULONG_PTR __asignany;
ULONG Value;
CONST BYTE* Pointer;
} TR_VALUE_DATA;
typedef
VOID
(TR_CONDITIONAL_ROUTINE)(
IN CONST struct _TR_REPAIR_CONTEXT *Ctx,
IN CONST struct _TR_KEY_DESCRIPTOR *Kd,
IN CONST struct _TR_VALUE_DESCRIPTOR *Vd,
OUT DWORD *RegType,
OUT TR_VALUE_DATA *Data,
OUT DWORD *DataSize
);
typedef TR_CONDITIONAL_ROUTINE *PTR_CONDITIONAL_ROUTINE;
typedef struct _TR_VALUE_DESCRIPTOR {
PCSTR SubKeyName;
PCSTR ValueName;
DWORD RegType;
TR_VALUE_DATA Data;
DWORD DataSize;
//
// If Conditional is TRUE, then Data and DataSize are obtained at run-time
// by invoking the routine whose address is in ConditionalRoutine.
// ConditionalData may be used to hold arbitrary information for use by
// ConditionalRoutine.
//
BOOLEAN Conditional;
PTR_CONDITIONAL_ROUTINE ConditionalRoutine;
TR_VALUE_DATA ConditionalData;
} TR_VALUE_DESCRIPTOR;
#define TRV_DW(_subkey, _valuename, _data) \
{ _subkey, _valuename, REG_DWORD, (ULONG_PTR)_data, sizeof(DWORD) },
#define TRV_ESZ(_subkey, _valuename, _esz) \
{ _subkey, _valuename, REG_EXPAND_SZ, (ULONG_PTR)_esz, sizeof(_esz) },
#define TRV_MSZ(_subkey, _valuename, _msz) \
{ _subkey, _valuename, REG_MULTI_SZ, (ULONG_PTR)_msz, sizeof(_msz) },
#define TRV_SZ(_subkey, _valuename, _sz) \
{ _subkey, _valuename, REG_SZ, (ULONG_PTR)_sz, sizeof(_sz) },
#define TRV_DEL(_subkey, _valuename) \
{ _subkey, _valuename, REG_DELETE, 0, 0 },
#define TRV_COND(_subkey, _valuename, _routine, _cdata) \
{ _subkey, _valuename, REG_NONE, 0, 0, TRUE, _routine, _cdata },
#define TRV_END() \
{ NULL, NULL, REG_NONE, 0, 0 }
typedef struct _TR_KEY_DESCRIPTOR {
//
// RootKey is one of the HKEY_* values. (e.g. HKEY_LOCAL_MACHINE)
//
HKEY RootKey;
//
// ParentKey is the name of a subkey (under RootKey) where either the
// values reside or subkeys are to be enumerated and values found under
// each subkey.
//
PCSTR ParentKeyName;
//
// TRUE if all subkeys of Parentkey are to be enumerated and values
// found under each of those subkeys.
//
BOOL EnumKey;
//
// Pointer to an array of value descriptors. The array is terminated
// with an entry of all zeros.
//
CONST TR_VALUE_DESCRIPTOR *Value;
} TR_KEY_DESCRIPTOR;
#define DHCP_OPT_TCPIP(_name) \
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\"_name"\0"
#define DHCP_OPT_TCPIP_INTERFACE(_name) \
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\Interfaces\\?\\"_name"\0"
#define DHCP_OPT_LEGACY_TCPIP_INTERFACE(_name) \
"SYSTEM\\CurrentControlSet\\Services\\?\\Parameters\\Tcpip\\"_name"\0"
#define DHCP_OPT_NETBT(_name) \
"SYSTEM\\CurrentControlSet\\Services\\NetBT\\Parameters\\"_name
#define DHCP_OPT_NETBT_INTERFACE(_name) \
"SYSTEM\\CurrentControlSet\\Services\\NetBT\\Parameters\\Interfaces\\Tcpip_?\\"_name"\0"
#define DHCP_OPT_NETBT_ADAPTER(_name) \
"SYSTEM\\CurrentControlSet\\Services\\NetBT\\Adapters\\?\\"_name"\0"
CONST TR_VALUE_DESCRIPTOR DhcpParameterOptions_Values [] =
{
TRV_DW ("1", "KeyType", 7)
TRV_MSZ("1", "RegLocation", DHCP_OPT_TCPIP_INTERFACE ("DhcpSubnetMaskOpt")
DHCP_OPT_LEGACY_TCPIP_INTERFACE("DhcpSubnetMaskOpt"))
TRV_DW ("3", "KeyType", 7)
TRV_MSZ("3", "RegLocation", DHCP_OPT_TCPIP_INTERFACE ("DhcpDefaultGateway")
DHCP_OPT_LEGACY_TCPIP_INTERFACE("DhcpDefaultGateway"))
TRV_DW ("6", "KeyType", 1)
TRV_MSZ("6", "RegLocation", DHCP_OPT_TCPIP_INTERFACE("DhcpNameServer")
DHCP_OPT_TCPIP ("DhcpNameServer"))
TRV_DW ("15", "KeyType", 1)
TRV_MSZ("15", "RegLocation", DHCP_OPT_TCPIP_INTERFACE("DhcpDomain")
DHCP_OPT_TCPIP ("DhcpDomain"))
TRV_DW ("44", "KeyType", 1)
TRV_MSZ("44", "RegLocation", DHCP_OPT_NETBT_INTERFACE("DhcpNameServerList")
DHCP_OPT_NETBT_ADAPTER ("DhcpNameServer"))
TRV_DW ("46", "KeyType", 4)
TRV_SZ ("46", "RegLocation", DHCP_OPT_NETBT("DhcpNodeType"))
TRV_DW ("47", "KeyType", 1)
TRV_SZ ("47", "RegLocation", DHCP_OPT_NETBT("DhcpScopeID"))
TRV_DW ("DhcpNetbiosOptions", "KeyType", 4)
TRV_DW ("DhcpNetbiosOptions", "OptionId", 1)
TRV_DW ("DhcpNetbiosOptions", "VendorType", 1)
TRV_MSZ("DhcpNetbiosOptions", "RegLocation", DHCP_OPT_NETBT_INTERFACE("DhcpNetbiosOptions"))
TRV_END()
};
CONST TR_KEY_DESCRIPTOR DhcpParameterOptions =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Dhcp\\Parameters\\Options",
FALSE,
DhcpParameterOptions_Values
};
CONST TR_VALUE_DESCRIPTOR DhcpParameter_Values [] =
{
TRV_ESZ(NULL, "ServiceDll", "%SystemRoot%\\System32\\dhcpcsvc.dll")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR DhcpParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Dhcp\\Parameters",
FALSE,
DhcpParameter_Values
};
CONST TR_VALUE_DESCRIPTOR DnscacheParameter_Values [] =
{
TRV_ESZ(NULL, "ServiceDll", "%SystemRoot%\\System32\\dnsrslvr.dll")
TRV_DEL(NULL, "AdapterTimeoutCacheTime")
TRV_DEL(NULL, "CacheHashTableBucketSize")
TRV_DEL(NULL, "CacheHashTableSize")
TRV_DEL(NULL, "DefaultRegistrationRefreshInterval")
TRV_DEL(NULL, "MaxCacheEntryTtlLimit")
TRV_DEL(NULL, "MaxSoaCacheEntryTtlLimit")
TRV_DEL(NULL, "NegativeCacheTime")
TRV_DEL(NULL, "NegativeSoaCacheTime")
TRV_DEL(NULL, "NetFailureCacheTime")
TRV_DEL(NULL, "NetFailureErrorPopupLimit")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR DnscacheParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Dnscache\\Parameters",
FALSE,
DnscacheParameter_Values
};
CONST TR_VALUE_DESCRIPTOR LmHostsParameter_Values [] =
{
TRV_ESZ(NULL, "ServiceDll", "%SystemRoot%\\System32\\lmhsvc.dll")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR LmHostsParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\LmHosts\\Parameters",
FALSE,
LmHostsParameter_Values
};
CONST TR_VALUE_DESCRIPTOR NetbtInterface_Values [] =
{
TRV_DEL(NULL, "EnableAdapterDomainNameRegistration")
TRV_MSZ(NULL, "NameServerList", "")
TRV_DW (NULL, "NetbiosOptions", 0)
TRV_END()
};
CONST TR_KEY_DESCRIPTOR NetbtInterfaces =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Netbt\\Parameters\\Interfaces",
TRUE,
NetbtInterface_Values
};
CONST TR_VALUE_DESCRIPTOR NetbtParameter_Values [] =
{
TRV_DEL(NULL, "BacklogIncrement")
TRV_DW (NULL, "BcastNameQueryCount", 3)
TRV_DW (NULL, "BcastQueryTimeout", 750)
TRV_DEL(NULL, "BroadcastAddress")
TRV_DEL(NULL, "CachePerAdapterEnabled")
TRV_DW (NULL, "CacheTimeout", 600000)
TRV_DEL(NULL, "ConnectOnRequestedInterfaceOnly")
TRV_DEL(NULL, "EnableDns")
TRV_DEL(NULL, "EnableLmhosts")
TRV_DEL(NULL, "EnableProxy")
TRV_DEL(NULL, "EnableProxyRegCheck")
TRV_DEL(NULL, "InitialRefreshT.O.")
TRV_DEL(NULL, "LmhostsTimeout")
TRV_DEL(NULL, "MaxConnBackLog")
TRV_DEL(NULL, "MaxDgramBuffering")
TRV_DEL(NULL, "MaxPreloadEntries")
TRV_DEL(NULL, "MinimumFreeLowerConnections")
TRV_DEL(NULL, "MinimumRefreshSleepTime")
TRV_DW (NULL, "NameServerPort", 137)
TRV_DW (NULL, "NameSrvQueryCount", 3)
TRV_DW (NULL, "NameSrvQueryTimeout", 1500)
TRV_SZ (NULL, "NbProvider", "_tcp")
TRV_DEL(NULL, "NodeType")
TRV_DEL(NULL, "NoNameReleaseOnDemand")
TRV_DEL(NULL, "RandomAdapter")
TRV_DEL(NULL, "RefreshOpCode")
TRV_DEL(NULL, "ScopeId")
TRV_DW (NULL, "SessionKeepAlive", 3600000)
TRV_DEL(NULL, "SingleResponse")
TRV_DW (NULL, "Size/Small/Medium/Large", 1)
TRV_DEL(NULL, "SmbDeviceEnabled")
TRV_SZ (NULL, "TransportBindName", "\\Device\\")
TRV_DEL(NULL, "TryAllIpAddrs")
TRV_DEL(NULL, "TryAllNameServers")
TRV_DEL(NULL, "UseDnsOnlyForNameResolutions")
TRV_DEL(NULL, "WinsDownTimeout")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR NetbtParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Netbt\\Parameters",
FALSE,
NetbtParameter_Values
};
CONST TR_VALUE_DESCRIPTOR NlaParameter_Values [] =
{
TRV_ESZ(NULL, "ServiceDll", "%SystemRoot%\\System32\\mswsock.dll")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR NlaParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Nla\\Parameters",
FALSE,
NlaParameter_Values
};
// Identifies which TCP/IP registry value a conditional routine is being
// asked to decide about; carried in TR_VALUE_DESCRIPTOR.ConditionalData.
typedef enum {
    TrAddressTypeTcpipValue,
    TrDefaultGatewayMetricTcpipValue,
    TrDisableDynamicUpdateTcpipValue,
    TrDontAddDefaultGatewayTcpipValue,
    TrEnableDhcpTcpipValue,
    TrNameServerTcpipValue,
    TrRawIpAllowedProtocolsTcpipValue,
    TrTcpAllowedPortsTcpipValue,
    TrUdpAllowedPortsTcpipValue,
    TrEnableDeadGwDetectTcpipValue,
} TR_TCPIP_VALUE;
// Run-time deciders: the first selects a value's disposition based on
// WAN vs. LAN interface type, the second based on whether RRAS is installed.
TR_CONDITIONAL_ROUTINE TrTcpipWanConditionalRoutine;
TR_CONDITIONAL_ROUTINE TrTcpipRrasConditionalRoutine;
CONST TR_VALUE_DESCRIPTOR TcpipInterface_Values [] =
{
TRV_COND(NULL, "AddressType",
TrTcpipWanConditionalRoutine,
TrAddressTypeTcpipValue)
TRV_MSZ (NULL, "DefaultGateway", "")
TRV_COND(NULL, "DefaultGatewayMetric",
TrTcpipWanConditionalRoutine,
TrDefaultGatewayMetricTcpipValue)
TRV_COND(NULL, "DisableDynamicUpdate",
TrTcpipWanConditionalRoutine,
TrDisableDynamicUpdateTcpipValue)
TRV_DEL (NULL, "DisableReverseAddressRegistrations")
TRV_COND(NULL, "DontAddDefaultGateway",
TrTcpipRrasConditionalRoutine,
TrDontAddDefaultGatewayTcpipValue)
TRV_COND(NULL, "EnableDhcp",
TrTcpipWanConditionalRoutine,
TrEnableDhcpTcpipValue)
TRV_MSZ (NULL, "IpAddress", "0.0.0.0\0")
TRV_DEL (NULL, "IpAutoconfigurationAddress")
TRV_DEL (NULL, "IpAutoconfigurationEnabled")
TRV_DEL (NULL, "IpAutoconfigurationMask")
TRV_DEL (NULL, "IpAutoconfigurationSeed")
TRV_DEL (NULL, "IpAutoconfigurationSubnet")
TRV_DEL (NULL, "MaxForwardPending")
TRV_DEL (NULL, "Mtu")
TRV_COND(NULL, "NameServer",
TrTcpipWanConditionalRoutine,
TrNameServerTcpipValue)
TRV_DEL (NULL, "PerformRouterDiscovery")
TRV_DEL (NULL, "PerformRouterDiscoveryBackup")
TRV_DEL (NULL, "PptpFiltering")
TRV_COND(NULL, "RawIpAllowedProtocols",
TrTcpipWanConditionalRoutine,
TrRawIpAllowedProtocolsTcpipValue)
TRV_DEL (NULL, "SolicitationAddressBcast")
TRV_MSZ (NULL, "SubnetMask", "0.0.0.0\0")
TRV_COND(NULL, "TcpAllowedPorts",
TrTcpipWanConditionalRoutine,
TrTcpAllowedPortsTcpipValue)
TRV_DEL (NULL, "TcpDelAckTicks")
TRV_DEL (NULL, "TcpInitialRtt")
TRV_DEL (NULL, "TcpWindowSize")
TRV_DEL (NULL, "TypeOfInterface")
TRV_COND(NULL, "UdpAllowedPorts",
TrTcpipWanConditionalRoutine,
TrUdpAllowedPortsTcpipValue)
TRV_DW (NULL, "UseZeroBroadcast", 0)
TRV_END ()
};
CONST TR_KEY_DESCRIPTOR TcpipInterfaces =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\Interfaces",
TRUE,
TcpipInterface_Values
};
CONST TR_VALUE_DESCRIPTOR TcpipParameter_Values [] =
{
TRV_DEL (NULL, "AllowUnqualifiedQuery")
TRV_DEL (NULL, "AllowUserRawAccess")
TRV_DEL (NULL, "ArpAlwaysSourceRoute")
TRV_DEL (NULL, "ArpCacheLife")
TRV_DEL (NULL, "ArpCacheMinReferencedLife")
TRV_DEL (NULL, "ArpRetryCount")
TRV_DEL (NULL, "ArpTrSingleRoute")
TRV_DEL (NULL, "ArpUseEtherSnap")
TRV_ESZ (NULL, "DatabasePath", "%SystemRoot%\\System32\\drivers\\etc")
TRV_DEL (NULL, "DefaultRegistrationTtl")
TRV_DEL (NULL, "DefaultTosValue")
TRV_DEL (NULL, "DefaultTtl")
TRV_DEL (NULL, "DisableDhcpMediaSense")
TRV_DEL (NULL, "DisableDynamicUpdate")
TRV_DEL (NULL, "DisableIpSourceRouting")
TRV_DEL (NULL, "DisableMediaSenseEventLog")
TRV_DEL (NULL, "DisableReplaceAddressesInConflicts")
TRV_DEL (NULL, "DisableTaskOffload")
TRV_DEL (NULL, "DisableUserTosSetting")
TRV_DEL (NULL, "DisjointNameSpace")
TRV_DEL (NULL, "DontAddDefaultGatewayDefault")
TRV_DEL (NULL, "DnsQueryTimeouts")
TRV_DEL (NULL, "EnableAddrMaskReply")
TRV_DEL (NULL, "EnableBcastArpReply")
TRV_COND(NULL, "EnableDeadGwDetect",
TrTcpipRrasConditionalRoutine,
TrEnableDeadGwDetectTcpipValue)
TRV_DEL (NULL, "EnableFastRouteLookup")
TRV_DEL (NULL, "EnableIcmpRedirect")
TRV_DEL (NULL, "EnableMulticastForwarding")
TRV_DEL (NULL, "EnablePmtuBhDetect")
TRV_DEL (NULL, "EnablePmtuDiscovery")
TRV_DEL (NULL, "EnableSecurityFilters")
TRV_DEL (NULL, "FfpControlFlags")
TRV_DEL (NULL, "FfpFastForwardingCacheSize")
TRV_DW (NULL, "ForwardBroadcasts", 0)
TRV_DEL (NULL, "ForwardBufferMemory")
TRV_DEL (NULL, "GlobalMaxTcpWindowSize")
TRV_DEL (NULL, "IgmpLevel")
TRV_DEL (NULL, "IpAutoconfigurationEnabled")
TRV_DEL (NULL, "IpAutoconfigurationMask")
TRV_DEL (NULL, "IpAutoconfigurationSeed")
TRV_DW (NULL, "IpEnableRouter", 0)
TRV_DEL (NULL, "IpEnableRouterBackup")
TRV_DEL (NULL, "KeepAliveInterval")
TRV_DEL (NULL, "KeepAliveTime")
TRV_SZ (NULL, "NameServer", "")
TRV_DEL (NULL, "MaxForwardBufferMemory")
TRV_DEL (NULL, "MaxFreeTWTcbs")
TRV_DEL (NULL, "MaxFreeTcbs")
TRV_DEL (NULL, "MaxHashTableSize")
TRV_DEL (NULL, "MaxNormLookupMemory")
TRV_DEL (NULL, "MaxNumForwardPackets")
TRV_DEL (NULL, "MaxUserPort")
TRV_DEL (NULL, "NumForwardPackets")
TRV_DEL (NULL, "NumTcbTablePartitions")
TRV_DEL (NULL, "PptpTcpMaxDataRetransmissions")
TRV_DEL (NULL, "PrioritizeRecordData")
TRV_DEL (NULL, "QueryIpMatching")
TRV_DEL (NULL, "SackOpts")
TRV_DEL (NULL, "SearchList")
TRV_DEL (NULL, "SynAttackProtect")
TRV_DEL (NULL, "Tcp1323Opts")
TRV_DEL (NULL, "TcpMaxConnectResponseRetransmissions")
TRV_DEL (NULL, "TcpMaxConnectRetransmissions")
TRV_DEL (NULL, "TcpMaxDataRetransmissions")
TRV_DEL (NULL, "TcpMaxDupAcks")
TRV_DEL (NULL, "TcpMaxHalfOpen")
TRV_DEL (NULL, "TcpMaxHalfOpenRetried")
TRV_DEL (NULL, "TcpMaxPortsExhausted")
TRV_DEL (NULL, "TcpMaxSendFree")
TRV_DEL (NULL, "TcpNumConnections")
TRV_DEL (NULL, "TcpTimedWaitDelay")
TRV_DEL (NULL, "TcpUseRfc1122UrgentPointer")
TRV_DEL (NULL, "TcpWindowSize")
TRV_DEL (NULL, "TrFunctionalMcastAddress")
TRV_DEL (NULL, "UpdateSecurityLevel")
TRV_DEL (NULL, "UseDomainNameDevolution")
TRV_DW ("Winsock", "UseDelayedAcceptance", 0)
TRV_END ()
};
CONST TR_KEY_DESCRIPTOR TcpipParameters =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters",
FALSE,
TcpipParameter_Values
};
CONST TR_VALUE_DESCRIPTOR TcpipPerformance_Values [] =
{
TRV_SZ (NULL, "Close", "CloseTcpIpPerformanceData")
TRV_SZ (NULL, "Collect", "CollectTcpIpPerformanceData")
TRV_SZ (NULL, "Library", "Perfctrs.dll")
TRV_SZ (NULL, "Open", "OpenTcpIpPerformanceData")
TRV_SZ (NULL, "Object List", "502 510 546 582 638 658")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR TcpipPerformance =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Performance",
FALSE,
TcpipPerformance_Values
};
CONST TR_VALUE_DESCRIPTOR TcpipServiceProvider_Values [] =
{
TRV_DW (NULL, "Class", 8)
TRV_DW (NULL, "DnsPriority", 2000)
TRV_DW (NULL, "HostsPriority", 500)
TRV_DW (NULL, "LocalPriority", 499)
TRV_DW (NULL, "NetbtPriority", 2001)
TRV_SZ (NULL, "Name", "TCP/IP")
TRV_ESZ(NULL, "ProviderPath", "%SystemRoot%\\System32\\wsock32.dll")
TRV_END()
};
CONST TR_KEY_DESCRIPTOR TcpipServiceProvider =
{
HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\Services\\Tcpip\\ServiceProvider",
FALSE,
TcpipServiceProvider_Values
};
CONST TR_KEY_DESCRIPTOR* TrRepairSet [] =
{
&DhcpParameterOptions,
&DhcpParameters,
&DnscacheParameters,
&LmHostsParameters,
&NetbtInterfaces,
&NetbtParameters,
&NlaParameters,
&TcpipInterfaces,
&TcpipParameters,
&TcpipPerformance,
&TcpipServiceProvider,
NULL
};
// Access rights requested for every key opened during repair: values are
// read, rewritten, and sometimes deleted.
#define SAM_DESIRED KEY_READ | KEY_WRITE | DELETE
// What kind of change TrLogAction is reporting.
typedef enum _TR_LOG_ACTION {
    TR_ADDED,
    TR_DELETED,
    TR_RESET,
} TR_LOG_ACTION;
// Log-line prefixes indexed by TR_LOG_ACTION; padded so columns align.
CONST PCSTR LogActionPrefix [] = {
    "added  ",
    "deleted",
    "reset  ",
};
// Shared state threaded through one repair pass.
typedef struct _TR_REPAIR_CONTEXT {
    HANDLE Heap;                    // process heap used for RegData
    FILE *LogFile;                  // where all repair actions are logged
    PBYTE RegData;                  // scratch buffer for value data reads
    ULONG RegDataSize;              // current capacity of RegData, in bytes
    CHAR EnumKeyName [MAX_PATH];    // name of the subkey currently enumerated
} TR_REPAIR_CONTEXT, *PTR_REPAIR_CTX;
//
// Prepare a repair context for use: remember the log file, and allocate
// the initial scratch buffer used to read registry value data (it is
// grown on demand by TrReadRegData). Returns FALSE if the buffer could
// not be allocated.
//
BOOL
TrInitializeRepairContext(
    IN PTR_REPAIR_CTX Ctx,
    IN FILE *LogFile
    )
{
    // Start from a fully zeroed context, then fill in the live fields.
    ZeroMemory(Ctx, sizeof(TR_REPAIR_CONTEXT));
    Ctx->LogFile = LogFile;
    Ctx->EnumKeyName[0] = 0;
    Ctx->Heap = GetProcessHeap();
    Ctx->RegDataSize = 1024;
    Ctx->RegData = HeapAlloc(Ctx->Heap, 0, Ctx->RegDataSize);
    return Ctx->RegData != NULL;
}
//
// Release the resources owned by a repair context. Safe to call when the
// scratch buffer was never allocated (or was already released).
//
VOID
TrCleanupRepairContext(
    IN PTR_REPAIR_CTX Ctx
    )
{
    if (NULL == Ctx->RegData) {
        return;
    }
    HeapFree(Ctx->Heap, 0, Ctx->RegData);
    Ctx->RegData = NULL;
}
//
// Write one line to the repair log describing what was done to a value:
// "<action> <parent-key>\[<subkey>\][<enum-key>\]<value-name>".
// For TR_RESET, also dump the previous data (still in Ctx->RegData from
// the preceding TrReadRegData call) so the change is auditable.
//
VOID
TrLogAction(
    IN TR_LOG_ACTION Action,
    IN PTR_REPAIR_CTX Ctx,
    IN CONST TR_KEY_DESCRIPTOR *Kd,
    IN CONST TR_VALUE_DESCRIPTOR *Vd,
    IN DWORD RegType
    )
{
    fprintf(Ctx->LogFile, "%s %s\\",
            LogActionPrefix[Action], Kd->ParentKeyName);
    if (Vd->SubKeyName != NULL) {
        fprintf(Ctx->LogFile, "%s\\", Vd->SubKeyName);
    }
    // When the parent key is being enumerated, name the subkey instance.
    if (Kd->EnumKey) {
        fprintf(Ctx->LogFile, "%s\\", Ctx->EnumKeyName);
    }
    fprintf(Ctx->LogFile, "%s\n", Vd->ValueName);
    //
    // Show the value we are replacing.
    //
    if (TR_RESET == Action) {
        switch (RegType) {
        case REG_DWORD:
            fprintf(Ctx->LogFile, "  old  REG_DWORD = %d\n\n", *(PULONG)Ctx->RegData);
            break;
        case REG_EXPAND_SZ:
            fprintf(Ctx->LogFile, "  old  REG_EXPAND_SZ = %s\n\n", (PCSTR)Ctx->RegData);
            break;
        case REG_MULTI_SZ:
        {
            // Walk the double-NUL-terminated string list one entry at a time.
            PCSTR Msz = (PCSTR)Ctx->RegData;
            fprintf(Ctx->LogFile, "  old  REG_MULTI_SZ =\n");
            if (*Msz) {
                while (*Msz) {
                    fprintf(Ctx->LogFile, "    %s\n", Msz);
                    Msz += strlen(Msz) + 1;
                }
            } else {
                fprintf(Ctx->LogFile, "    <empty>\n");
            }
            fprintf(Ctx->LogFile, "\n");
            break;
        }
        case REG_SZ:
            fprintf(Ctx->LogFile, "  old  REG_SZ = %s\n\n", (PCSTR)Ctx->RegData);
            break;
        default:
            // Other types (e.g. REG_BINARY) are not dumped.
            break;
        }
    }
}
//
// Read the data of the registry value described by 'Vd' from 'Key' into
// the context's scratch buffer (Ctx->RegData), growing the buffer when it
// is too small. On success, *ReturnedSize receives the data size in bytes;
// on failure it is left at zero and the registry error code is returned.
//
LONG
TrReadRegData(
    IN PTR_REPAIR_CTX Ctx,
    IN HKEY Key,
    IN CONST TR_VALUE_DESCRIPTOR *Vd,
    OUT PULONG ReturnedSize
    )
{
    LONG Error;
    ULONG Type, Size;
    *ReturnedSize = 0;
    Size = Ctx->RegDataSize;
    Error = RegQueryValueExA(Key, Vd->ValueName, NULL, &Type,
                             Ctx->RegData, &Size);
    if (ERROR_MORE_DATA == Error) {
        //
        // The buffer was too small; reallocate (rounded up to a multiple
        // of 64 bytes) and retry the query exactly once.
        //
        HeapFree(Ctx->Heap, 0, Ctx->RegData);
        Ctx->RegDataSize = (Size + 63) & ~63;
        Ctx->RegData = HeapAlloc(Ctx->Heap, 0, Ctx->RegDataSize);
        if (Ctx->RegData != NULL) {
            Size = Ctx->RegDataSize;
            Error = RegQueryValueExA(Key, Vd->ValueName, NULL, &Type,
                                     Ctx->RegData, &Size);
            if (NOERROR != Error) {
                fprintf(Ctx->LogFile,
                        "    RegQueryValueEx still failed. error = %d\n",
                        Error);
            } else {
                *ReturnedSize = Size;
            }
        } else {
            //
            // BUGFIX: also reset RegDataSize. Previously the context was
            // left with RegData == NULL but a nonzero RegDataSize, so
            // subsequent calls would pass a NULL buffer with a nonzero
            // advertised size to RegQueryValueExA.
            //
            Ctx->RegDataSize = 0;
            Error = ERROR_NOT_ENOUGH_MEMORY;
        }
    } else if (NOERROR == Error) {
        *ReturnedSize = Size;
    }
    return Error;
}
//
// Write a value's default data to the registry. REG_DWORD payloads are
// stored inline in the TR_VALUE_DATA union ('Value'); every other type is
// reached through the 'Pointer' member.
//
VOID
TrSetRegData(
    IN HKEY Key,
    IN CONST TR_VALUE_DESCRIPTOR *Vd,
    IN DWORD RegType,
    IN TR_VALUE_DATA* Data,
    IN DWORD DataSize
    )
{
    CONST BYTE* Bytes;

    if (REG_DWORD == RegType) {
        Bytes = (CONST BYTE*)&Data->Value;
    } else {
        Bytes = Data->Pointer;
    }
    RegSetValueExA(Key, Vd->ValueName, 0, RegType, Bytes, DataSize);
}
//
// Repair every value listed in Kd->Value under the already-open
// 'ParentKey'. For each descriptor: open its subkey if it names one
// (caching the handle across consecutive descriptors that share the same
// SubKeyName pointer), read the current data, then either
//   - create the value with its default if it is missing (TR_ADDED),
//   - delete it if the descriptor says REG_DELETE (TR_DELETED), or
//   - overwrite it if it differs from the default (TR_RESET).
// Conditional descriptors obtain their RegType/Data/DataSize at run time.
//
VOID
TrProcessOpenKey(
    IN PTR_REPAIR_CTX Ctx,
    IN HKEY ParentKey,
    IN CONST TR_KEY_DESCRIPTOR *Kd
    )
{
    LONG Error = NOERROR;
    ULONG i, Size;
    CONST TR_VALUE_DESCRIPTOR *Vd, *PrevVd;
    HKEY SubKey, UseKey;
    DWORD RegType;
    TR_VALUE_DATA Data;
    DWORD DataSize;
    PrevVd = NULL;
    SubKey = INVALID_HANDLE_VALUE;
    for (i = 0; Kd->Value[i].ValueName != NULL; i++) {
        Vd = &Kd->Value[i];
        if (Vd->SubKeyName == NULL) {
            UseKey = ParentKey;
            Error = NOERROR;
        }
        //
        // Open a subkey if needed, and only if its not the same as
        // the one already open.
        //
        // NOTE(review): the reuse test compares SubKeyName POINTERS, which
        // works because the descriptor tables use the same string literal
        // for consecutive entries. If a subkey open fails and the next
        // descriptor shares that same pointer, 'Error'/'UseKey' carry over
        // from the previous iteration — looks benign for the current
        // tables, but verify before reordering them.
        //
        else if (((PrevVd == NULL) || (Vd->SubKeyName != PrevVd->SubKeyName))) {
            if (SubKey != INVALID_HANDLE_VALUE) {
                RegCloseKey(SubKey);
            }
            Error = RegOpenKeyExA(ParentKey, Vd->SubKeyName, 0,
                                  SAM_DESIRED, &SubKey);
            if (NOERROR == Error) {
                UseKey = SubKey;
            } else {
                SubKey = INVALID_HANDLE_VALUE;
            }
        }
        if (NOERROR == Error) {
            Error = TrReadRegData(Ctx, UseKey, Vd, &Size);
        }
        //
        // If the key is handled specially, consult its conditional-routine
        // to obtain the settings to be used below. From here onwards,
        // all processing for this value must use the local variables
        // 'RegType', 'Data', and 'DataSize' rather than the corresponding
        // fields of 'Vd'.
        //
        // (Also see 'TrSetRegData' and 'TrLogAction'.)
        //
        if (Vd->Conditional) {
            Vd->ConditionalRoutine(Ctx, Kd, Vd, &RegType, &Data, &DataSize);
        } else {
            RegType = Vd->RegType;
            Data = Vd->Data;
            DataSize = Vd->DataSize;
        }
        if (ERROR_FILE_NOT_FOUND == Error) {
            if (REG_DELETE != RegType) {
                //
                // The value should exist, so set its default value.
                //
                TrSetRegData(UseKey, Vd, RegType, &Data, DataSize);
                TrLogAction(TR_ADDED, Ctx, Kd, Vd, RegType);
            }
        } else if (NOERROR == Error) {
            //
            // The value exists and we read its data.
            //
            if (REG_DELETE == RegType) {
                //
                // Need to delete the existing value.
                //
                RegDeleteValueA(UseKey, Vd->ValueName);
                TrLogAction(TR_DELETED, Ctx, Kd, Vd, RegType);
            } else {
                BOOL MisCompare = TRUE;
                //
                // Compare the value we read with the default value and reset
                // it if it is different. A size mismatch alone counts as a
                // miscompare (MisCompare stays TRUE).
                //
                if (Size == DataSize) {
                    if (REG_DWORD == RegType) {
                        MisCompare = (*(PULONG)Ctx->RegData != Data.Value);
                    } else {
                        MisCompare = memcmp(Ctx->RegData, Data.Pointer,
                                            Size);
                    }
                }
                if (MisCompare) {
                    TrSetRegData(UseKey, Vd, RegType, &Data, DataSize);
                    TrLogAction(TR_RESET, Ctx, Kd, Vd, RegType);
                }
            }
        } else {
            // Read failed for a reason other than "missing": log and move on.
            fprintf(Ctx->LogFile, "\nerror reading registry value (%s) (%d)\n", Vd->ValueName, Error);
        }
        PrevVd = Vd;
    }
    // Close the last cached subkey handle, if any.
    if (SubKey != INVALID_HANDLE_VALUE) {
        RegCloseKey(SubKey);
    }
}
//
// Repair the values for one key descriptor. Opens Kd->ParentKeyName under
// Kd->RootKey; if Kd->EnumKey is set, enumerates every subkey (recording
// its name in Ctx->EnumKeyName for logging and for the conditional
// routines) and repairs the values under each one, otherwise repairs the
// values directly under the parent key.
//
VOID
TrProcessKey(
    IN PTR_REPAIR_CTX Ctx,
    IN CONST TR_KEY_DESCRIPTOR *Kd
    )
{
    LONG Error;
    HKEY ParentKey;
    Error = RegOpenKeyExA(Kd->RootKey, Kd->ParentKeyName, 0,
                          SAM_DESIRED, &ParentKey);
    if (NOERROR == Error) {
        if (Kd->EnumKey) {
            ULONG i;
            ULONG EnumKeyNameLen;
            FILETIME LastWriteTime;
            HKEY SubKey;
            // Walk subkeys until RegEnumKeyExA reports no more items.
            for (i = 0; NOERROR == Error; i++) {
                EnumKeyNameLen = sizeof(Ctx->EnumKeyName);
                Error = RegEnumKeyExA(ParentKey, i, Ctx->EnumKeyName, &EnumKeyNameLen,
                                      NULL, NULL, NULL, &LastWriteTime);
                if (NOERROR != Error) {
                    // Only log unexpected enumeration failures.
                    if (ERROR_NO_MORE_ITEMS != Error) {
                        fprintf(Ctx->LogFile, "enum error = %d (index = %d)\n",
                                Error, i);
                    }
                    break;
                }
                Error = RegOpenKeyExA(ParentKey, Ctx->EnumKeyName, 0,
                                      SAM_DESIRED, &SubKey);
                if (NOERROR == Error) {
                    TrProcessOpenKey(Ctx, SubKey, Kd);
                    RegCloseKey(SubKey);
                }
            }
        } else {
            TrProcessOpenKey(Ctx, ParentKey, Kd);
        }
        RegCloseKey(ParentKey);
    }
}
//
// Repair every key descriptor in 'Set', a NULL-terminated array of
// TR_KEY_DESCRIPTOR pointers (see TrRepairSet).
//
VOID
TrProcessSet(
    IN PTR_REPAIR_CTX Ctx,
    IN CONST TR_KEY_DESCRIPTOR *Set[]
    )
{
    CONST TR_KEY_DESCRIPTOR **Entry;

    for (Entry = Set; *Entry != NULL; Entry++) {
        TrProcessKey(Ctx, *Entry);
    }
}
//
// Entry point for the repair operation: restore the registry defaults in
// TrRepairSet and then repair the network-binding registry entries,
// logging every change to 'LogFile'. Always returns NOERROR; if the
// context cannot be initialized, no repair work is attempted.
//
DWORD
TrRepair(
    FILE* LogFile
    )
{
    TR_REPAIR_CONTEXT Ctx;

    if (!TrInitializeRepairContext(&Ctx, LogFile)) {
        return NOERROR;
    }
    TrProcessSet(&Ctx, TrRepairSet);
    NetCfgDiagRepairRegistryBindings(LogFile);
    TrCleanupRepairContext(&Ctx);
    return NOERROR;
}
//
// Report whether Routing and Remote Access is configured on this machine:
// TRUE only when the RemoteAccess service key exists and its
// ConfigurationFlags value reads successfully as nonzero.
//
BOOLEAN
IsRrasInstalled(
    IN CONST struct _TR_REPAIR_CONTEXT *Ctx,
    IN CONST struct _TR_KEY_DESCRIPTOR *Kd
    )
{
    CONST CHAR RrasKeyName[] =
        "SYSTEM\\CurrentControlSet\\Services\\RemoteAccess";
    HKEY RrasKey;
    DWORD ConfigurationFlags;
    DWORD Type;
    DWORD Size = sizeof(DWORD);
    BOOLEAN RrasInstalled = FALSE;
    ULONG Error;

    Error = RegOpenKeyExA(Kd->RootKey, RrasKeyName, 0, SAM_DESIRED, &RrasKey);
    if (NOERROR == Error) {
        Error = RegQueryValueExA(RrasKey, "ConfigurationFlags", NULL, &Type,
                                 (LPBYTE)&ConfigurationFlags, &Size);
        if (NOERROR == Error) {
            RrasInstalled = (ConfigurationFlags != 0);
        }
        RegCloseKey(RrasKey);
    }
    return RrasInstalled;
}
//
// Decide whether the interface currently being enumerated
// (Ctx->EnumKeyName) is a WAN interface. Returns TRUE only when the
// TCP/IP Adapters key opens but contains NO subkey of that name.
//
BOOLEAN
IsWanInterface(
    IN CONST struct _TR_REPAIR_CONTEXT *Ctx,
    IN CONST struct _TR_KEY_DESCRIPTOR *Kd
    )
{
    HKEY AdaptersKey;
    CONST CHAR AdaptersKeyName[] =
        "SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters\\Adapters";
    LONG Error;
    BOOLEAN IsWan;
    HKEY Key;
    //
    // Open the TCP/IP adapters key. If successful, look for a subkey
    // named after the interface being enumerated (Ctx->EnumKeyName).
    // A matching subkey makes this a LAN interface (IsWan = FALSE);
    // absence of one is treated as WAN — presumably because the Adapters
    // key lists LAN adapters only; verify against the Tcpip service docs.
    //
    Error = RegOpenKeyExA(Kd->RootKey, AdaptersKeyName, 0, SAM_DESIRED,
                          &AdaptersKey);
    if (NOERROR != Error) {
        //
        // Assume this is a LAN interface.
        //
        IsWan = FALSE;
    } else {
        Error = RegOpenKeyExA(AdaptersKey, Ctx->EnumKeyName, 0, SAM_DESIRED,
                              &Key);
        if (NOERROR != Error) {
            IsWan = TRUE;
        } else {
            IsWan = FALSE;
            RegCloseKey(Key);
        }
        RegCloseKey(AdaptersKey);
    }
    return IsWan;
}
//
// Conditional handler (see TR_VALUE_DESCRIPTOR.ConditionalRoutine) for
// per-interface TCP/IP values whose default depends on interface type:
// on WAN interfaces most values are deleted, while EnableDhcp and
// DontAddDefaultGateway are forced to REG_DWORD 0; on LAN interfaces the
// values are reset to their LAN defaults instead.
//
VOID
TrTcpipWanConditionalRoutine(
    IN CONST struct _TR_REPAIR_CONTEXT *Ctx,
    IN CONST struct _TR_KEY_DESCRIPTOR *Kd,
    IN CONST struct _TR_VALUE_DESCRIPTOR *Vd,
    OUT DWORD *RegType,
    OUT TR_VALUE_DATA *Data,
    OUT DWORD *DataSize
    )
{
    //
    // Return the appropriate setting for the given registry value,
    // based on whether its key is for a WAN or LAN interface.
    //
    if (IsWanInterface(Ctx, Kd)) {
        switch((TR_TCPIP_VALUE)Vd->ConditionalData.Value) {
        case TrAddressTypeTcpipValue:
        case TrDefaultGatewayMetricTcpipValue:
        case TrDisableDynamicUpdateTcpipValue:
        case TrNameServerTcpipValue:
        case TrRawIpAllowedProtocolsTcpipValue:
        case TrTcpAllowedPortsTcpipValue:
        case TrUdpAllowedPortsTcpipValue:
        default:
            // WAN: remove the value entirely.
            *RegType = REG_DELETE;
            break;
        case TrEnableDhcpTcpipValue:
        case TrDontAddDefaultGatewayTcpipValue:
            // WAN: force to 0.
            *RegType = REG_DWORD;
            Data->Value = 0;
            *DataSize = sizeof(DWORD);
            break;
        }
    } else {
        switch((TR_TCPIP_VALUE)Vd->ConditionalData.Value) {
        case TrDontAddDefaultGatewayTcpipValue:
        default:
            // LAN: this value should not exist.
            *RegType = REG_DELETE;
            break;
        case TrAddressTypeTcpipValue:
        case TrDisableDynamicUpdateTcpipValue:
            *RegType = REG_DWORD;
            Data->Value = 0;
            *DataSize = sizeof(DWORD);
            break;
        case TrEnableDhcpTcpipValue:
            // LAN default: DHCP enabled.
            *RegType = REG_DWORD;
            Data->Value = 1;
            *DataSize = sizeof(DWORD);
            break;
        case TrNameServerTcpipValue:
            // LAN default: empty string.
            *RegType = REG_SZ;
            Data->Pointer = Empty;
            *DataSize = sizeof(Empty);
            break;
        case TrDefaultGatewayMetricTcpipValue:
        case TrRawIpAllowedProtocolsTcpipValue:
        case TrTcpAllowedPortsTcpipValue:
        case TrUdpAllowedPortsTcpipValue:
            // LAN default: empty multi-string.
            *RegType = REG_MULTI_SZ;
            Data->Pointer = Empty;
            *DataSize = sizeof(Empty);
            break;
        }
    }
}
//
// Conditional handler for TCP/IP values whose default depends on whether
// Routing and Remote Access (RRAS) is configured: with RRAS present,
// EnableDeadGwDetect is forced to 0 and DontAddDefaultGateway is 1 on
// WAN interfaces (deleted on LAN); without RRAS, both are deleted.
//
VOID
TrTcpipRrasConditionalRoutine(
    IN CONST struct _TR_REPAIR_CONTEXT *Ctx,
    IN CONST struct _TR_KEY_DESCRIPTOR *Kd,
    IN CONST struct _TR_VALUE_DESCRIPTOR *Vd,
    OUT DWORD *RegType,
    OUT TR_VALUE_DATA *Data,
    OUT DWORD *DataSize
    )
{
    //
    // Return the appropriate setting for the given registry value,
    // based on whether RRAS is installed.
    //
    // N.B. The setting for 'DontAddDefaultGateway' is further dependent
    // on whether the key is for a WAN or LAN interface.
    //
    if (IsRrasInstalled(Ctx, Kd)) {
        switch((TR_TCPIP_VALUE)Vd->ConditionalData.Value) {
        case TrDontAddDefaultGatewayTcpipValue:
            if (IsWanInterface(Ctx, Kd)) {
                *RegType = REG_DWORD;
                Data->Value = 1;
                *DataSize = sizeof(DWORD);
            } else {
                *RegType = REG_DELETE;
            }
            break;
        case TrEnableDeadGwDetectTcpipValue:
            *RegType = REG_DWORD;
            Data->Value = 0;
            *DataSize = sizeof(DWORD);
            break;
        default:
            *RegType = REG_DELETE;
            break;
        }
    } else {
        switch((TR_TCPIP_VALUE)Vd->ConditionalData.Value) {
        case TrDontAddDefaultGatewayTcpipValue:
        case TrEnableDeadGwDetectTcpipValue:
        default:
            // No RRAS: none of these values should exist.
            *RegType = REG_DELETE;
            break;
        }
    }
}
| 30.404275
| 103
| 0.597035
|
[
"object"
] |
3ae8d668e968bcd8524ef1b5b2fdb523133c8647
| 12,624
|
h
|
C
|
src/utests/include/utests/baselib/UtfMain.h
|
sturosier/swblocks-baselib
|
3f6ad1f34bbdef5824ab7c91344eec5b876fa718
|
[
"Apache-2.0"
] | 30
|
2017-01-27T00:12:43.000Z
|
2021-04-02T23:33:49.000Z
|
src/utests/include/utests/baselib/UtfMain.h
|
sha-shrestha/swblocks-baselib
|
f5d38ace39e386b233fa96239ba16cdedac765aa
|
[
"Apache-2.0"
] | 33
|
2017-04-20T15:42:53.000Z
|
2021-01-24T14:04:07.000Z
|
src/utests/include/utests/baselib/UtfMain.h
|
sha-shrestha/swblocks-baselib
|
f5d38ace39e386b233fa96239ba16cdedac765aa
|
[
"Apache-2.0"
] | 29
|
2017-01-27T17:57:39.000Z
|
2021-09-09T01:06:35.000Z
|
/*
* This file is part of the swblocks-baselib library.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if !defined(UTF_TEST_MODULE)
#error UTF_TEST_MODULE macro should be defined before this header is included; it should also be included only once
#endif
#include <baselib/crypto/TrustedRoots.h>
#include <baselib/http/SimpleHttpTask.h>
#include <baselib/tasks/AsioSslStreamWrapper.h>
#include <baselib/core/AppInitDone.h>
#include <baselib/core/ObjModel.h>
#include <baselib/core/ObjModelDefs.h>
#include <baselib/core/MessageBuffer.h>
#include <baselib/core/Logging.h>
#include <baselib/core/ThreadPool.h>
#include <baselib/core/ThreadPoolImpl.h>
#include <baselib/core/OS.h>
#include <baselib/core/BaseIncludes.h>
#include <string>
#include <sstream>
#include <utests/baselib/UtfArgsParser.h>
#include <utests/baselib/UtfCrypto.h>
#include <utests/baselib/Utf.h>
#include <baselib/core/detail/BoostIncludeGuardPush.h>
#include <boost/test/unit_test_monitor.hpp>
#include <baselib/core/detail/BoostIncludeGuardPop.h>
/**
* @brief The logging config
*/
/**
 * @brief Global unit-test fixture configuration.
 *
 * Installs a UTF-aware line logger, initializes the application state
 * (thread pools, crypto, trusted roots) from the command line parsed by
 * UtfArgsParser, and verifies on teardown that no object references leaked.
 * On Windows it can optionally drive UMDH heap snapshots around the test run.
 */
template
<
    typename E = void
>
class DefaultUtfConfigT
{
private:

    // Owns the application init/teardown state (thread pools, etc.)
    bl::cpp::SafeUniquePtr< bl::AppInitDoneDefault > m_appInit;

    // Keeps utfLineLogger installed for the fixture's lifetime (RAII pusher)
    bl::Logging::LineLoggerPusher m_pushLineLogger;

    // UMDH snapshot files: before run, after run, and the diff result
    bl::fs::path m_file1;
    bl::fs::path m_file2;
    bl::fs::path m_resultFile;

    // The logger that was in effect before the fixture was constructed;
    // used as a fallback while the Boost.Test framework is not initialized
    static bl::Logging::line_logger_t g_defaultLogger;

    /*
     * Candidate locations of the UMDH tool (quoted for command-line use,
     * plus an unquoted variant for the fs::exists() probe)
     */

    static std::string getUmdhPath()
    {
        return "\"C:\\dev\\3rd\\PF\\Windows Kits\\8.0\\Debuggers\\x86\\umdh.exe\"";
    }

    static std::string getUmdhPathAdmin()
    {
        return "\"C:\\Program Files (x86)\\Windows Kits\\8.0\\Debuggers\\x86\\umdh.exe\"";
    }

    static std::string getUmdhPathAdminNoQuotes()
    {
        return "C:\\Program Files (x86)\\Windows Kits\\8.0\\Debuggers\\x86\\umdh.exe";
    }

    /**
     * @brief Line logger that routes formatted log lines through the
     * Boost.Test message macros (or the previous default logger when the
     * UTF is not yet running).
     */
    static void utfLineLogger(
        SAA_in const std::string& prefix,
        SAA_in const std::string& text,
        SAA_in const bool enableTimestamp,
        SAA_in const bl::Logging::Level level
        )
    {
        /*
         * There were some API changes made in Boost.Test framework in 1.59 which we need
         * to accommodate - e.g. is_initialized() API was replaced with test_in_progress()
         */

#if BOOST_VERSION < 105900
        if( boost::unit_test::framework::is_initialized() )
#else
        if( boost::unit_test::framework::test_in_progress() )
#endif
        {
            // Format the line once into a buffer, then emit it through the
            // UTF macro that matches the severity
            bl::cpp::SafeOutputStringStream os;

            bl::Logging::defaultLineLoggerNoLock(
                prefix,
                text,
                true /* enableTimestamp */,
                level,
                false /* addNewLine */,
                os
                );

            os.flush();
            const auto msg = os.str();

            BL_STDIO_TEXT(
                {
                    switch( level )
                    {
                        case bl::Logging::LL_ERROR:
                            UTF_ERROR_MESSAGE( msg );
                            break;

                        case bl::Logging::LL_WARNING:
                            UTF_ERROR_MESSAGE( msg );
                            break;

                        default:
                            UTF_MESSAGE( msg );
                            break;
                    }
                }
                );
        }
        else
        {
            /*
             * The UTF isn't initialized yet; we can only use the default logger
             */
            g_defaultLogger( prefix, text, enableTimestamp, level );
        }
    }

    // Exception translator registered with the Boost.Test monitor so that
    // unhandled exceptions are reported with full diagnostic information
    static void translateException( const std::exception& ex )
    {
        UTF_ERROR_MESSAGE( "Unhandled exception was caught:\n" + bl::eh::diagnostic_information( ex ) );
    }

public:

    typedef DefaultUtfConfigT< E > this_type;

    /**
     * @brief Initializes logging, argument parsing, thread pools, crypto
     * and (optionally) takes the initial UMDH heap snapshot.
     */
    DefaultUtfConfigT()
        :
        m_pushLineLogger( &this_type::utfLineLogger )
    {
        /*
         * The default abstract priority for unit tests should be 'Normal'
         */

        bl::os::setAbstractPriorityDefault( bl::os::AbstractPriority::Normal );

        bl::Logging::setLevel( bl::Logging::LL_DEBUG, true /* default global logging level */ );

        using namespace bl;

        auto& monitor = boost::unit_test::unit_test_monitor;
        monitor.register_exception_translator< std::exception >( &this_type::translateException );

        const auto& mts = boost::unit_test::framework::master_test_suite();
        BL_ASSERT( mts.argc >= 0 );

        /*
         * The UTF_TEST_APP_INIT_UTF_ARGS_PARSER macro can be used to override the UTF argument parser
         * class in some unit tests which have non-generic (custom / specialized) command line arguments
         */

#ifndef UTF_TEST_APP_INIT_UTF_ARGS_PARSER
#define UTF_TEST_APP_INIT_UTF_ARGS_PARSER test::UtfArgsParser
#endif // UTF_TEST_APP_INIT_UTF_ARGS_PARSER

        UTF_TEST_APP_INIT_UTF_ARGS_PARSER::init( ( std::size_t )( mts.argc ), ( const char* const* )( mts.argv ) );

        // A non-zero --timeout argument overrides the HTTP timeouts globally
        const auto timeoutInSeconds = ( long )( test::UtfArgsParser::timeoutInSeconds() );

        if( timeoutInSeconds )
        {
            bl::http::Parameters::timeoutInSecondsGet( timeoutInSeconds );
            bl::http::Parameters::timeoutInSecondsOther( timeoutInSeconds );
        }

        bl::Logging::setLevel( ( int ) test::UtfArgsParser::loggingLevel(), true /* default global logging level */ );

        m_appInit.reset(
            new bl::AppInitDoneDefault(
                ( int ) test::UtfArgsParser::loggingLevel() /* default global logging level */,
                test::UtfArgsParser::threadsCount() /* the default # of threads in the TP */,
                nullptr /* sharedThreadPool */,
                nullptr /* sharedNonBlockingThreadPool */
#ifdef UTF_TEST_APP_INIT_DEACTIVATE_THREAD_POOLS
                , UTF_TEST_APP_INIT_DEACTIVATE_THREAD_POOLS
#endif
                )
            );

        /*
         * Make sure we initialize OpenSSL and add our DEV root certificate to
         * the list of the global trusted roots
         */

        bl::crypto::registerTrustedRoot( test::UtfCrypto::getDevRootCA() /* certificatePemText */ );

        if( test::UtfArgsParser::isNoRfc2818Verify() )
        {
            bl::tasks::AsioSslStreamWrapper::g_rfc2818VerifyCallback =
                bl::tasks::AsioSslStreamWrapper::rfc2818NoVerifyCallback();
        }

#ifdef UTF_TEST_APP_INIT_PHASE1_INIT
        UTF_TEST_APP_INIT_PHASE1_INIT
#endif // UTF_TEST_APP_INIT_PHASE1_INIT

        bl::crypto::CryptoBase::init();

        // Nothing constructed so far should hold object references
        BL_CHK( false, 0L == om::outstandingObjectRefs(), BL_MSG() << "Objects leaked!" );

        if( os::onWindows() && test::UtfArgsParser::isUmdhModeEnabled() )
        {
            // Generate unique snapshot file names under %TEMP% and take the
            // "before" UMDH snapshot of this process' heap
            const fs::path temp( os::getEnvironmentVariable( "TEMP" ) );

            m_file1 = temp;
            m_file1 /= fs::path( uuids::uuid2string( uuids::create() ) + ".umdh" );

            m_file2 = temp;
            m_file2 /= fs::path( uuids::uuid2string( uuids::create() ) + ".umdh" );

            m_resultFile = temp;
            m_resultFile /= fs::path( uuids::uuid2string( uuids::create() ) + ".umdh" );

            const auto umdhPath = fs::exists( getUmdhPathAdminNoQuotes() ) ? getUmdhPathAdmin() : getUmdhPath();

            bl::cpp::SafeOutputStringStream ss;
            ss << umdhPath;
            ss << " -p:";
            ss << os::getPid();
            ss << " -f:";
            ss << m_file1.string();
            const auto cmdLine = ss.str();
            const auto processRef = os::createProcess( cmdLine );
            BL_CHK(
                false,
                0 == os::tryAwaitTermination( processRef ),
                BL_MSG()
                    << "Failed to execute: "
                    << cmdLine
                );
        }
    }

    /**
     * @brief Tears down the thread pools, takes the "after" UMDH snapshot
     * and diff (when enabled), and verifies no object references leaked.
     * Must not throw - errors are logged instead.
     */
    ~DefaultUtfConfigT() NOEXCEPT
    {
        long outstandingRefs = 0L;

        try
        {
            using namespace bl;

            if( m_appInit )
            {
                /*
                 * Dispose the global thread pools
                 */

                m_appInit -> dispose();

                BL_ASSERT( ! bl::ThreadPoolDefault::getDefault( ThreadPoolId::GeneralPurpose ) );
                BL_ASSERT( ! bl::ThreadPoolDefault::getDefault( ThreadPoolId::NonBlocking ) );
            }

            if( os::onWindows() && test::UtfArgsParser::isUmdhModeEnabled() )
            {
                const auto umdhPath = fs::exists( getUmdhPathAdminNoQuotes() ) ? getUmdhPathAdmin() : getUmdhPath();

                {
                    // Take the "after" snapshot of the process heap
                    bl::cpp::SafeOutputStringStream ss;
                    ss << umdhPath;
                    ss << " -p:";
                    ss << os::getPid();
                    ss << " -f:";
                    ss << m_file2.string();
                    const auto cmdLine = ss.str();
                    const auto processRef = os::createProcess( cmdLine );
                    BL_CHK(
                        false,
                        0 == os::tryAwaitTermination( processRef ),
                        BL_MSG()
                            << "Failed to execute: "
                            << cmdLine
                        );
                }

                {
                    // Diff the two snapshots into the result file
                    bl::cpp::SafeOutputStringStream ss;
                    ss << umdhPath;
                    ss << " ";
                    ss << m_file1.string();
                    ss << " ";
                    ss << m_file2.string();
                    ss << " -f:";
                    ss << m_resultFile.string();
                    const auto cmdLine = ss.str();
                    const auto processRef = os::createProcess( cmdLine );
                    BL_CHK(
                        false,
                        0 == os::tryAwaitTermination( processRef ),
                        BL_MSG()
                            << "Failed to execute: "
                            << cmdLine
                        );
                }

                /*
                 * Using the logger here may not be safe since it maybe
                 * still wired to the UTF logger (which cannot be used during globals unwind)
                 */

                bl::Logging::LineLoggerPusher pushLineLogger( bl::Logging::getDefaultLineLogger() );

                BL_LOG_MULTILINE(
                    bl::Logging::info(),
                    BL_MSG()
                        << "The result UMDH file is:\n"
                        << m_resultFile.string()
                    );
            }

            outstandingRefs = bl::om::outstandingObjectRefs();
            BL_CHK( false, 0L == outstandingRefs, BL_MSG() << "Objects leaked!" );
        }
        catch( std::exception& e )
        {
            /*
             * Using the logger here may not be safe since it maybe
             * still wired to the UTF logger (which cannot be used during globals unwind)
             */

            bl::Logging::LineLoggerPusher pushLineLogger( bl::Logging::getDefaultLineLogger() );

            BL_LOG_MULTILINE(
                bl::Logging::error(),
                BL_MSG()
                    << "DefaultUtfConfig()::~DefaultUtfConfig() failed with the following exception: "
                    << bl::eh::diagnostic_information( e )
                    << "\nOutstanding object references are "
                    << outstandingRefs
                );
        }
    }
};
/*
 * Capture the line logger that is in effect at static-initialization time,
 * so utfLineLogger() can fall back to it before the UTF is running
 */
template
<
    typename E
>
bl::Logging::line_logger_t
DefaultUtfConfigT< E >::g_defaultLogger = bl::Logging::getDefaultLineLogger();

typedef DefaultUtfConfigT<> DefaultUtfConfig;

/*
 * Register the fixture globally with Boost.Test unless the including test
 * module opts out by defining BL_NO_DEFAULT_UTF_GLOBAL_FIXTURE
 */
#ifndef BL_NO_DEFAULT_UTF_GLOBAL_FIXTURE
UTF_GLOBAL_FIXTURE( DefaultUtfConfig )
#endif
| 33.221053
| 118
| 0.528438
|
[
"object"
] |
3aed41e68df57b04c09f578175e7efc2d341e0e2
| 11,230
|
h
|
C
|
deprecated/include/Graphics/GUI/Core/GuiItem.h
|
Arzana/Plutonium
|
5a17c93e5072ac291b96347a4df196e1609fabe2
|
[
"MIT"
] | 4
|
2019-03-21T16:02:03.000Z
|
2020-04-09T08:53:29.000Z
|
deprecated/include/Graphics/GUI/Core/GuiItem.h
|
Arzana/Plutonium
|
5a17c93e5072ac291b96347a4df196e1609fabe2
|
[
"MIT"
] | 24
|
2018-04-06T08:25:17.000Z
|
2020-10-19T11:01:09.000Z
|
deprecated/include/Graphics/GUI/Core/GuiItem.h
|
Arzana/Plutonium
|
5a17c93e5072ac291b96347a4df196e1609fabe2
|
[
"MIT"
] | null | null | null |
#pragma once
#include "Core\Events\ValueChangedEventArgs.h"
#include "Graphics\GUI\GuiItemRenderer.h"
#include "Core\Events\EventBus.h"
#include "Anchors.h"
#include "Game.h"
namespace Plutonium
{
    class Container;

    /* Defines the absolute base object for all GuiItems. */
    class GuiItem
    {
    public:
        /* Occurs when the BackColor value is changed. */
        EventBus<GuiItem, ValueChangedEventArgs<Color>> BackColorChanged;
        /* Occurs when the BackgroundImage is set or changed. */
        EventBus<GuiItem, ValueChangedEventArgs<TextureHandler>> BackgroundImageChanged;
        /* Occurs when the GuiItem is clicked with any button. */
        EventBus<GuiItem, CursorHandler> Clicked;
        /* Occurs before the deletion of the base GuiItem. */
        EventBus<GuiItem, EventArgs> Finalized;
        /* Occurs when the Focusable indicator is changed. */
        EventBus<GuiItem, ValueChangedEventArgs<bool>> FocusableChanged;
        /* Occurs when the FocusedImage is set or changed. */
        EventBus<GuiItem, ValueChangedEventArgs<TextureHandler>> FocusedImageChanged;
        /* Occurs when the GuiItem gains focus. */
        EventBus<GuiItem, EventArgs> GainedFocus;
        /* Occurs when the cursor pointer rests on the GuiItem. */
        EventBus<GuiItem, CursorHandler> Hover;
        /* Occurs when the cursor pointer enters the GuiItem's bounds. */
        EventBus<GuiItem, CursorHandler> HoverEnter;
        /* Occurs when the cursor pointer leaves the GuiItem's bounds. */
        EventBus<GuiItem, CursorHandler> HoverLeave;
        /* Occurs when the GuiItem loses focus. */
        EventBus<GuiItem, EventArgs> LostFocus;
        /* Occurs when the position of the GuiItem is changed. */
        EventBus<GuiItem, ValueChangedEventArgs<Vector2>> Moved;
        /* Occurs when the name of the GuiItem is set or changed. */
        EventBus<GuiItem, ValueChangedEventArgs<const char*>> NameChanged;
        /* Occurs when the GuiItem is resized. */
        EventBus<GuiItem, ValueChangedEventArgs<Vector2>> Resized;
        /* Occurs when the GuiItem is enabled or disabled. */
        EventBus<GuiItem, ValueChangedEventArgs<bool>> StateChanged;
        /* Occurs when the GuiItem is shown or hidden. */
        EventBus<GuiItem, ValueChangedEventArgs<bool>> VisibilityChanged;

        /* Initializes a new instance of a base GuiItem with default settings. */
        GuiItem(_In_ Game *parent);
        /* Initializes a new instance of a base GuiItem with a specified position and size. */
        GuiItem(_In_ Game *parent, _In_ Rectangle bounds);
        GuiItem(_In_ const GuiItem &value) = delete;
        GuiItem(_In_ GuiItem &&value) = delete;
        /* Releases the resources allocated by the GuiItem. */
        ~GuiItem(void);

        _Check_return_ GuiItem& operator =(_In_ const GuiItem &other) = delete;
        _Check_return_ GuiItem& operator =(_In_ GuiItem &&other) = delete;

        /* Simulates a cursor click event. */
        void PerformClick(void);
        /* Updates the GuiItem, checking if any events are occurring. */
        virtual void Update(_In_ float dt);
        /* Renders the GuiItem to the screen. */
        virtual void Draw(_In_ GuiItemRenderer *renderer);
        /*
        Moves the GuiItem to a specified relative position.
        The anchor will have preference over the specified positional components.
        */
        void MoveRelative(_In_ Anchors anchor, _In_opt_ float x = 0.0f, _In_opt_ float y = 0.0f);
        /* Enables the GuiItem and makes it visible. */
        void Show(void);
        /* Disables the GuiItem and makes it hidden. */
        void Hide(void);

        /* Gets the current value of the anchor. */
        _Check_return_ inline Anchors GetAnchor(void) const
        {
            return anchor;
        }

        /* Gets the current value of the background color. */
        _Check_return_ inline Color GetBackColor(void) const
        {
            return backColor;
        }

        /* Gets the current background image (nullptr if none is set). */
        _Check_return_ inline TextureHandler GetBackgroundImage(void) const
        {
            return background;
        }

        /* Gets the bounds of the GuiItem. */
        _Check_return_ inline Rectangle GetBounds(void) const
        {
            return bounds;
        }

        /* Gets the bounding box of the GuiItem, used for anchoring. */
        _Check_return_ virtual inline Rectangle GetBoundingBox(void) const
        {
            return bounds;
        }

        /* Gets the maximum bounds of the GuiItem, this includes the background bounds and the bounding box. */
        _Check_return_ inline Rectangle GetMaxBounds(void) const
        {
            return Rectangle::Merge(GetBoundingBox(), bounds);
        }

        /* Gets the default value for the background color. */
        _Check_return_ static inline Color GetDefaultBackColor(void)
        {
            return Color::WhiteSmoke();
        }

        /* Gets the default value for the GuiItem bounds. */
        _Check_return_ static inline Rectangle GetDefaultBounds(void)
        {
            return Rectangle(0.0f, 0.0f, 100.0f, 50.0f);
        }

        /* Gets the default value for the rounding factor. */
        _Check_return_ static inline float GetDefaultRoundingFactor(void)
        {
            return 10.0f;
        }

        /* Gets whether the GuiItem is currently enabled. */
        _Check_return_ inline bool IsEnabled(void) const
        {
            return enabled;
        }

        /* Gets the current height of the bounds as an integer value. */
        _Check_return_ inline int32 GetHeight(void) const
        {
            return static_cast<int32>(GetMaxBounds().GetHeight());
        }

        /* Gets the assigned name of the GuiItem. */
        _Check_return_ inline const char* GetName(void) const
        {
            return name;
        }

        /* Gets the current position of the GuiItem. */
        _Check_return_ inline Vector2 GetPosition(void) const
        {
            return GetMaxBounds().Position;
        }

        /* Gets the current size of the GuiItem. */
        _Check_return_ inline Vector2 GetSize(void) const
        {
            return GetMaxBounds().Size;
        }

        /* Gets whether the GuiItem is currently visible. */
        _Check_return_ inline bool IsVisible(void) const
        {
            return visible;
        }

        /* Gets the current width of the bounds as an integer value. */
        _Check_return_ inline int32 GetWidth(void) const
        {
            return static_cast<int32>(GetMaxBounds().GetWidth());
        }

        /* Gets the current horizontal position of the GuiItem. */
        _Check_return_ inline float GetX(void) const
        {
            return GetPosition().X;
        }

        /* Gets the current vertical position of the GuiItem. */
        _Check_return_ inline float GetY(void) const
        {
            return GetPosition().Y;
        }

        /* Gets whether the GuiItem can be focused. */
        _Check_return_ inline bool IsFocusable(void) const
        {
            return focusable;
        }

        /* Gets whether the GuiItem is currently focused. */
        _Check_return_ inline bool IsFocused(void) const
        {
            return focused;
        }

        /* Gets the rounding factor. */
        _Check_return_ inline float GetRoundingFactor(void) const
        {
            return roundingFactor;
        }

        /* Gets the current offset from the defined anchor point. */
        _Check_return_ inline Vector2 GetOffsetFromAnchor(void) const
        {
            return offsetFromAnchorPoint;
        }

        /* Gets the current parent of this GuiItem, can be nullptr! */
        _Check_return_ inline const GuiItem* GetParent(void) const
        {
            return parent;
        }

        /* Sets the anchor to the specified value, making sure the GuiItem always stays at the desired position if the parent or GuiItem resizes. */
        virtual void SetAnchors(_In_ Anchors value, _In_opt_ float xOffset = 0.0f, _In_opt_ float yOffset = 0.0f);
        /* Sets the color of the background to a new solid color, or (when a background image is set) changes the color filter of the background image. */
        virtual void SetBackColor(_In_ Color color);
        /* Sets the background image for this GuiItem replacing the solid color background. */
        virtual void SetBackgroundImage(_In_ TextureHandler image);
        /* Sets the focused background image for this GuiItem replacing the solid color background. */
        virtual void SetFocusedBackgroundImage(_In_ TextureHandler image);
        /* Sets the bounds of the GuiItem, possibly moving it and resizing it. */
        virtual void SetBounds(_In_ Rectangle bounds);
        /* Sets whether the GuiItem is enabled or disabled. */
        virtual void SetState(_In_ bool enabled);
        /* Sets the height of the GuiItem, resizing it. */
        virtual void SetHeight(_In_ int32 height);
        /* Sets the name identifier of the GuiItem. */
        virtual void SetName(_In_ const char *name);
        /* Sets the position of the GuiItem. */
        virtual void SetPosition(_In_ Vector2 position);
        /* Sets the size of the GuiItem. */
        virtual void SetSize(_In_ Vector2 size);
        /* Sets whether the GuiItem is visible or hidden. */
        virtual void SetVisibility(_In_ bool visibility);
        /* Sets the width of the GuiItem, resizing it. */
        virtual void SetWidth(_In_ int32 width);
        /* Sets the horizontal position of the GuiItem, moving it. */
        virtual void SetX(_In_ float x);
        /* Sets the vertical position of the GuiItem, moving it. */
        virtual void SetY(_In_ float y);
        /* Sets whether the GuiItem can be focused. */
        virtual void SetFocusable(_In_ bool value);
        /* Sets the rounding factor used to give the GuiItem background rounded edges. */
        virtual void SetRoundingFactor(_In_ float value);
        /* Sets the parent of this GuiItem. */
        virtual void SetParent(const GuiItem *item);

    protected:
        /* Suppresses all the refresh calls to this GuiItem until enabled again. */
        bool suppressRefresh;
        /* Suppresses the next update call to this GuiItem, resetting it afterwards. */
        bool suppressUpdate;
        /* Suppresses the next render call to this GuiItem, resetting it afterwards. */
        bool suppressRender;
        /* The game associated with the GuiItem. */
        Game *game;

        /* Renders the GuiItem to the renderer, use for internal item skipping. */
        void RenderGuiItem(_In_ GuiItemRenderer *renderer);
        /* Gets the required size of the GuiItem at any time, max of background or focus image. */
        _Check_return_ virtual Vector2 GetMinSize(void) const;
        /* This function can be called to give the GuiItem focus or have it lose focus. */
        void ApplyFocus(bool focused);

        /* Gets whether the cursor is currently hovering over the GuiItem. */
        _Check_return_ inline bool IsMouseOver(void) const
        {
            return over;
        }

        /* Gets whether the cursor left button is currently clicking the GuiItem. */
        _Check_return_ inline bool IsLeftDown(void) const
        {
            return ldown;
        }

        /* Gets whether the cursor right button is currently clicking the GuiItem. */
        _Check_return_ inline bool IsRightDown(void) const
        {
            return rdown;
        }

        /* Gets the mesh used to render the background. */
        _Check_return_ inline const Buffer* GetBackgroundMesh(void) const
        {
            return mesh;
        }

        /* Gets the offset that should be used for the background bounds. */
        _Check_return_ virtual inline Vector2 GetBackgroundOffset(void) const
        {
            return Vector2::Zero();
        }

    private:
        friend class Container;

        /* Optional parent item and owning container (both may be null). */
        const GuiItem *parent;
        Container *container;
        /* Background render mesh and the normal / focused background textures. */
        Buffer *mesh;
        TextureHandler background, focusedBackground;
        /* Cursor / state flags backing the public Is* accessors. */
        bool over, ldown, rdown, visible, enabled, focusable, focused;
        Color backColor;
        float roundingFactor;
        Vector2 position;
        Rectangle bounds;
        const char *name;
        /* Anchor configuration backing GetAnchor / GetOffsetFromAnchor. */
        Anchors anchor;
        Vector2 offsetFromAnchorPoint;

        void CheckBounds(Vector2 size);
        void UpdateMesh(void);
        void UpdatePosition(Vector2 position);
        /* Event handlers keeping the item positioned when the window or parent changes. */
        void WindowResizedHandler(WindowHandler, EventArgs);
        void ParentMovedHandler(const GuiItem *sender, ValueChangedEventArgs<Vector2>);
        void ParentResizedHandler(const GuiItem*, ValueChangedEventArgs<Vector2>);
        void MoveRelativeInternal(Anchors anchor, Vector2 base, Vector2 adder);
    };
}
| 34.984424
| 148
| 0.733838
|
[
"mesh",
"render",
"object",
"solid"
] |
3af8dc4aa6b679e219cf01cc89de6097e28d7760
| 1,484
|
h
|
C
|
Code/Tools/AssetProcessor/native/utilities/BuilderConfigurationBus.h
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 11
|
2021-07-08T09:58:26.000Z
|
2022-03-17T17:59:26.000Z
|
Code/Tools/AssetProcessor/native/utilities/BuilderConfigurationBus.h
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | 29
|
2021-07-06T19:33:52.000Z
|
2022-03-22T10:27:49.000Z
|
Code/Tools/AssetProcessor/native/utilities/BuilderConfigurationBus.h
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | 4
|
2021-07-06T19:24:43.000Z
|
2022-03-31T12:42:27.000Z
|
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#pragma once
#include <AzCore/EBus/EBus.h>
#include <AzCore/std/string/string.h>
#include <AssetBuilderSDK/AssetBuilderSDK.h>
namespace AssetProcessor
{
    //! EBus interface used to load builder configuration overrides
    //! (BuilderConfig.ini) and apply them to job / builder descriptors.
    class BuilderConfigurationRequests
        : public AZ::EBusTraits
    {
    public:
        // Single address / single handler: one configuration provider services the bus
        static const AZ::EBusAddressPolicy AddressPolicy = AZ::EBusAddressPolicy::Single;
        static const AZ::EBusHandlerPolicy HandlerPolicy = AZ::EBusHandlerPolicy::Single;
        // Recursive mutex allows a handler to issue nested requests on the same thread
        using MutexType = AZStd::recursive_mutex;

        virtual ~BuilderConfigurationRequests() = default;

        //! Load configuration data from a specific BuilderConfig.ini file.
        //! Default implementation reports failure (no configuration loaded).
        virtual bool LoadConfiguration(const AZStd::string& /*configFile*/) { return false; }

        //! Update a job descriptor given the configuration data which has been loaded
        virtual bool UpdateJobDescriptor(const AZStd::string& /*jobKey*/, AssetBuilderSDK::JobDescriptor& /*jobDesc*/) { return false; }

        //! Update a builder desc given configuration data
        virtual bool UpdateBuilderDescriptor(const AZStd::string& /*builderName*/, AssetBuilderSDK::AssetBuilderDesc& /*jobDesc*/) { return false; }
    };

    using BuilderConfigurationRequestBus = AZ::EBus<BuilderConfigurationRequests>;
} // namespace AssetProcessor
| 35.333333
| 148
| 0.72372
|
[
"3d"
] |
3afa8c790c09fb7d6e7d7d6a1a26b30bc202d41e
| 1,397
|
h
|
C
|
Source/Resources/ModelLoader.h
|
aaronmjacobs/Swap
|
955f36bc95b6829bf1a1a89b430df7816c065ac0
|
[
"MIT"
] | null | null | null |
Source/Resources/ModelLoader.h
|
aaronmjacobs/Swap
|
955f36bc95b6829bf1a1a89b430df7816c065ac0
|
[
"MIT"
] | null | null | null |
Source/Resources/ModelLoader.h
|
aaronmjacobs/Swap
|
955f36bc95b6829bf1a1a89b430df7816c065ac0
|
[
"MIT"
] | null | null | null |
#pragma once
#include "Core/Pointers.h"
#include "Graphics/Material.h"
#include "Graphics/Model.h"
#include "Resources/TextureLoader.h"
#include <cstdint>
#include <string>
#include <unordered_map>
class Mesh;
// Selects how normals are produced when loading a model
// (generation itself happens in the loader implementation, not visible here).
enum class NormalGenerationMode : uint8_t
{
   None,
   Flat,
   Smooth
};
// Full description of a model load request; also serves as the cache key.
struct ModelSpecification
{
   std::string path;                                                        // model file path
   NormalGenerationMode normalGenerationMode = NormalGenerationMode::Smooth; // normal handling for the load
   LoadedTextureParameters textureParams;                                   // forwarded to the TextureLoader
   bool cache = true;                                                       // cache the loaded model
   bool cacheTextures = true;                                               // cache the model's textures

   // Field-wise equality; required (together with std::hash below) for use
   // as an std::unordered_map key.
   bool operator==(const ModelSpecification& other) const
   {
      return path == other.path
         && normalGenerationMode == other.normalGenerationMode
         && textureParams == other.textureParams
         && cache == other.cache
         && cacheTextures == other.cacheTextures;
   }
};
// Provide a template specialization to allow using ModelSpecification as a key in std::unordered_map
namespace std
{
   template<>
   struct hash<ModelSpecification>
   {
      // Defined out-of-line in the corresponding source file (not visible here).
      size_t operator()(const ModelSpecification& specification) const;
   };
}
// Cached view of a loaded Model: its mesh plus the materials to render it with.
struct ModelRef
{
   ModelRef(const Model& model);

   WPtr<Mesh> mesh; // WPtr presumably a weak (non-owning) pointer - confirm against Core/Pointers.h
   std::vector<Material> materials;
};
// Loads models described by a ModelSpecification, caching results keyed by
// the full specification (see std::hash<ModelSpecification> above).
class ModelLoader
{
public:
   Model loadModel(const ModelSpecification& specification, TextureLoader& textureLoader);
   // Drops all cached ModelRef entries.
   void clearCachedData();

private:
   std::unordered_map<ModelSpecification, ModelRef> modelMap;
};
| 20.850746
| 101
| 0.720115
|
[
"mesh",
"vector",
"model"
] |
3afd42664318d0c8e0d1ed1a9bfe9a5f69898267
| 30,405
|
c
|
C
|
src/telebot-parser.c
|
samuel-allan/axxclubbot
|
158f8e1a99df40e1846f81a04e25fd2be7c4cba1
|
[
"Apache-2.0"
] | null | null | null |
src/telebot-parser.c
|
samuel-allan/axxclubbot
|
158f8e1a99df40e1846f81a04e25fd2be7c4cba1
|
[
"Apache-2.0"
] | null | null | null |
src/telebot-parser.c
|
samuel-allan/axxclubbot
|
158f8e1a99df40e1846f81a04e25fd2be7c4cba1
|
[
"Apache-2.0"
] | null | null | null |
/*
* telebot
*
* Copyright (c) 2015 Elmurod Talipov.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <json.h>
#include <json_object.h>
#include <telebot-private.h>
#include <telebot-common.h>
#include <telebot-api.h>
#include <telebot-parser.h>
/*
 * Parse a raw JSON string into a json-c object.
 * Returns a new reference (caller releases it with json_object_put()),
 * or NULL if the data is not valid JSON.
 */
struct json_object *telebot_parser_str_to_obj(char *data)
{
    return json_tokener_parse(data);
}
/*
 * Parse a JSON array of bot updates into a newly allocated array of
 * telebot_update_t.
 *
 * @param obj     JSON array object (the "result" of getUpdates).
 * @param updates Out: receives a calloc()'d array; caller must free() it.
 * @param count   Out: receives the number of parsed updates.
 * @return TELEBOT_ERROR_NONE on success, or an error code on bad arguments,
 *         an empty array, or allocation failure.
 *
 * N.B. json_object_array_get_idx() and json_object_object_get_ex() return
 * *borrowed* references owned by the parent object - they must NOT be
 * released with json_object_put(). The previous implementation did release
 * them, corrupting the parent's reference counts and causing a double free
 * when the caller eventually released the parsed response object.
 */
telebot_error_e telebot_parser_get_updates(struct json_object *obj,
        telebot_update_t **updates, int *count)
{
    if ((obj == NULL) || (updates == NULL) || (count == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    int array_len = json_object_array_length(obj);
    if (!array_len)
        return TELEBOT_ERROR_OPERATION_FAILED;

    /* calloc() zero-fills, so no per-element memset is needed */
    telebot_update_t *tmp = calloc(array_len, sizeof(telebot_update_t));
    if (tmp == NULL)
        return TELEBOT_ERROR_OUT_OF_MEMORY;

    int index;
    for (index = 0; index < array_len; index++) {
        /* Borrowed reference - owned by the array */
        struct json_object *item = json_object_array_get_idx(obj, index);

        struct json_object *update_id;
        if (json_object_object_get_ex(item, "update_id", &update_id))
            tmp[index].update_id = json_object_get_int(update_id);

        struct json_object *message;
        if (json_object_object_get_ex(item, "message", &message)) {
            if (telebot_parser_get_message(message, &(tmp[index].message)) !=
                    TELEBOT_ERROR_NONE)
                ERR("Failed to parse message of bot update");
        }
    }

    *count = array_len;
    *updates = tmp;

    return TELEBOT_ERROR_NONE;
}
telebot_error_e telebot_parser_get_message(struct json_object *obj,
telebot_message_t *msg)
{
if (obj == NULL)
return TELEBOT_ERROR_INVALID_PARAMETER;
if (msg == NULL)
return TELEBOT_ERROR_INVALID_PARAMETER;
memset(msg, 0, sizeof(telebot_message_t));
struct json_object *message_id;
if (!json_object_object_get_ex(obj, "message_id", &message_id)) {
ERR("Failed to get <message_id> from message object");
return TELEBOT_ERROR_OPERATION_FAILED;
}
msg->message_id = json_object_get_int(message_id);
json_object_put(message_id);
int ret;
struct json_object *from;
if (json_object_object_get_ex(obj, "from", &from)) {
ret = telebot_parser_get_user(from , &(msg->from));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <from user> from message object");
json_object_put(from);
}
struct json_object *date;
if (json_object_object_get_ex(obj, "date", &date)) {
msg->date = json_object_get_int(date);
json_object_put(date);
}
struct json_object *chat;
if (json_object_object_get_ex(obj, "chat", &chat)) {
ret = telebot_parser_get_chat(chat , &(msg->chat));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <chat> from message object");
json_object_put(chat);
}
struct json_object *forward_from;
if (json_object_object_get_ex(obj, "forward_from", &forward_from)) {
ret = telebot_parser_get_user(forward_from , &(msg->forward_from));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <forward from> from message object");
json_object_put(forward_from);
}
struct json_object *forward_date;
if (json_object_object_get_ex(obj, "forward_date", &forward_date)) {
msg->forward_date = json_object_get_int(forward_date);
json_object_put(forward_date);
}
/* FIXME: allocating memory with alloca(), it is really wrong. */
/*
struct json_object *reply_to_message;
if (json_object_object_get_ex(obj, "reply_to_message", &reply_to_message)) {
telebot_message_t *reply = alloca(sizeof(telebot_message_t));
ret = telebot_parser_get_message(reply_to_message , reply);
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <reply_to_message> from message object");
msg->reply_to_message = reply;
}
*/
struct json_object *text;
if (json_object_object_get_ex(obj, "text", &text)) {
snprintf(msg->text, TELEBOT_MESSAGE_TEXT_SIZE, "%s",
json_object_get_string(text));
json_object_put(text);
}
struct json_object *audio;
if (json_object_object_get_ex(obj, "audio", &audio)) {
ret = telebot_parser_get_audio(audio , &(msg->audio));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <audio> from message object");
json_object_put(audio);
}
struct json_object *document;
if (json_object_object_get_ex(obj, "document", &document)) {
ret = telebot_parser_get_document(document , &(msg->document));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <document> from message object");
json_object_put(document);
}
struct json_object *photo;
if (json_object_object_get_ex(obj, "photo", &photo)) {
ret = telebot_parser_get_photos(photo , msg->photo,
TELEBOT_MESSAGE_PHOTO_SIZE);
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <photo> from message object");
json_object_put(photo);
}
struct json_object *video;
if (json_object_object_get_ex(obj, "video", &video)) {
ret = telebot_parser_get_video(video , &(msg->video));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <video> from message object");
json_object_put(video);
}
struct json_object *voice;
if (json_object_object_get_ex(obj, "voice", &voice)) {
ret = telebot_parser_get_voice(voice , &(msg->voice));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <voice> from message object");
json_object_put(voice);
}
struct json_object *caption;
if (json_object_object_get_ex(obj, "caption", &caption)) {
snprintf(msg->caption, TELEBOT_MESSAGE_CAPTION_SIZE, "%s",
json_object_get_string(caption));
json_object_put(caption);
}
struct json_object *contact;
if (json_object_object_get_ex(obj, "contact", &contact)) {
ret = telebot_parser_get_contact(contact , &(msg->contact));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <contact> from message object");
json_object_put(contact);
}
struct json_object *location;
if (json_object_object_get_ex(obj, "location", &location)) {
ret = telebot_parser_get_location(location , &(msg->location));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <location> from message object");
json_object_put(location);
}
struct json_object *ncp;
if (json_object_object_get_ex(obj, "new_chat_participant", &ncp)) {
ret = telebot_parser_get_user(ncp , &(msg->new_chat_participant));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <new_chat_participant> from message object");
json_object_put(ncp);
}
struct json_object *lcp;
if (json_object_object_get_ex(obj, "left_chat_participant", &lcp)) {
ret = telebot_parser_get_user(lcp , &(msg->left_chat_participant));
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <left_chat_participant> from message object");
json_object_put(lcp);
}
struct json_object *nct;
if (json_object_object_get_ex(obj, "new_chat_title", &nct)) {
snprintf(msg->new_chat_title, TELEBOT_CHAT_TITLE_SIZE, "%s",
json_object_get_string(nct));
json_object_put(nct);
}
struct json_object *new_chat_photo;
if (json_object_object_get_ex(obj, "new_chat_photo", &new_chat_photo)) {
ret = telebot_parser_get_photos(new_chat_photo , msg->new_chat_photo,
TELEBOT_MESSAGE_NEW_CHAT_PHOTO_SIZE);
if (ret != TELEBOT_ERROR_NONE)
ERR("Failed to get <left_chat_participant> from message object");
json_object_put(new_chat_photo);
}
struct json_object *del_chat_photo;
if (json_object_object_get_ex(obj, "delete_chat_photo", &del_chat_photo)) {
msg->delete_chat_photo = json_object_get_boolean(del_chat_photo);
json_object_put(del_chat_photo);
}
struct json_object *gcc;
if (json_object_object_get_ex(obj, "group_chat_created", &gcc)) {
msg->group_chat_created = json_object_get_boolean(gcc);
json_object_put(gcc);
}
struct json_object *sgcc;
if (json_object_object_get_ex(obj, "supergroup_chat_created", &sgcc)) {
msg->supergroup_chat_created = json_object_get_boolean(sgcc);
json_object_put(sgcc);
}
struct json_object *cacc;
if (json_object_object_get_ex(obj, "channel_chat_created", &cacc)) {
msg->channel_chat_created = json_object_get_boolean(cacc);
json_object_put(cacc);
}
struct json_object *mtci;
if (json_object_object_get_ex(obj, "migrate_to_chat_id", &mtci)) {
msg->migrate_to_chat_id = json_object_get_int(mtci);
json_object_put(mtci);
}
struct json_object *mftci;
if (json_object_object_get_ex(obj, "migrate_from_chat_id", &mftci)) {
msg->migrate_from_chat_id = json_object_get_int(mftci);
json_object_put(mftci);
}
return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <User> JSON object into @a user.
 *
 * Mandatory fields: "id", "first_name". Optional: "last_name", "username".
 *
 * @param obj  json-c object to read from (not consumed).
 * @param user Output structure; zeroed before filling.
 * @return TELEBOT_ERROR_NONE on success, TELEBOT_ERROR_INVALID_PARAMETER on
 *         NULL arguments, TELEBOT_ERROR_OPERATION_FAILED when a mandatory
 *         field is missing.
 *
 * Fix: json_object_object_get_ex() returns a *borrowed* reference owned by
 * @a obj, so the fields must NOT be released with json_object_put() —
 * doing so corrupts the reference count and can cause a double free.
 */
telebot_error_e telebot_parser_get_user(struct json_object *obj,
        telebot_user_t *user)
{
    if ((obj == NULL) || (user == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(user, 0, sizeof(telebot_user_t));

    struct json_object *id;
    if (!json_object_object_get_ex(obj, "id", &id)) {
        ERR("Object is not json user type, id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    user->id = json_object_get_int(id);

    struct json_object *first_name;
    if (!json_object_object_get_ex(obj, "first_name", &first_name)) {
        ERR("Object is not user type, first_name not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(user->first_name, TELEBOT_FIRST_NAME_SIZE, "%s",
            json_object_get_string(first_name));

    /* Optional fields: silently skipped when absent. */
    struct json_object *last_name;
    if (json_object_object_get_ex(obj, "last_name", &last_name))
        snprintf(user->last_name, TELEBOT_LAST_NAME_SIZE, "%s",
                json_object_get_string(last_name));

    struct json_object *username;
    if (json_object_object_get_ex(obj, "username", &username))
        snprintf(user->username, TELEBOT_USER_NAME_SIZE, "%s",
                json_object_get_string(username));

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Chat> JSON object into @a chat.
 *
 * Mandatory fields: "id", "type". Optional: "title", "username",
 * "first_name", "last_name".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 * NOTE(review): chat IDs can exceed 32 bits in the Bot API; confirm the
 * width of telebot_chat_t::id before relying on json_object_get_int().
 */
telebot_error_e telebot_parser_get_chat(struct json_object *obj,
        telebot_chat_t *chat)
{
    if ((obj == NULL) || (chat == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(chat, 0, sizeof(telebot_chat_t));

    struct json_object *id;
    if (!json_object_object_get_ex(obj, "id", &id)) {
        ERR("Object is not chat type, id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    chat->id = json_object_get_int(id);

    struct json_object *type;
    if (!json_object_object_get_ex(obj, "type", &type)) {
        ERR("Object is not chat type, type not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(chat->type, TELEBOT_CHAT_TYPE_SIZE, "%s",
            json_object_get_string(type));

    /* Optional fields: silently skipped when absent. */
    struct json_object *title;
    if (json_object_object_get_ex(obj, "title", &title))
        snprintf(chat->title, TELEBOT_CHAT_TITLE_SIZE, "%s",
                json_object_get_string(title));

    struct json_object *username;
    if (json_object_object_get_ex(obj, "username", &username))
        snprintf(chat->username, TELEBOT_USER_NAME_SIZE, "%s",
                json_object_get_string(username));

    struct json_object *first_name;
    if (json_object_object_get_ex(obj, "first_name", &first_name))
        snprintf(chat->first_name, TELEBOT_FIRST_NAME_SIZE, "%s",
                json_object_get_string(first_name));

    struct json_object *last_name;
    if (json_object_object_get_ex(obj, "last_name", &last_name))
        snprintf(chat->last_name, TELEBOT_LAST_NAME_SIZE, "%s",
                json_object_get_string(last_name));

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Audio> JSON object into @a audio.
 *
 * Mandatory fields: "file_id", "duration". Optional: "performer", "title",
 * "mime_type", "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_audio(struct json_object *obj,
        telebot_audio_t *audio)
{
    if ((obj == NULL) || (audio == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(audio, 0, sizeof(telebot_audio_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not audio type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(audio->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    struct json_object *duration;
    if (!json_object_object_get_ex(obj, "duration", &duration)) {
        ERR("Object is not audio type, duration not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    audio->duration = json_object_get_int(duration);

    /* Optional fields: silently skipped when absent. */
    struct json_object *performer;
    if (json_object_object_get_ex(obj, "performer", &performer))
        snprintf(audio->performer, TELEBOT_AUDIO_PERFORMER_SIZE, "%s",
                json_object_get_string(performer));

    struct json_object *title;
    if (json_object_object_get_ex(obj, "title", &title))
        snprintf(audio->title, TELEBOT_AUDIO_TITLE_SIZE, "%s",
                json_object_get_string(title));

    struct json_object *mime_type;
    if (json_object_object_get_ex(obj, "mime_type", &mime_type))
        snprintf(audio->mime_type, TELEBOT_AUDIO_MIME_TYPE_SIZE, "%s",
                json_object_get_string(mime_type));

    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        audio->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Document> JSON object into @a document.
 *
 * Mandatory field: "file_id". Optional: "thumb", "file_name", "mime_type",
 * "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when "file_id" is missing.
 *
 * Fixes: the original released borrowed references with json_object_put()
 * (json_object_object_get_ex() does not transfer ownership), and the
 * mime_type/file_size branches put the wrong variable (file_name) —
 * copy-paste bugs. The error message also wrongly said "audio type".
 */
telebot_error_e telebot_parser_get_document(struct json_object *obj,
        telebot_document_t *document)
{
    if ((obj == NULL) || (document == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(document, 0, sizeof(telebot_document_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not document type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(document->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    /* Optional fields: silently skipped when absent. */
    struct json_object *thumb;
    if (json_object_object_get_ex(obj, "thumb", &thumb)) {
        if (telebot_parser_get_photo(thumb, &(document->thumb)) !=
                TELEBOT_ERROR_NONE)
            ERR("Failed to get <thumb> from document object");
    }

    struct json_object *file_name;
    if (json_object_object_get_ex(obj, "file_name", &file_name))
        snprintf(document->file_name, TELEBOT_FILE_NAME_SIZE, "%s",
                json_object_get_string(file_name));

    struct json_object *mime_type;
    if (json_object_object_get_ex(obj, "mime_type", &mime_type))
        snprintf(document->mime_type, TELEBOT_DOCUMENT_MIME_TYPE_SIZE, "%s",
                json_object_get_string(mime_type));

    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        document->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a <UserProfilePhotos> object into a freshly allocated array.
 *
 * On success *photos points to a heap array of parsed photo sizes (owned by
 * the caller, release with free()) and *count holds the number of filled
 * entries. On total_count == 0 the outputs are set to NULL/0.
 *
 * Fixes over the original:
 *  - the error path returned BEFORE freeing the array (unreachable cleanup
 *    code after `return` — a guaranteed leak on parse failure);
 *  - the nested loops counted every photo *size*, but the array was sized
 *    by total_count (photo count) — a heap overflow; writes are now bounded;
 *  - calloc() result is checked;
 *  - borrowed json-c references are no longer json_object_put().
 */
telebot_error_e telebot_parser_get_profile_photos(struct json_object *obj,
        telebot_photo_t **photos, int *count)
{
    if ((obj == NULL) || (photos == NULL) || (count == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    *photos = NULL;
    *count = 0;

    struct json_object *total_count_obj;
    if (!json_object_object_get_ex(obj, "total_count", &total_count_obj)) {
        ERR("Object is not user profile photo type, total_count not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    int total_count = json_object_get_int(total_count_obj);

    if (total_count == 0)
        return TELEBOT_ERROR_NONE;

    struct json_object *array;
    if (!json_object_object_get_ex(obj, "photos", &array)) {
        ERR("Failed to get photos from <UserProfilePhotos> object");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }

    telebot_photo_t *tmp = calloc(total_count, sizeof(telebot_photo_t));
    if (tmp == NULL)
        return TELEBOT_ERROR_OUT_OF_MEMORY;

    telebot_error_e ret = TELEBOT_ERROR_NONE;
    int i, j, k = 0;
    int n = json_object_array_length(array);
    for (i = 0; (i < n) && (k < total_count); i++) {
        /* Each element is itself an array of sizes for one photo. */
        struct json_object *item = json_object_array_get_idx(array, i);
        int m = json_object_array_length(item);
        for (j = 0; (j < m) && (k < total_count); j++) {
            struct json_object *photo = json_object_array_get_idx(item, j);
            ret |= telebot_parser_get_photo(photo, &(tmp[k]));
            k++;
        }
    }

    if (ret != TELEBOT_ERROR_NONE) {
        free(tmp);
        return TELEBOT_ERROR_OPERATION_FAILED;
    }

    *photos = tmp;
    *count = k; /* number of entries actually filled */
    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <PhotoSize> JSON object into @a photo.
 *
 * Mandatory fields: "file_id", "width", "height". Optional: "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_photo(struct json_object *obj,
        telebot_photo_t *photo)
{
    if ((obj == NULL) || (photo == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(photo, 0, sizeof(telebot_photo_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not photo size type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(photo->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    struct json_object *width;
    if (!json_object_object_get_ex(obj, "width", &width)) {
        ERR("Object is not photo size type, width not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    photo->width = json_object_get_int(width);

    struct json_object *height;
    if (!json_object_object_get_ex(obj, "height", &height)) {
        ERR("Object is not photo size type, height not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    photo->height = json_object_get_int(height);

    /* Optional field. */
    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        photo->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a JSON array of <PhotoSize> objects into @a photo_array.
 *
 * Parses at most @a array_size entries; extra elements are silently
 * dropped (the caller provides a fixed-capacity array). Elements that
 * fail to parse are logged and left zero-initialized by
 * telebot_parser_get_photo().
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED on an empty array.
 *
 * Fix: json_object_array_get_idx() returns a borrowed reference owned by
 * the array; it must not be released with json_object_put().
 */
telebot_error_e telebot_parser_get_photos(struct json_object *obj,
        telebot_photo_t photo_array[], int array_size)
{
    if ((obj == NULL) || (photo_array == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    int array_len = json_object_array_length(obj);
    if (!array_len)
        return TELEBOT_ERROR_OPERATION_FAILED;

    /* Clamp to the caller-provided capacity. */
    if (array_len > array_size)
        array_len = array_size;

    int index;
    for (index = 0; index < array_len; index++) {
        struct json_object *item = json_object_array_get_idx(obj, index);
        if (telebot_parser_get_photo(item, &(photo_array[index])) !=
                TELEBOT_ERROR_NONE)
            ERR("Failed to parse photo object");
    }

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Sticker> JSON object into @a sticker.
 *
 * Mandatory fields: "file_id", "width", "height". Optional: "thumb",
 * "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_sticker(struct json_object *obj,
        telebot_sticker_t *sticker)
{
    if ((obj == NULL) || (sticker == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(sticker, 0, sizeof(telebot_sticker_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not sticker type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(sticker->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    struct json_object *width;
    if (!json_object_object_get_ex(obj, "width", &width)) {
        ERR("Object is not sticker type, width not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    sticker->width = json_object_get_int(width);

    struct json_object *height;
    if (!json_object_object_get_ex(obj, "height", &height)) {
        ERR("Object is not sticker type, height not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    sticker->height = json_object_get_int(height);

    /* Optional fields: silently skipped when absent. */
    struct json_object *thumb;
    if (json_object_object_get_ex(obj, "thumb", &thumb)) {
        if (telebot_parser_get_photo(thumb, &(sticker->thumb)) !=
                TELEBOT_ERROR_NONE)
            ERR("Failed to get <thumb> from sticker object");
    }

    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        sticker->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Video> JSON object into @a video.
 *
 * Mandatory fields: "file_id", "width", "height", "duration".
 * Optional: "thumb", "mime_type", "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_video(struct json_object *obj,
        telebot_video_t *video)
{
    if ((obj == NULL) || (video == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(video, 0, sizeof(telebot_video_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not video type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(video->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    struct json_object *width;
    if (!json_object_object_get_ex(obj, "width", &width)) {
        ERR("Object is not video type, width not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    video->width = json_object_get_int(width);

    struct json_object *height;
    if (!json_object_object_get_ex(obj, "height", &height)) {
        ERR("Object is not video type, height not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    video->height = json_object_get_int(height);

    struct json_object *duration;
    if (!json_object_object_get_ex(obj, "duration", &duration)) {
        ERR("Object is not video type, duration not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    video->duration = json_object_get_int(duration);

    /* Optional fields: silently skipped when absent. */
    struct json_object *thumb;
    if (json_object_object_get_ex(obj, "thumb", &thumb)) {
        if (telebot_parser_get_photo(thumb, &(video->thumb)) !=
                TELEBOT_ERROR_NONE)
            ERR("Failed to get <thumb> from video object");
    }

    struct json_object *mime_type;
    if (json_object_object_get_ex(obj, "mime_type", &mime_type))
        snprintf(video->mime_type, TELEBOT_VIDEO_MIME_TYPE_SIZE, "%s",
                json_object_get_string(mime_type));

    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        video->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Voice> JSON object into @a voice.
 *
 * Mandatory fields: "file_id", "duration". Optional: "mime_type",
 * "file_size".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_voice(struct json_object *obj,
        telebot_voice_t *voice)
{
    if ((obj == NULL) || (voice == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(voice, 0, sizeof(telebot_voice_t));

    struct json_object *file_id;
    if (!json_object_object_get_ex(obj, "file_id", &file_id)) {
        ERR("Object is not voice type, file_id not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(voice->file_id, TELEBOT_FILE_ID_SIZE, "%s",
            json_object_get_string(file_id));

    struct json_object *duration;
    if (!json_object_object_get_ex(obj, "duration", &duration)) {
        ERR("Object is not voice type, voice duration not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    voice->duration = json_object_get_int(duration);

    /* Optional fields: silently skipped when absent. */
    struct json_object *mime_type;
    if (json_object_object_get_ex(obj, "mime_type", &mime_type))
        snprintf(voice->mime_type, TELEBOT_AUDIO_MIME_TYPE_SIZE, "%s",
                json_object_get_string(mime_type));

    struct json_object *file_size;
    if (json_object_object_get_ex(obj, "file_size", &file_size))
        voice->file_size = json_object_get_int(file_size);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Contact> JSON object into @a contact.
 *
 * Mandatory fields: "phone_number", "first_name". Optional: "last_name",
 * "user_id".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fix: fields returned by json_object_object_get_ex() are borrowed
 * references owned by @a obj and must not be json_object_put().
 */
telebot_error_e telebot_parser_get_contact(struct json_object *obj,
        telebot_contact_t *contact)
{
    if ((obj == NULL) || (contact == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(contact, 0, sizeof(telebot_contact_t));

    struct json_object *phone_number;
    if (!json_object_object_get_ex(obj, "phone_number", &phone_number)) {
        ERR("Object is not contact type, phone number not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(contact->phone_number, TELEBOT_PHONE_NUMBER_SIZE, "%s",
            json_object_get_string(phone_number));

    struct json_object *first_name;
    if (!json_object_object_get_ex(obj, "first_name", &first_name)) {
        ERR("Object is not contact type, first name not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    snprintf(contact->first_name, TELEBOT_FIRST_NAME_SIZE, "%s",
            json_object_get_string(first_name));

    /* Optional fields: silently skipped when absent. */
    struct json_object *last_name;
    if (json_object_object_get_ex(obj, "last_name", &last_name))
        snprintf(contact->last_name, TELEBOT_LAST_NAME_SIZE, "%s",
                json_object_get_string(last_name));

    struct json_object *user_id;
    if (json_object_object_get_ex(obj, "user_id", &user_id))
        contact->user_id = json_object_get_int(user_id);

    return TELEBOT_ERROR_NONE;
}
/**
 * @brief Parses a Telegram <Location> JSON object into @a location.
 *
 * Mandatory fields: "latitude", "longitude".
 *
 * @return TELEBOT_ERROR_NONE, TELEBOT_ERROR_INVALID_PARAMETER, or
 *         TELEBOT_ERROR_OPERATION_FAILED when a mandatory field is missing.
 *
 * Fixes: the longitude error message wrongly reported "latitude";
 * borrowed json-c references are no longer json_object_put().
 */
telebot_error_e telebot_parser_get_location(struct json_object *obj,
        telebot_location_t *location)
{
    if ((obj == NULL) || (location == NULL))
        return TELEBOT_ERROR_INVALID_PARAMETER;

    memset(location, 0, sizeof(telebot_location_t));

    struct json_object *latitude;
    if (!json_object_object_get_ex(obj, "latitude", &latitude)) {
        ERR("Object is not location type, latitude not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    location->latitude = json_object_get_double(latitude);

    struct json_object *longitude;
    if (!json_object_object_get_ex(obj, "longitude", &longitude)) {
        ERR("Object is not location type, longitude not found");
        return TELEBOT_ERROR_OPERATION_FAILED;
    }
    location->longitude = json_object_get_double(longitude);

    return TELEBOT_ERROR_NONE;
}
telebot_error_e telebot_parser_get_file_path(struct json_object *obj,
char **path) {
if (obj == NULL)
return TELEBOT_ERROR_INVALID_PARAMETER;
if (path == NULL)
return TELEBOT_ERROR_INVALID_PARAMETER;
struct json_object *file_path;
if (json_object_object_get_ex (obj, "file_path", &file_path)) {
*path = strdup(json_object_get_string(file_path));
json_object_put (file_path);
}
else {
*path = NULL;
return TELEBOT_ERROR_OPERATION_FAILED;
}
return TELEBOT_ERROR_NONE;
}
| 32.518717
| 80
| 0.665187
|
[
"object"
] |
d70453f03714780127debfd09900fbd5a7dc7e4e
| 1,267
|
h
|
C
|
wfs/stored_queries/GetDataSetByIdHandler.h
|
nakkim/smartmet-plugin-wfs
|
851334dd3be1a24b9708f66696f088fdc857a999
|
[
"MIT"
] | null | null | null |
wfs/stored_queries/GetDataSetByIdHandler.h
|
nakkim/smartmet-plugin-wfs
|
851334dd3be1a24b9708f66696f088fdc857a999
|
[
"MIT"
] | 2
|
2018-04-17T10:02:46.000Z
|
2019-10-21T08:57:55.000Z
|
wfs/stored_queries/GetDataSetByIdHandler.h
|
nakkim/smartmet-plugin-wfs
|
851334dd3be1a24b9708f66696f088fdc857a999
|
[
"MIT"
] | 2
|
2017-05-10T12:03:51.000Z
|
2021-07-06T07:05:25.000Z
|
#pragma once
#include "PluginImpl.h"
#include "StoredQueryConfig.h"
#include "StoredQueryHandlerBase.h"
#include "StoredQueryHandlerFactoryDef.h"
#include <map>
namespace SmartMet
{
namespace Plugin
{
namespace WFS
{
/**
 * @brief Handler for the GetDataSetById stored query.
 *
 * (Fixed comment: the original said "GetFeatureById", which does not match
 * the class name.) Resolves a data-set identifier to the stored query that
 * actually produces that data set and redirects the request to it.
 */
class GetDataSetByIdHandler : public StoredQueryHandlerBase
{
 public:
  /// Constructs the handler from its stored-query configuration.
  GetDataSetByIdHandler(SmartMet::Spine::Reactor* reactor,
                        StoredQueryConfig::Ptr config,
                        PluginImpl& plugin_impl);

  virtual ~GetDataSetByIdHandler();

  /// Executes the stored query and streams the response to @a output.
  virtual void query(const StoredQuery& query,
                     const std::string& language,
                     const boost::optional<std::string>& hostname,
                     std::ostream& output) const;

  /// Presumably maps the requested data-set ID to the stored query ID that
  /// should actually run (via data_set_map) — confirm against the .cpp.
  virtual bool redirect(const StoredQuery& query, std::string& new_stored_query_id) const;

  virtual std::vector<std::string> get_return_types() const;

  /// Post-construction initialization hook called by the plugin framework.
  virtual void init_handler();

 private:
  /**
   * @brief Maps data set ID to corresponding stored query ID which returns this
   */
  std::map<std::string, std::string> data_set_map;
};

}  // namespace WFS
}  // namespace Plugin
}  // namespace SmartMet

// Factory symbol looked up by the plugin loader when registering handlers.
extern SmartMet::Plugin::WFS::StoredQueryHandlerFactoryDef wfs_get_feature_by_id_handler_factory;
| 25.34
| 97
| 0.702447
|
[
"vector"
] |
d7046194687e42f2e698b9ba8baf86adec52d1c5
| 4,893
|
h
|
C
|
Projects/RealityEngine/include/Gameplay/ComponentSystem.h
|
Volta948/RealityEngine
|
1a9e4b7db00617176d06004af934d6602dd5920a
|
[
"BSD-3-Clause"
] | null | null | null |
Projects/RealityEngine/include/Gameplay/ComponentSystem.h
|
Volta948/RealityEngine
|
1a9e4b7db00617176d06004af934d6602dd5920a
|
[
"BSD-3-Clause"
] | null | null | null |
Projects/RealityEngine/include/Gameplay/ComponentSystem.h
|
Volta948/RealityEngine
|
1a9e4b7db00617176d06004af934d6602dd5920a
|
[
"BSD-3-Clause"
] | 1
|
2021-11-05T02:55:27.000Z
|
2021-11-05T02:55:27.000Z
|
// Copyright Reality Engine. All Rights Reserved.
#pragma once
#include "Rendering/Opengl/GLParticleSystem.h"
#include "Rendering/Opengl/GLPipeline.h"
#include "Rendering/Opengl/GLMaterial.h"
#include "Rendering/Opengl/GLContext.h"
#include "Rendering/Opengl/GLLight.h"
#include "Rendering/Opengl/GLMesh.h"
#include "Rendering/Opengl/GLModel.h"
#include "ComponentHelper.h"
#include "Windowing/IO.h"
#include "Scene.h"
namespace Reality {
	/**
	 * Stateless per-frame systems that iterate the scene's component pools.
	 * NOTE(review): call order appears to matter — UpdateTransforms must run
	 * before the systems that consume the resulting TRS matrices (cameras,
	 * lights, meshes, particles); confirm against the engine's frame loop.
	 */
	class ComponentSystem {
	public:
		void UpdateTransforms(Scene& scene) const;
		void UpdateCameras(Scene& scene, Vector2 windowSize) const;
		void UpdateLights(Scene& scene) const;
		// Depth-only draw pass (no material binding) for shadow rendering.
		void UpdateMeshesShadow(Scene& scene) const;
		void UpdateMeshes(Scene& scene) const;
		void UpdateParticles(Scene& scene, Vector3 cameraPosition) const;
		void UpdateMonoBehaviours(Scene& scene) const;
	};
}
// Recomputes the TRS matrix of every transform, walking each hierarchy
// depth-first from the scene roots.
inline void Reality::ComponentSystem::UpdateTransforms(Scene& scene) const {
	std::function<void(CTransform&)> UpdateHierarchy = [&UpdateHierarchy](auto& root) {
		// Rebuild the local matrix only when this transform was modified
		// this frame (scale * rotation * translation composition).
		if (root.HasChanged()) {
			root.SetTrs(Matrix4::Scale(root.GetScale()) * root.GetRotation().GetMatrix() *
				Matrix4::Translate(root.GetPosition()));
		}
		// Fold in the parent's matrix when the parent moved. NOTE(review):
		// this relies on the parent's dirty flag still being set, i.e. on
		// parents being visited before their children clear it — which the
		// depth-first order below provides, since the flag is cleared only
		// after all children are processed.
		if (const auto parent{ root.GetParent() }; parent && parent->HasChanged()) {
			root.SetTrs(root.GetTrs() * parent->GetTrs());
		}
		for (auto child : root.GetChildren()) {
			UpdateHierarchy(*child);
		}
		// Consume the dirty flag once every descendant has seen it.
		root.SetHasChanged(false);
	};
	for (const auto& object : scene.m_Roots) {
		UpdateHierarchy(object->Transform);
	}
}
// Elects the active camera and refreshes the GL view/projection matrices
// when its transform changed this frame.
inline void Reality::ComponentSystem::UpdateCameras(Scene& scene, Vector2 windowSize) const {
	// NOTE(review): this loop overwrites s_Main for every camera component,
	// so the last camera returned by the manager wins — confirm intended.
	for (const auto camera : scene.m_Manager.GetComponents<CCamera>()) {
		CCamera::s_Main = static_cast<const CCamera*>(camera);
	}
	if (CCamera::s_Main) {
		// Rebuild the matrices only when the camera's transform is dirty.
		if (const auto transform{ CCamera::s_Main->GetGameObject().GetComponent<CTransform>() }; transform && transform->HasChanged()) {
			GLContext::SetProjectionMatrix(Matrix4::Perspective(windowSize.X / windowSize.Y, CCamera::s_Main->Near,
				CCamera::s_Main->Far, CCamera::s_Main->Fov));
			GLContext::SetViewMatrix(Matrix4::LookAt(transform->GetPosition(), transform->GetForward(), transform->GetUp()));
		}
	}
}
// Converts every CLight component into the GPU-side GLLight layout and
// uploads the whole array in one call.
inline void Reality::ComponentSystem::UpdateLights(Scene& scene) const {
	if (auto lights{ scene.m_Manager.GetComponents<CLight>() }; !lights.empty()) {
		std::vector<GLLight> glLights(lights.size());
		for (std::size_t i{}; i < glLights.size(); ++i) {
			const auto& light{ *static_cast<const CLight*>(lights[i]) };
			// Aggregate-initializes GLLight; the interleaved 0s fill the
			// std140-style padding slots after position and direction.
			// NOTE(review): "Range - 10.f" is an unexplained magic offset —
			// confirm the shader expects it.
			glLights[i] = {
				light.Color, light.GetGameObject().Transform.GetPosition(), 0,
				light.GetGameObject().Transform.GetForward(), 0, (enum GLLight::Type)light.Type,
				(enum GLLight::Shadow)light.Shadow, light.Intensity, light.Range - 10.f, light.SpotAngle
			};
			// Only a directional light with soft shadows drives the shadow
			// matrix; bounds are a hard-coded 30-unit ortho box.
			if (light.Type == CLight::Type::Directional && light.Shadow == CLight::Shadow::Soft) {
				GLContext::SetShadowMatrix(glLights[i].Direction, {}, { -15.f, 15.f, -15.f, 15.f, -15.f, 15.f });
			}
		}
		GLContext::SetLights(glLights);
	}
}
// Depth-only pass: draws every active mesh renderer without binding
// materials, so only geometry reaches the shadow map.
inline void Reality::ComponentSystem::UpdateMeshesShadow(Scene& scene) const {
	for (const auto component : scene.m_Manager.GetComponents<CMeshRenderer>()) {
		if (!component->GetGameObject().IsActive) {
			continue;
		}
		GLContext::SetModelMatrix(component->GetGameObject().Transform.GetTrs());
		const auto model{ static_cast<const CMeshRenderer*>(component)->GetModel() };
		if (!model) {
			continue;
		}
		for (const auto& submesh : model->Meshes) {
			submesh->Draw();
		}
	}
}
// Main color pass: draws every active mesh renderer, binding each
// submesh's material (when it has one) before issuing the draw call.
inline void Reality::ComponentSystem::UpdateMeshes(Scene& scene) const {
	for (const auto component : scene.m_Manager.GetComponents<CMeshRenderer>()) {
		if (!component->GetGameObject().IsActive) {
			continue;
		}
		GLContext::SetModelMatrix(component->GetGameObject().Transform.GetTrs());
		const auto model{ static_cast<const CMeshRenderer*>(component)->GetModel() };
		if (!model) {
			continue;
		}
		for (const auto& submesh : model->Meshes) {
			const auto material{ submesh->Material };
			if (material) {
				material->Bind();
			}
			submesh->Draw();
		}
	}
}
// Advances every GPU particle system, anchoring its emitter to the owning
// game object's transform.
inline void Reality::ComponentSystem::UpdateParticles(Scene& scene, Vector3 cameraPosition) const {
	for (const auto& system : scene.m_Manager.GetComponents<CParticleSystem>()) {
		if (const auto glSystem{ static_cast<const CParticleSystem*>(system)->System }) {
			glSystem->Direction = Vector3::Normalize(system->GetGameObject().Transform.GetForward());
			glSystem->Position = system->GetGameObject().Transform.GetPosition();
			// Simulation runs only while a main camera exists; the bound
			// `camera` variable serves purely as that null check.
			if (auto camera{ CCamera::s_Main }) {
				glSystem->Update(g_Io->Time->GetDeltaTime() * glSystem->Speed, cameraPosition);
			}
		}
	}
}
// Invokes the per-frame Update() hook on every user script in the scene.
inline void Reality::ComponentSystem::UpdateMonoBehaviours(Scene& scene) const {
	for (const auto& component : scene.m_Manager.GetComponents<CMonoBehaviour>()) {
		auto* behaviour = static_cast<CMonoBehaviour*>(component);
		if (behaviour) {
			behaviour->Update();
		}
	}
}
| 37.638462
| 131
| 0.690987
|
[
"mesh",
"object",
"vector",
"transform"
] |
d70fa75a78b730dc273bc79e25049177bddab136
| 7,072
|
h
|
C
|
kernels/linux-2.4.0/drivers/acpi/include/acpixf.h
|
liuhaozzu/linux
|
bdf9758cd23e34b5f53e8e6339d9b29348615e14
|
[
"Apache-2.0"
] | 4
|
2020-01-01T20:26:42.000Z
|
2021-10-17T21:51:58.000Z
|
kernels/linux-2.4.0/drivers/acpi/include/acpixf.h
|
liuhaozzu/linux
|
bdf9758cd23e34b5f53e8e6339d9b29348615e14
|
[
"Apache-2.0"
] | 4
|
2020-07-23T11:20:30.000Z
|
2020-07-24T20:09:09.000Z
|
linux/drivers/acpi/include/acpixf.h
|
CodeAsm/PS1Linux
|
8c3c4d9ffccf446dd061a38186efc924da8a66be
|
[
"CC0-1.0"
] | null | null | null |
/******************************************************************************
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
*****************************************************************************/
/*
* Copyright (C) 2000 R. Byron Moore
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ACXFACE_H__
#define __ACXFACE_H__
#include "actypes.h"
#include "actbl.h"
/*
* Global interfaces
*/
ACPI_STATUS
acpi_initialize_subsystem (
void);
ACPI_STATUS
acpi_enable_subsystem (
u32 flags);
ACPI_STATUS
acpi_terminate (
void);
ACPI_STATUS
acpi_enable (
void);
ACPI_STATUS
acpi_disable (
void);
ACPI_STATUS
acpi_get_system_info(
ACPI_BUFFER *ret_buffer);
ACPI_STATUS
acpi_format_exception (
ACPI_STATUS exception,
ACPI_BUFFER *out_buffer);
/*
* ACPI table manipulation interfaces
*/
ACPI_STATUS
acpi_find_root_pointer (
ACPI_PHYSICAL_ADDRESS *rsdp_physical_address);
ACPI_STATUS
acpi_load_tables (
ACPI_PHYSICAL_ADDRESS rsdp_physical_address);
ACPI_STATUS
acpi_load_table (
ACPI_TABLE_HEADER *table_ptr);
ACPI_STATUS
acpi_unload_table (
ACPI_TABLE_TYPE table_type);
ACPI_STATUS
acpi_get_table_header (
ACPI_TABLE_TYPE table_type,
u32 instance,
ACPI_TABLE_HEADER *out_table_header);
ACPI_STATUS
acpi_get_table (
ACPI_TABLE_TYPE table_type,
u32 instance,
ACPI_BUFFER *ret_buffer);
/*
* Namespace and name interfaces
*/
ACPI_STATUS
acpi_walk_namespace (
ACPI_OBJECT_TYPE type,
ACPI_HANDLE start_object,
u32 max_depth,
WALK_CALLBACK user_function,
void *context,
void * *return_value);
ACPI_STATUS
acpi_get_devices (
NATIVE_CHAR *HID,
WALK_CALLBACK user_function,
void *context,
void **return_value);
ACPI_STATUS
acpi_get_name (
ACPI_HANDLE handle,
u32 name_type,
ACPI_BUFFER *ret_path_ptr);
ACPI_STATUS
acpi_get_handle (
ACPI_HANDLE parent,
ACPI_STRING pathname,
ACPI_HANDLE *ret_handle);
/*
* Object manipulation and enumeration
*/
ACPI_STATUS
acpi_evaluate_object (
ACPI_HANDLE object,
ACPI_STRING pathname,
ACPI_OBJECT_LIST *parameter_objects,
ACPI_BUFFER *return_object_buffer);
ACPI_STATUS
acpi_get_object_info (
ACPI_HANDLE device,
ACPI_DEVICE_INFO *info);
ACPI_STATUS
acpi_get_next_object (
ACPI_OBJECT_TYPE type,
ACPI_HANDLE parent,
ACPI_HANDLE child,
ACPI_HANDLE *out_handle);
ACPI_STATUS
acpi_get_type (
ACPI_HANDLE object,
ACPI_OBJECT_TYPE *out_type);
ACPI_STATUS
acpi_get_parent (
ACPI_HANDLE object,
ACPI_HANDLE *out_handle);
/*
* Acpi_event handler interfaces
*/
ACPI_STATUS
acpi_install_fixed_event_handler (
u32 acpi_event,
FIXED_EVENT_HANDLER handler,
void *context);
ACPI_STATUS
acpi_remove_fixed_event_handler (
u32 acpi_event,
FIXED_EVENT_HANDLER handler);
ACPI_STATUS
acpi_install_notify_handler (
ACPI_HANDLE device,
u32 handler_type,
NOTIFY_HANDLER handler,
void *context);
ACPI_STATUS
acpi_remove_notify_handler (
ACPI_HANDLE device,
u32 handler_type,
NOTIFY_HANDLER handler);
ACPI_STATUS
acpi_install_address_space_handler (
ACPI_HANDLE device,
ACPI_ADDRESS_SPACE_TYPE space_id,
ADDRESS_SPACE_HANDLER handler,
ADDRESS_SPACE_SETUP setup,
void *context);
ACPI_STATUS
acpi_remove_address_space_handler (
ACPI_HANDLE device,
ACPI_ADDRESS_SPACE_TYPE space_id,
ADDRESS_SPACE_HANDLER handler);
ACPI_STATUS
acpi_install_gpe_handler (
u32 gpe_number,
u32 type,
GPE_HANDLER handler,
void *context);
ACPI_STATUS
acpi_acquire_global_lock (
void);
ACPI_STATUS
acpi_release_global_lock (
void);
ACPI_STATUS
acpi_remove_gpe_handler (
u32 gpe_number,
GPE_HANDLER handler);
ACPI_STATUS
acpi_enable_event (
u32 acpi_event,
u32 type);
ACPI_STATUS
acpi_disable_event (
u32 acpi_event,
u32 type);
ACPI_STATUS
acpi_clear_event (
u32 acpi_event,
u32 type);
ACPI_STATUS
acpi_get_event_status (
u32 acpi_event,
u32 type,
ACPI_EVENT_STATUS *event_status);
/*
* Resource interfaces
*/
ACPI_STATUS
acpi_get_current_resources(
ACPI_HANDLE device_handle,
ACPI_BUFFER *ret_buffer);
ACPI_STATUS
acpi_get_possible_resources(
ACPI_HANDLE device_handle,
ACPI_BUFFER *ret_buffer);
ACPI_STATUS
acpi_set_current_resources (
ACPI_HANDLE device_handle,
ACPI_BUFFER *in_buffer);
ACPI_STATUS
acpi_get_irq_routing_table (
ACPI_HANDLE bus_device_handle,
ACPI_BUFFER *ret_buffer);
/*
* Hardware (ACPI device) interfaces
*/
ACPI_STATUS
acpi_set_firmware_waking_vector (
ACPI_PHYSICAL_ADDRESS physical_address);
ACPI_STATUS
acpi_get_firmware_waking_vector (
ACPI_PHYSICAL_ADDRESS *physical_address);
ACPI_STATUS
acpi_get_processor_throttling_info (
ACPI_HANDLE processor_handle,
ACPI_BUFFER *user_buffer);
ACPI_STATUS
acpi_set_processor_throttling_state (
ACPI_HANDLE processor_handle,
u32 throttle_state);
ACPI_STATUS
acpi_get_processor_throttling_state (
ACPI_HANDLE processor_handle,
u32 *throttle_state);
ACPI_STATUS
acpi_get_processor_cx_info (
ACPI_HANDLE processor_handle,
ACPI_BUFFER *user_buffer);
ACPI_STATUS
acpi_set_processor_sleep_state (
ACPI_HANDLE processor_handle,
u32 cx_state);
ACPI_STATUS
acpi_processor_sleep (
ACPI_HANDLE processor_handle,
u32 *pm_timer_ticks);
#endif /* __ACXFACE_H__ */
| 22.169279
| 79
| 0.63815
|
[
"object"
] |
d70fae9b8bfa237ce2dd8e235ce7d5427e3c0d23
| 3,241
|
h
|
C
|
LidarPlugin/Common/CameraProjection.h
|
Pandinosaurus/LidarView
|
9b9b2976e9ac5dcd891a604dabbb79bd6fc6a57a
|
[
"Apache-2.0"
] | 1
|
2021-05-29T21:07:24.000Z
|
2021-05-29T21:07:24.000Z
|
LidarPlugin/Common/CameraProjection.h
|
yxw027/LidarView
|
9267729e62886a324ba7f2e3fed50db38b24f001
|
[
"Apache-2.0"
] | null | null | null |
LidarPlugin/Common/CameraProjection.h
|
yxw027/LidarView
|
9267729e62886a324ba7f2e3fed50db38b24f001
|
[
"Apache-2.0"
] | 1
|
2020-05-30T10:07:35.000Z
|
2020-05-30T10:07:35.000Z
|
//=========================================================================
//
// Copyright 2019 Kitware, Inc.
// Author: Guilbert Pierre ([email protected])
// Date: 03-27-2019
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//=========================================================================
#ifndef CAMERA_PROJECTION_H
#define CAMERA_PROJECTION_H
// STD
#include <string>
#include <vector>
// EIGEN
#include <Eigen/Dense>
/**
* @brief LoadCameraParamsFromCSV Load parameters from a csv file
*
* @param filename filename that contains the parameters
* @param W loaded parameters
*/
void LoadCameraParamsFromCSV(std::string filename, Eigen::VectorXd& W);
/**
* @brief WriteCameraParamsCSV Write parameters into a csv file
*
* @param filename filename to write the camera parameters
* @param W to write parameters
*/
void WriteCameraParamsCSV(std::string filename, Eigen::VectorXd& W);
/**
* @brief FisheyeProjection Project a 3D point using a fisheye camera model
* the projected 2D points will be expressed in pixel coordinates
*
* @param W fisheye camera model parameters
* @param X 3D point to project
* @param shouldClip Clip points that are behind the camera plane
*/
Eigen::Vector2d FisheyeProjection(const Eigen::Matrix<double, 15, 1>& W,
const Eigen::Vector3d& X,
bool shouldClip = false);
/**
* @brief BrownConradyPinholeProjection Project a 3D point using a pinhole
* camera model with Brown-Conrady camera distortion model.
* the projected 2D points will be expressed in pixel coordinates
*
* @param W pinhole Brown-Conrady camera model parameters
* @param X 3D point to project
* @param shouldPlaneClip Clip points that are behind the camera plane
* @note this overload does not expose FoV clipping (a shouldFoVClip flag or
*       fovAngle parameter); only the camera-plane clip flag is available.
*       FoV clipping is useful because high distortion parameters can introduce
*       non injective behaviour between 3D directions and 2D pixels.
*/
Eigen::Vector2d BrownConradyPinholeProjection(const Eigen::Matrix<double, 17, 1>& W,
const Eigen::Vector3d& X,
bool shouldPlaneClip = false);
/**
* @brief GetRGBColourFromReflectivity map the reflectivity signal
* onto a RGB color map
*
* @param v reflectivity signal
* @param vmin minimal value of the reflectivity signal
* @param vmax maximal value of the reflectivity signal
*/
Eigen::Vector3d GetRGBColourFromReflectivity(double v, double vmin, double vmax);
#endif // CAMERA_PROJECTION_H
| 37.686047
| 84
| 0.658439
|
[
"vector",
"model",
"3d"
] |
d7151518b0b174441c396fbe3bac787cce7a5e85
| 3,319
|
h
|
C
|
art/compiler/sea_ir/types/type_inference_visitor.h
|
CanPisces/DexHunter
|
b8f46563c7f3aeb79cf40db09e1d231649f1a29a
|
[
"Apache-2.0"
] | 1,306
|
2015-09-01T05:06:16.000Z
|
2022-03-10T07:13:10.000Z
|
art/compiler/sea_ir/types/type_inference_visitor.h
|
cnrat/DexHunter
|
b8f46563c7f3aeb79cf40db09e1d231649f1a29a
|
[
"Apache-2.0"
] | 11
|
2015-09-02T09:06:42.000Z
|
2020-12-26T04:59:34.000Z
|
art/compiler/sea_ir/types/type_inference_visitor.h
|
cnrat/DexHunter
|
b8f46563c7f3aeb79cf40db09e1d231649f1a29a
|
[
"Apache-2.0"
] | 598
|
2015-09-01T05:06:18.000Z
|
2022-03-27T07:59:49.000Z
|
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
#define ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
#include "dex_file-inl.h"
#include "sea_ir/ir/visitor.h"
#include "sea_ir/types/types.h"
namespace sea_ir {
// The TypeInferenceVisitor visits each instruction and computes its type taking into account
// the current type of the operands. The type is stored in the visitor.
// We may be better off by using a separate visitor type hierarchy that has return values
// or that passes data as parameters, than to use fields to store information that should
// in fact be returned after visiting each element. Ideally, I would prefer to use templates
// to specify the returned value type, but I am not aware of a possible implementation
// that does not horribly duplicate the visitor infrastructure code (version 1: no return value,
// version 2: with template return value).
class TypeInferenceVisitor: public IRVisitor {
 public:
  TypeInferenceVisitor(SeaGraph* graph, TypeData* type_data,
      art::verifier::RegTypeCache* types):
      graph_(graph), type_data_(type_data), type_cache_(types), crt_type_() {
  }
  // There are no type related actions to be performed on these classes.
  void Initialize(SeaGraph* graph) { }
  void Visit(SeaGraph* graph);
  void Visit(Region* region) { }
  // Nodes with out-of-line visitors below compute a type; the empty-body
  // overloads are node kinds that contribute no type information.
  void Visit(PhiInstructionNode* instruction);
  void Visit(SignatureNode* parameter);
  void Visit(InstructionNode* instruction) { }
  void Visit(UnnamedConstInstructionNode* instruction);
  void Visit(ConstInstructionNode* instruction) { }
  void Visit(ReturnInstructionNode* instruction) { }
  void Visit(IfNeInstructionNode* instruction) { }
  void Visit(MoveResultInstructionNode* instruction);
  void Visit(InvokeStaticInstructionNode* instruction);
  void Visit(AddIntInstructionNode* instruction);
  void Visit(GotoInstructionNode* instruction) { }
  void Visit(IfEqzInstructionNode* instruction) { }
  // Type-merging helpers; defined out-of-line (see the corresponding .cc).
  const Type* MergeTypes(std::vector<const Type*>& types) const;
  const Type* MergeTypes(const Type* t1, const Type* t2) const;
  std::vector<const Type*> GetOperandTypes(InstructionNode* instruction) const;
  // Returns the first type accumulated by the last Visit() and clears the
  // accumulator (any additional entries are discarded — see TODO), or NULL
  // when the last visit produced no type.
  const Type* GetType() {
    // TODO: Currently multiple defined types are not supported.
    if (!crt_type_.empty()) {
      const Type* single_type = crt_type_.at(0);
      crt_type_.clear();
      return single_type;
    }
    return NULL;
  }

 protected:
  const SeaGraph* const graph_;
  TypeData* type_data_;
  art::verifier::RegTypeCache* type_cache_;
  std::vector<const Type*> crt_type_;   // Stored temporarily between two calls to Visit.
};
} // namespace sea_ir
#endif // ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
| 40.47561
| 99
| 0.751732
|
[
"vector"
] |
d716eee0ad6ecf68b3b562396579cb867adbe6f4
| 349
|
h
|
C
|
Demo/QGLCoreDataPlugin/CoreData/Categories/NSManagedObject+helper.h
|
Guicai-Li/QGLCoreDataPlugin
|
6acb33ab23d42aa2f4f92d65c45accb3d097d1f2
|
[
"MIT"
] | 1
|
2016-02-23T06:49:21.000Z
|
2016-02-23T06:49:21.000Z
|
Demo/QGLCoreDataPlugin/CoreData/Categories/NSManagedObject+helper.h
|
Guicai-Li/QGLCoreDataPlugin
|
6acb33ab23d42aa2f4f92d65c45accb3d097d1f2
|
[
"MIT"
] | null | null | null |
Demo/QGLCoreDataPlugin/CoreData/Categories/NSManagedObject+helper.h
|
Guicai-Li/QGLCoreDataPlugin
|
6acb33ab23d42aa2f4f92d65c45accb3d097d1f2
|
[
"MIT"
] | null | null | null |
//
// NSManagedObject+helper.h
// QGLCoreDataPlugin
//
// Created by Guicai.Li on 14-10-16.
// Copyright (c) 2014年 Guicai Li. All rights reserved.
//
#import <Foundation/Foundation.h>
@import CoreData;
@interface NSManagedObject (helper)

// Inserts and returns a new managed object for the entity named `name`.
// NOTE(review): the managed object context used is not visible in this
// header — presumably a shared/default context; verify in the implementation.
+ (id)createNSManagedObjectForName:(NSString *)name;

// Deletes the given managed object from its context.
+ (void)deleteNSManagedObject:(id)object;

@end
| 16.619048
| 55
| 0.724928
|
[
"object"
] |
d72ee280ccb205b5af94b455f78ba3b37461a7b8
| 3,200
|
c
|
C
|
src/lib/libc/tests/stdlib/dynthr_test.c
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
src/lib/libc/tests/stdlib/dynthr_test.c
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
src/lib/libc/tests/stdlib/dynthr_test.c
|
lastweek/source-freebsd
|
0821950b0c40cbc891a27964b342e0202a3859ec
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2019 Andrew Gierth
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Though this file is initially distributed under the 2-clause BSD license,
* the author grants permission for its redistribution under alternative
* licenses as set forth at <https://rhodiumtoad.github.io/RELICENSE.txt>.
* This paragraph and the RELICENSE.txt file are not part of the license and
* may be omitted in redistributions.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <dlfcn.h>
#include <atf-c.h>
typedef void (modfunc_t)(int op);
/*
* Minimal test case for PR 235158; mutual dependencies between jemalloc and
* libthr causing issues in thread creation. Specifically to this case, libthr
* uses calloc to initialize pthread mutexes, and jemalloc uses pthread mutexes.
*
* Deferred initialization provided by jemalloc proved to be fragile, causing
* issues like in the referenced PR where thread creation in a shared object
* loaded via dlopen(3) would stall unless the calling application also linked
* against pthread.
*/
ATF_TC(maintc);
ATF_TC_HEAD(maintc, tc)
{
	/*
	 * The PR 235158 failure mode is a hang during thread creation,
	 * so bound the test's runtime rather than waiting forever.
	 */
	atf_tc_set_md_var(tc, "timeout", "3");
}
ATF_TC_BODY(maintc, tc)
{
	char *libpath;
	modfunc_t *func;
	void *mod_handle;
	const char *srcdir;
	dlfunc_t rawfunc;

	/* The helper module is installed alongside the test program. */
	srcdir = atf_tc_get_config_var(tc, "srcdir");
	if (asprintf(&libpath, "%s/dynthr_mod.so", srcdir) < 0)
		atf_tc_fail("failed to construct path to dynthr_mod.so");
	/*
	 * NOTE(review): no RTLD_LAZY/RTLD_NOW bit is passed here; FreeBSD's
	 * dlopen(3) accepts this, but it is not portable usage.
	 */
	mod_handle = dlopen(libpath, RTLD_LOCAL);
	free(libpath);
	if (mod_handle == NULL)
		atf_tc_fail("failed to open dynthr_mod.so: %s", dlerror());
	/* dlfunc() is the function-pointer-safe variant of dlsym(). */
	rawfunc = dlfunc(mod_handle, "mod_main");
	if (rawfunc == NULL)
		atf_tc_fail("failed to resolve function mod_main");
	func = (modfunc_t *)rawfunc;
	func(1);	/* presumably: start the module's thread (see dynthr_mod.c) */
	func(0);	/* presumably: stop/join it */
}
ATF_TP_ADD_TCS(tp)
{
	/* Register the single test case with the ATF test program. */
	ATF_TP_ADD_TC(tp, maintc);

	return (atf_no_error());
}
| 34.042553
| 80
| 0.746875
|
[
"object"
] |
d73e81cb9f59b6c5042167c060b94173518dc4f2
| 890
|
h
|
C
|
bwi_kr_execution/actasp/include/actasp/Action.h
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | 1
|
2016-11-25T22:45:52.000Z
|
2016-11-25T22:45:52.000Z
|
bwi_kr_execution/actasp/include/actasp/Action.h
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | null | null | null |
bwi_kr_execution/actasp/include/actasp/Action.h
|
YuqianJiang/bwi_experimental
|
aa0915f170c6c1720a34d0ab24d5b287a9b1bb6d
|
[
"BSD-3-Clause"
] | null | null | null |
#ifndef actasp_Action_h__guard
#define actasp_Action_h__guard
#include <actasp/AspFluent.h>
#include <string>
#include <vector>
namespace actasp {
// Abstract interface for an executable action in the ASP planner.
struct Action {

  // Number of parameters this action takes.
  virtual int paramNumber() const = 0;

  // The action's name (without its parameters).
  virtual std::string getName() const = 0;

  // Starts/advances execution of the action.
  virtual void run() = 0;

  virtual bool hasFinished() const = 0;

  // Default: actions never report failure unless an override says otherwise.
  virtual bool hasFailed() const {return false;}

  // Creates a copy of this action initialized from the given fluent.
  virtual Action *cloneAndInit(const actasp::AspFluent & fluent) const =0;

  virtual Action *clone() const =0;

  // Renders this action as an ASP term / fluent at the given time step.
  std::string toASP(unsigned int timeStep) const;
  AspFluent toFluent(unsigned int timeStep) const;

  // Equality and ordering compare the ASP representation at time step 0,
  // so two actions compare equal iff they render to the same ASP term.
  // NOTE(review): these take a pointer, not a reference — callers must not
  // pass null.
  bool operator==(const Action *other) const {
    return this->toASP(0) == other->toASP(0);
  }

  bool operator<(const Action *other) const {
    return this->toASP(0) < other->toASP(0);
  }

  virtual ~Action() {}

 private:
  virtual std::vector<std::string> getParameters() const = 0;
};
}
#endif
| 18.163265
| 73
| 0.692135
|
[
"vector"
] |
d75e2ecec00bfa8f3e596c1e396397753f80e5e0
| 3,403
|
h
|
C
|
builtin/python/callback.h
|
kstepanmpmg/mldb
|
f78791cd34d01796705c0f173a14359ec1b2e021
|
[
"Apache-2.0"
] | 665
|
2015-12-09T17:00:14.000Z
|
2022-03-25T07:46:46.000Z
|
builtin/python/callback.h
|
tomzhang/mldb
|
a09cf2d9ca454d1966b9e49ae69f2fe6bf571494
|
[
"Apache-2.0"
] | 797
|
2015-12-09T19:48:19.000Z
|
2022-03-07T02:19:47.000Z
|
builtin/python/callback.h
|
matebestek/mldb
|
f78791cd34d01796705c0f173a14359ec1b2e021
|
[
"Apache-2.0"
] | 103
|
2015-12-25T04:39:29.000Z
|
2022-02-03T02:55:22.000Z
|
/** callback.h -*- C++ -*-
Rémi Attab, 16 Jan 2013
Copyright (c) 2013 mldb.ai inc. All rights reserved.
This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved.
std::function and std::function compatible python callbacks.
Note that we can't have an automatic python-callback to std::function
converter because we have to determine at runtime whether a callable python
object matches the signature of the target std::function. Since python is a
* dynamic language, there is no realistic way of doing that.
What this essentially means is that to wrap function that take callbacks we
have to add another function on top which will convert the PyObject into a
SafeCallback or UnsafeCallback object forwarding the call to the wrapped
function. Might be possible to generate using template magic. I leave that
to the reader as an exercise*.
Not ideal, but that's life.
*/
#pragma once
#include <boost/python.hpp>
#include <iostream>
namespace MLDB {
namespace Python {
/******************************************************************************/
/* LOCK GIL */
/******************************************************************************/
/** RAII object to acquire the python GIL.
Required to safely handle python objects from multiple threads.
*/
// RAII guard for the Python Global Interpreter Lock.
struct LockGil
{
    // Acquire the GIL (and a thread state) on construction.
    LockGil() : state(PyGILState_Ensure()) {}

    // Non-copyable: a copy would call PyGILState_Release twice on the same
    // state token, corrupting the interpreter's GIL bookkeeping.
    LockGil(const LockGil&) = delete;
    LockGil& operator=(const LockGil&) = delete;

    ~LockGil() {
        PyGILState_Release(state);
    }

private:
    PyGILState_STATE state;
};
/******************************************************************************/
/* SAFE CALLBACK */
/******************************************************************************/
/** Python callback that can safely be called from multiple threads.
Thread safety is accomplished by grabbing the GIL before every call.
*/
template<typename R, typename... Args>
struct SafeCallback
{
    // Non-owning: the pointer is stored without Py_INCREF, so the caller
    // must keep `callable` alive for the lifetime of this object.
    SafeCallback(PyObject* callable) : callback(callable) {}

    template<typename... FnArgs>
    R operator() (FnArgs&&... args) const
    {
        // std::cout << "locking GIL: " << sizeof...(FnArgs) << std::endl;
        // Grab the GIL for the duration of the call (thread safety).
        LockGil lock;
        try {
            return boost::python::call<R>(callback, std::forward<FnArgs>(args)...);
        }
        catch (...) {
            // NOTE(review): all errors (including boost::python::error_already_set)
            // are swallowed and `0` is returned, so R must be constructible from
            // 0 and callers cannot distinguish failure from a genuine 0 result.
            std::cout << "Python function threw an error." << std::endl;
            return 0;
        }
    }

private:
    PyObject* callback;
};
/******************************************************************************/
/* UNSAFE CALLBACK */
/******************************************************************************/
/** Python callback that is not thread safe.*/
template<typename R, typename... Args>
struct UnsafeCallback
{
    // Non-owning: stored without Py_INCREF; caller must keep `callable` alive.
    UnsafeCallback(PyObject* callable) : callback(callable) {}

    // Invokes the callback without acquiring the GIL — only safe when the
    // caller already holds it (single-threaded use).
    template<typename... FnArgs>
    R operator() (FnArgs&&... args) const
    {
        try {
            return boost::python::call<R>(callback, std::forward<FnArgs>(args)...);
        }
        catch (...) {
            // NOTE(review): errors are swallowed and `0` returned — R must be
            // constructible from 0; failure is indistinguishable from 0.
            std::cout << "Python function threw an error." << std::endl;
            return 0;
        }
    }

private:
    PyObject* callback;
};
} // namespace Python
} // namespace MLDB
| 28.838983
| 83
| 0.511607
|
[
"object"
] |
c723cb0ed8e3472d7570e409702f0b059da2ad30
| 1,755
|
h
|
C
|
src/ui/display.h
|
ParikhKadam/cycloid
|
c5e64e8379f801417a38755eb6b2fde881dabd8c
|
[
"MIT"
] | 1
|
2022-02-10T07:02:59.000Z
|
2022-02-10T07:02:59.000Z
|
src/ui/display.h
|
ParikhKadam/cycloid
|
c5e64e8379f801417a38755eb6b2fde881dabd8c
|
[
"MIT"
] | null | null | null |
src/ui/display.h
|
ParikhKadam/cycloid
|
c5e64e8379f801417a38755eb6b2fde881dabd8c
|
[
"MIT"
] | null | null | null |
#ifndef UI_DISPLAY_H_
#define UI_DISPLAY_H_
#include "hw/lcd/fbdev.h"
#include "localization/coneslam/localize.h"
#include <vector>
class FisheyeLens;
// Renders the car's UI onto the LCD: a track map, raw camera view, or a
// remapped front view, plus config menus and a status/dashboard overlay.
class UIDisplay {
 public:
  // Screen layouts; NextMode() cycles through them. NUM_MODES is a sentinel.
  enum DisplayMode { TRACKMAP = 0, CAMERAVIEW, FRONTVIEW, NUM_MODES };

  bool Init();
  // Prepares the camera-dependent remap state for the front view.
  // NOTE(review): units of `camtilt` (radians vs degrees) are not visible
  // here — confirm with callers.
  void InitCamera(const FisheyeLens &lens, float camtilt);

#if 0
  void UpdateBirdseye(const uint8_t *yuv, int w, int h);
  void UpdateConeView(const uint8_t *yuv, int ncones, int *conesx);
  void UpdateParticleView(const coneslam::Localizer *l);
#endif

  void UpdateCameraView(const uint8_t *yuv,
                        const std::vector<std::pair<float, float>> &gridpts);
  // NOTE(review): parameter names `sixz, sizy` look like typos for
  // `sizx, sizy`; renaming would be safe for callers (names only).
  void UpdateCeiltrackView(const float *xytheta, float xgrid, float ygrid,
                           float sixz, float sizy, const int32_t *obs1,
                           const int32_t *obs2, float wheel_v, float fps);
  void UpdateConfig(const char *configmenu[], int nconfigs, int config_item,
                    const int16_t *config_values);
  void UpdateDashboard(float v, float w, int32_t lon, int32_t lat, int numSV,
                       float gpsv, float mlon, float mlat, float mag_north,
                       float mag_east, float ye, float psie, float autok,
                       float autov, float heading);
  void UpdateEncoders(uint16_t *wheel_pos);
  // Shows a status message; `color` is an RGB565 pixel value (0xffff = white).
  void UpdateStatus(const char *status, uint16_t color = 0xffff);

  // Advance to the next DisplayMode (wraps around).
  void NextMode();

  uint16_t *GetScreenBuffer() { return screen_.GetBuffer(); }

 private:
  // Remaps a YUV image into `buf` using the precomputed lookup table.
  void remapYUV(const uint16_t *maptbl, const uint8_t *yuv, uint16_t *buf);

  LCDScreen screen_;
  uint8_t *backgroundyuv_;
  uint16_t *frontremap_;
  DisplayMode mode_;
  uint16_t configbuf_[100 * 320];   // off-screen buffer for the config menu
  uint16_t statusbuf_[20 * 320];    // off-screen buffer for the status line
};
| 28.770492
| 77
| 0.674644
|
[
"vector"
] |
c7240045d5ef71653f4048d3175d582029a7c6c2
| 3,250
|
h
|
C
|
DSPFilters4JUCEDemo/JuceLibraryCode/modules/juce_core/containers/juce_ScopedValueSetter.h
|
rc-h/dspfilters4juce
|
60b32a3af7eb89289f89e01db1239e312544ec2c
|
[
"MIT"
] | 10
|
2017-07-16T04:50:47.000Z
|
2022-02-14T06:10:45.000Z
|
DSPFilters4JUCEDemo/JuceLibraryCode/modules/juce_core/containers/juce_ScopedValueSetter.h
|
rc-h/dspfilters4juce
|
60b32a3af7eb89289f89e01db1239e312544ec2c
|
[
"MIT"
] | 199
|
2016-07-28T07:30:48.000Z
|
2017-10-14T06:15:40.000Z
|
UI/JuceLibraryCode/modules/juce_core/containers/juce_ScopedValueSetter.h
|
subutai-io/launcher
|
d8397995e18200b12d60781ed485af04f70bff03
|
[
"Apache-2.0"
] | 3
|
2017-11-07T14:44:14.000Z
|
2021-03-16T02:45:57.000Z
|
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2016 - ROLI Ltd.
Permission is granted to use this software under the terms of the ISC license
http://www.isc.org/downloads/software-support-policy/isc-license/
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
OF THIS SOFTWARE.
-----------------------------------------------------------------------------
To release a closed-source product which uses other parts of JUCE not
licensed under the ISC terms, commercial licenses are available: visit
www.juce.com for more information.
==============================================================================
*/
#ifndef JUCE_SCOPEDVALUESETTER_H_INCLUDED
#define JUCE_SCOPEDVALUESETTER_H_INCLUDED
//==============================================================================
/**
    RAII helper which temporarily overwrites a variable.

    On construction the target variable is set to a new value; when the
    setter is destroyed the variable is written back — either to the value
    it held beforehand, or to an explicitly supplied final value.

    E.g. @code
    int x = 1;
    {
        ScopedValueSetter setter (x, 2);
        // x is now 2
    }
    // x is now 1 again

    {
        ScopedValueSetter setter (x, 3, 4);
        // x is now 3
    }
    // x is now 4
    @endcode
*/
template <typename ValueType>
class ScopedValueSetter
{
public:
    /** Immediately changes the given variable to newValue, and restores its
        original value when this object is deleted.
    */
    ScopedValueSetter (ValueType& valueToSet, ValueType newValue)
        : ScopedValueSetter (valueToSet, newValue, valueToSet)
    {
        // Delegates to the 3-argument constructor; the third argument copies
        // the variable's current value before it is overwritten.
    }

    /** Immediately changes the given variable to newValue, and sets it to
        valueWhenDeleted when this object is deleted.
    */
    ScopedValueSetter (ValueType& valueToSet,
                       ValueType newValue,
                       ValueType valueWhenDeleted)
        : value (valueToSet),
          originalValue (valueWhenDeleted)
    {
        value = newValue;
    }

    ~ScopedValueSetter()
    {
        value = originalValue;
    }

private:
    //==============================================================================
    ValueType& value;
    const ValueType originalValue;

    JUCE_DECLARE_NON_COPYABLE (ScopedValueSetter)
};
#endif // JUCE_SCOPEDVALUESETTER_H_INCLUDED
| 31.553398
| 100
| 0.580308
|
[
"object"
] |
c7292a41312d9bab0372dbf108d3a68b71e4cb8e
| 5,820
|
h
|
C
|
inc/fbrequest.h
|
digitalsurgeon/qfacebookconnect
|
cbe613ab4516f3b8b1090abd7a670307e556c998
|
[
"MIT"
] | null | null | null |
inc/fbrequest.h
|
digitalsurgeon/qfacebookconnect
|
cbe613ab4516f3b8b1090abd7a670307e556c998
|
[
"MIT"
] | null | null | null |
inc/fbrequest.h
|
digitalsurgeon/qfacebookconnect
|
cbe613ab4516f3b8b1090abd7a670307e556c998
|
[
"MIT"
] | null | null | null |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FBREQUEST_H
#define FBREQUEST_H
#include <QObject>
#include <QString>
#include <QDateTime>
#include <QHttp>
#include <QHash>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include "fbconnectglobal.h"
#include "fbxmlhandler.h"
// FORWARD DECLARATIONS
class FBSession;
class FBError;
///////////////////////////////////////////////////////////////////////////////////////////////////
class QFACEBOOKCONNECTSHARED_EXPORT FBRequest : public QObject
{
Q_OBJECT
private:
/* pointer to the session that owns this request */
FBSession* iSession;
/**
* The URL which will be contacted to execute the request.
*/
QString iUrl;
/**
* The API method which will be called.
*/
QString iMethod;
/**
* An object used by the user of the request to help identify the meaning of the request.
*/
void* iUserInfo;
/**
* The dictionary of parameters to pass to the method.
*
* These values in the dictionary will be converted to strings using the
* standard Objective-C object-to-string conversion facilities.
*/
Dictionary iParams;
/**
* A data parameter.
*
* Used for methods such as photos.upload, video.upload, events.create, and
* events.edit.
*/
QByteArray iDataParam;
/**
* true if iDataParam holds picture data
*/
bool iDataParamPicture;
/**
* The timestamp of when the request was sent to the server.
*/
QDateTime iTimestamp;
QHttp iConnection;
QByteArray iResponseText;
QNetworkAccessManager iNetworkAccessManager;
signals: /* the signals emitted by FBRequest */
/**
* Called just before the request is sent to the server.
*/
void requestLoading();
/**
* Called when the server responds and begins to send back data.
*/
//todo: void requestDidReceiveResponse (FBRequest* aRequest, NSURLResponse* aResponse);
/**
* Called when an error prevents the request from completing successfully.
*/
void requestFailedWithNetworkError( QNetworkReply::NetworkError code );
void requestFailedWithFacebookError ( const FBError& aError );
/**
* Called when a request returns and its response has been parsed into an object.
*
* The resulting object may be a dictionary, an array, a string, or a number, depending
* on the format of the API response.
*/
void requestDidLoad ( const QVariant& aResult);
/**
* Called when the request was cancelled.
*/
void requestWasCancelled ();
private slots:
void networkReplyError ( QNetworkReply::NetworkError code );
void networkReplyFinished ();
public: /* class functions */
/**
* Creates a new API request for the global session.
*/
static FBRequest* request();
/**
* Creates a new API request for a particular session.
*/
static FBRequest* requestWithSession (FBSession* aSession);
public: /* instance functions */
/**
* Creates a new request paired to a session.
*/
FBRequest (FBSession* aSession);
/**
* Calls a method on the server asynchronously.
*
* The delegate will be called for each stage of the loading process.
*/
void call (const QString& aMethod, const Dictionary& aParams);
/**
* Calls a method on the server asynchronously, with a file upload component.
*
* The delegate will be called for each stage of the loading process.
*/
void callWithDataParams (const QString& aMethod, const Dictionary& aParams, const QByteArray& aDataParam, bool aDataParamPicture);
/**
* Calls a URL on the server asynchronously.
*
* The delegate will be called for each stage of the loading process.
*/
void post( const QString& aUrl, const Dictionary& aParams);
/**
* Stops an active request before the response has returned.
*/
void cancel();
/**
* returns the time stamp of when the request was sent to the server
*/
const QDateTime& timeStamp() const;
void connect();
private:
/**
* Given a string returns its hex coded md5 hash
*/
static QString md5(const QString&);
/**
* @return true if the current request method is a special method
*/
bool isSpecialMethod() const;
/**
* @return QString a url to use for the given method
*/
QString urlForMethod (const QString& aMethod) const;
/**
* @return the Get Url for the request
*/
QString generateGetURL() const;
QString generateCallId() const;
QString generateSig();
void generatePostBody(QByteArray& body);
/**
* handles the data received from the server
* @param aResponseData is the data received from the server
*/
void handleResponseData( const QByteArray& aResponseData );
/**
* @param aResponseData is the data received from the server
* @param aError will get error codes if any error occurs ( this will change in the future )
* @return a void* pointer, this will change
*/
QVariant parseXMLResponse ( const QByteArray& aResponseData, FBError& aError);
};
#endif // FBREQUEST_H
| 26.697248
| 134
| 0.651375
|
[
"object"
] |
c73525ef2b16c30206938084f28535e55c80aab8
| 2,935
|
h
|
C
|
tools/drawMapTest/qaspectratiolayout.h
|
roman-murashov/hedgewars
|
74f633d76bf95674f68f6872472bd21825f1f8c0
|
[
"Apache-2.0"
] | null | null | null |
tools/drawMapTest/qaspectratiolayout.h
|
roman-murashov/hedgewars
|
74f633d76bf95674f68f6872472bd21825f1f8c0
|
[
"Apache-2.0"
] | null | null | null |
tools/drawMapTest/qaspectratiolayout.h
|
roman-murashov/hedgewars
|
74f633d76bf95674f68f6872472bd21825f1f8c0
|
[
"Apache-2.0"
] | null | null | null |
/*
* Copyright (c) 2009 Nokia Corporation.
*/
#ifndef QASPECTRATIOLAYOUT_H_
#define QASPECTRATIOLAYOUT_H_
#include <QLayout>
#include <QPointer>
#include <QRect>
#include <QWidgetItem>
#include <QLayoutItem>
// A QLayout subclass that manages exactly one item, sizing it to the largest
// rect that fits the layout's assigned geometry and centering it there
// (see calculateProperSize / calculateCenterLocation in the .cpp).
class QAspectRatioLayout : public QLayout
{
    Q_OBJECT

public:
    QAspectRatioLayout(QWidget* parent, int spacing = -1);
    QAspectRatioLayout(int spacing = -1);
    ~QAspectRatioLayout();

    /* Convenience method */
    virtual void add(QLayoutItem* item);

    /* http://doc.trolltech.com/qlayout.html#addItem */
    virtual void addItem(QLayoutItem* item);
    /* http://doc.trolltech.com/qlayout.html#addWidget */
    virtual void addWidget(QWidget* widget);
    /* http://doc.trolltech.com/qlayout.html#takeAt */
    virtual QLayoutItem* takeAt(int index);
    /* http://doc.trolltech.com/qlayout.html#itemAt */
    virtual QLayoutItem* itemAt(int index) const;
    /* http://doc.trolltech.com/qlayout.html#count */
    virtual int count() const;

    /*
     * These are ours since we do have only one item.
     */
    virtual QLayoutItem* replaceItem(QLayoutItem* item);
    virtual QLayoutItem* take();
    virtual bool hasItem() const;

    /* http://doc.trolltech.com/qlayout.html#expandingDirections */
    virtual Qt::Orientations expandingDirections() const;

    /*
     * This method contains most of the juice of this article.
     * http://doc.trolltech.com/qlayoutitem.html#setGeometry
     */
    virtual void setGeometry(const QRect& rect);
    /* http://doc.trolltech.com/qlayoutitem.html#geometry */
    virtual QRect geometry();

    /* http://doc.trolltech.com/qlayoutitem.html#sizeHint */
    virtual QSize sizeHint() const;
    /* http://doc.trolltech.com/qlayout.html#minimumSize */
    virtual QSize minimumSize() const;
    /* http://doc.trolltech.com/qlayoutitem.html#hasHeightForWidth */
    virtual bool hasHeightForWidth() const;

private:
    /* Saves the last received rect. */
    void setLastReceivedRect(const QRect& rect);
    /* Used to initialize the object. */
    void init(int spacing);
    /* Calculates the maximum size for the item from the assigned size. */
    QSize calculateProperSize(QSize from) const;
    /* Calculates the center location from the assigned size and
     * the items size. */
    QPoint calculateCenterLocation(QSize from, QSize itemSize) const;
    /* Check if two QRects are equal */
    bool areRectsEqual(const QRect& a, const QRect& b) const;

    /* Contains item reference */
    QLayoutItem* item;
    /*
     * Used for caching so we won't do calculations every time
     * setGeometry is called.
     */
    QRect* lastReceivedRect;
    /* Contains geometry */
    QRect* _geometry;
};
#endif /* QASPECTRATIOLAYOUT_H_ */
| 33.352273
| 78
| 0.638842
|
[
"geometry",
"object"
] |
c73f7c8365a66ce0af5e507a34c53928774b502e
| 1,516
|
h
|
C
|
src/game.h
|
CoffeePanda0/FedoraEngine
|
6f849ef8f23e0189303632f26a2a660d0ebbee26
|
[
"BSD-3-Clause"
] | 5
|
2020-07-14T22:50:00.000Z
|
2022-03-31T20:26:02.000Z
|
src/game.h
|
CoffeePanda0/FedoraEngine
|
6f849ef8f23e0189303632f26a2a660d0ebbee26
|
[
"BSD-3-Clause"
] | null | null | null |
src/game.h
|
CoffeePanda0/FedoraEngine
|
6f849ef8f23e0189303632f26a2a660d0ebbee26
|
[
"BSD-3-Clause"
] | null | null | null |
#pragma once
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <SDL.h>
#include <SDL_image.h>
#include <SDL_mixer.h>
#include <SDL_ttf.h>
#include "entity/gameobject.h"
#include "lib.h"
#include "player.h"
#include "ui/ui.h"
#include "map.h"
#include "ui/menu.h"
#include "entity/enemy.h"
#include "client.h"
#include "entity/particle.h"
#include "ui/console.h"
#include "lua.h"
#include "entity/animation.h"
/* Number of elements in a statically-sized array.
 * Fix: the macro argument is now parenthesized so expressions like
 * LEN(a + 0) expand correctly. */
#define LEN(x) (sizeof(x)/sizeof((x)[0]))

/* Window dimensions and the current frame timestamp (SDL ticks). */
extern int screen_height;
extern int screen_width;
extern Uint64 now;

/* Creates the SDL window/renderer and initializes the engine. */
void init(const char* window_title, int xpos, int ypos, int window_width, int window_height);
/* Tears down everything created by init(). */
void Clean();

/* Save-game persistence. */
void LoadSave(const char *name);
void SaveGame(const char *name);

/* Level management. */
void ChangeLevel();
void LoadLevels();
extern char **levels; // Stores the lua files for each level
extern int level_count;
extern int current_level;

/* Per-frame engine entry points. */
void Update();
void Render();
void event_handler();

/* Audio. */
extern Mix_Music* bgMusic;
Mix_Music* LoadMusic(const char* path);
Mix_Chunk* LoadSFX(const char* path);

extern SDL_Rect BgRect;
extern bool paused;
extern bool TextPaused;
extern bool intext;

/* Loads an image file into an SDL texture. */
SDL_Texture* TextureManager(const char* texture, SDL_Renderer* ren);
/* NOTE(review): duplicate declarations of LoadMusic/LoadSFX removed here —
 * they were already declared above. */

/* Like strsep(), splitting *sp on the separator string. */
char *strseps(char **sp, char *sep);

extern SDL_Surface *s;
extern SDL_Window* window;
extern SDL_Renderer* renderer;
extern char *save_path;
extern bool GameActive;
extern bool multiplayer;
extern struct GameObject end_flag;
| 21.055556
| 93
| 0.75
|
[
"render"
] |
c743bae163d4cd155bcb00be808188b9e8f0fe3c
| 4,105
|
h
|
C
|
RobustFit/RobustFit/RobustFit.h
|
CGLab-GIST/robust-fit
|
75b3ba532a52e9754fd1e14c66e6f52cd69b03e8
|
[
"BSD-3-Clause"
] | 12
|
2019-05-22T04:20:14.000Z
|
2021-09-02T13:38:30.000Z
|
RobustFit/RobustFit/RobustFit.h
|
CGLab-GIST/robust-fit
|
75b3ba532a52e9754fd1e14c66e6f52cd69b03e8
|
[
"BSD-3-Clause"
] | null | null | null |
RobustFit/RobustFit/RobustFit.h
|
CGLab-GIST/robust-fit
|
75b3ba532a52e9754fd1e14c66e6f52cd69b03e8
|
[
"BSD-3-Clause"
] | 2
|
2021-12-15T15:52:57.000Z
|
2022-01-03T14:13:42.000Z
|
// Copyright(c) 2019 CGLab, GIST. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met :
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// - Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and / or other materials provided with the distribution.
// - Neither the name of the copyright holder nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include "RobustFitOptions.h"
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <algorithm>
#include <vector_types.h>
#include <vector>
/* MST (Kruskal's Algorithm) */
// a structure to represent a weighted edge (a gradient) in graph
struct Edge {
	// endpoints as linear pixel indices into the image
	int srcIdx, destIdx;
	float weightVar;	// edge weight: estimated gradient error
};
// a structure to represent an undirected graph (vertex: pixel color, edge: gradient)
struct Graph {
	// V-> Number of vertices(pixel colors), E-> Number of edges(gradients in horizontal & vertical directions)
	int V;
	int E;
	// The graph is represented as an array of edges.
	// Since the graph is undirected, an edge from src to dest is the same as the one vice versa.
	// Both are counted as 1 edge here.
	std::vector<Edge> edge;
};
// A structure to represent a subset for union and find functions.
// (classic union-find node: parent pointer plus union-by-rank counter)
struct subset {
	int parent;
	int rank;
};
// Robust gradient-domain reconstruction: picks a subset of the gradients
// (via a minimum spanning tree over per-gradient error estimates) and solves
// for the image that best fits that subset.
class RobustFit {
private:
	int m_width, m_height, m_nPix;   // image dimensions; m_nPix = width * height
	float m_alpha_solver;            // solver weight parameter (see allocMemory)
	// pointers of input images (non-owning; caller retains ownership)
	const std::vector<float4> *p_throughput;
	const std::vector<float4> *p_dx, *p_dy;
	const std::vector<float4> *p_varThroughput;
	// an edge map indicates whether an edge (a gradient) is included in a gradient subset (0: an edge (a gradient) excluded, 1: an edge (a gradient) included)
	char2* m_edges;
	SolverConfig m_config;
public:
	float4* m_optImg;   // reconstructed output image
private:
	// Functions for MST (Kruskal): edges sorted by estimated gradient error
	static bool compare(const Edge &lhs, const Edge &rhs) {
		return lhs.weightVar < rhs.weightVar;
	}
	// union-find primitives used by Kruskal's algorithm
	int find(std::vector<subset> &subsets, int i);
	void Union(std::vector<subset> &subsets, int x, int y);
	void createGraph(Graph& graph, int nV, int nE);
	// fills the graph with horizontal/vertical gradient edges weighted by error
	void addEdges(Graph &graph, const float4* _errDx, const float4* _errDy, int width, int height);
	int runMST(const float4* _errDx, const float4* _errDy, char2* _edges);
	int MSTForward(struct Graph* graph, char2* _edges);
public:
	RobustFit(const std::string preset);
	~RobustFit();
	void clearMemory();
	void allocMemory(int xSize, int ySize, float alpha);
	// gradient error estimation using filtering (adaptive NLM by Roussell)
	void estimateGradientErr(float4* _filteredImg, float4* _errDx, float4* _errDy, const float4* img, const float4* varImg, const float4* _dx, const float4* _dy, int xSize, int ySize);
	// finding the optimal gradient subset and solving
	void calcGraph(char2* _optEdges, const float4 *_tp, const float4 *_dx, const float4 *_dy, const float4 *_errDx, const float4 *_errDy, const float4 *_nlmImg);
};
| 39.471154
| 181
| 0.741048
|
[
"vector"
] |
c748914e668847211f2aa581ea6e71fa4da866d2
| 2,062
|
h
|
C
|
algorithms/medium/0309. Best Time to Buy and Sell Stock with Cooldown.h
|
MultivacX/letcode2020
|
f86289f8718237303918a7705ae31625a12b68f6
|
[
"MIT"
] | null | null | null |
algorithms/medium/0309. Best Time to Buy and Sell Stock with Cooldown.h
|
MultivacX/letcode2020
|
f86289f8718237303918a7705ae31625a12b68f6
|
[
"MIT"
] | null | null | null |
algorithms/medium/0309. Best Time to Buy and Sell Stock with Cooldown.h
|
MultivacX/letcode2020
|
f86289f8718237303918a7705ae31625a12b68f6
|
[
"MIT"
] | null | null | null |
// 309. Best Time to Buy and Sell Stock with Cooldown
// https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/
// Runtime: 4 ms, faster than 86.34% of C++ online submissions for Best Time to Buy and Sell Stock with Cooldown.
// Memory Usage: 11.5 MB, less than 18.90% of C++ online submissions for Best Time to Buy and Sell Stock with Cooldown.
// LeetCode 309 — DP with three per-day states:
//   hold: best profit while holding a share after day i
//   sold: best profit having sold on or before day i
//   rest: best profit while idle on day i
class Solution {
public:
    int maxProfit(vector<int>& prices) {
        const int n = prices.size();
        if (n <= 2) {
            // Zero/one price: no trade. Two prices: at most one buy/sell pair.
            return n == 0 ? 0 : max(0, prices[n - 1] - prices[0]);
        }
        vector<int> hold(n, 0), sold(n, 0), rest(n, 0);
        hold[0] = -prices[0];
        for (int day = 1; day < n; ++day) {
            hold[day] = max(hold[day - 1], rest[day - 1] - prices[day]);
            sold[day] = max(sold[day - 1], hold[day - 1] + prices[day]);
            rest[day] = max(max(hold[day - 1], sold[day - 1]), rest[day - 1]);
        }
        return max(max(hold[n - 1], sold[n - 1]), rest[n - 1]);
    }
};
// Same DP as above, but with O(1) space: each state only depends on the
// previous day, so three rolling scalars replace the three arrays.
class Solution {
public:
    int maxProfit(vector<int>& prices) {
        const int n = prices.size();
        if (n <= 1) return 0;
        int holding = -prices[0];  // best profit while holding a share
        int justSold = 0;          // best profit ending with a sell
        int resting = 0;           // best profit while in cooldown/idle
        for (int i = 1; i < n; ++i) {
            const int prevHolding = holding;
            const int prevSold = justSold;
            const int prevResting = resting;
            holding = max(prevHolding, prevResting - prices[i]);
            justSold = max(prevSold, prevHolding + prices[i]);
            resting = max(prevResting, prevSold);
        }
        return max(holding, max(justSold, resting));
    }
};
// Constant-space variant: the next day's three states are computed from the
// current day's values into fresh locals, then committed together.
class Solution {
public:
    int maxProfit(vector<int>& prices) {
        const int days = prices.size();
        if (days <= 1) return 0;
        int hold = -prices[0];  // holding a share
        int sold = 0;           // just sold (or sold earlier)
        int rest = 0;           // idle / cooldown
        for (int d = 1; d < days; ++d) {
            const int nextHold = max(hold, rest - prices[d]);
            const int nextSold = max(sold, hold + prices[d]);
            const int nextRest = max(rest, sold);
            hold = nextHold;
            sold = nextSold;
            rest = nextRest;
        }
        return max(hold, max(sold, rest));
    }
};
| 31.242424
| 119
| 0.49321
|
[
"vector"
] |
c74a991061c835f1afd9503b54ef3a995b153e4d
| 8,437
|
h
|
C
|
include/detail/gpcpu/Matrix.h
|
jaredhoberock/gotham
|
e3551cc355646530574d086d7cc2b82e41e8f798
|
[
"Apache-2.0"
] | 6
|
2015-12-29T07:21:01.000Z
|
2020-05-29T10:47:38.000Z
|
include/detail/gpcpu/Matrix.h
|
jaredhoberock/gotham
|
e3551cc355646530574d086d7cc2b82e41e8f798
|
[
"Apache-2.0"
] | null | null | null |
include/detail/gpcpu/Matrix.h
|
jaredhoberock/gotham
|
e3551cc355646530574d086d7cc2b82e41e8f798
|
[
"Apache-2.0"
] | null | null | null |
/*! \file Matrix.h
* \author Jared Hoberock
* \brief Defines the interface for a matrix type
* templatized on type and dimension.
*/
#ifndef MATRIX_H
#define MATRIX_H
#include "Vector.h"
#ifdef minor
#undef minor
#endif // minor
namespace gpcpu
{
/*! \class Matrix
 *  \brief A dense N x M matrix of Scalar stored in a flat array of M*N
 *         elements (see Array/mElements). Element access goes through
 *         operator()(i,j); the storage order is defined in Matrix.inl —
 *         TODO confirm row- vs column-major there.
 */
template<typename Scalar, unsigned int N, unsigned int M>
  class Matrix
{
  public:
    /*! \typedef This
     *  \brief Shorthand.
     */
    typedef Matrix<Scalar,N,M> This;
    /*! \fn Matrix
     *  \brief Null constructor does nothing.
     *         Note: elements are left uninitialized on purpose.
     */
    inline Matrix(void);
    /*! \fn Matrix
     *  \brief Constructor initializes the upper left 3x3 subblock
     *         of this Matrix.
     */
    inline Matrix(const Scalar m00, const Scalar m01, const Scalar m02,
                  const Scalar m10, const Scalar m11, const Scalar m12,
                  const Scalar m20, const Scalar m21, const Scalar m22);
    /*! \fn Matrix
     *  \brief Constructor initializes the upper left 3x4 subblock
     *         of this Matrix.
     */
    inline Matrix(const Scalar m00, const Scalar m01, const Scalar m02, const Scalar m03,
                  const Scalar m10, const Scalar m11, const Scalar m12, const Scalar m13,
                  const Scalar m20, const Scalar m21, const Scalar m22, const Scalar m23);
    /*! \fn Matrix
     *  \brief Constructor initializes the upper left 4x4 subblock
     *         of this Matrix.
     */
    inline Matrix(const Scalar m00, const Scalar m01, const Scalar m02, const Scalar m03,
                  const Scalar m10, const Scalar m11, const Scalar m12, const Scalar m13,
                  const Scalar m20, const Scalar m21, const Scalar m22, const Scalar m23,
                  const Scalar m30, const Scalar m31, const Scalar m32, const Scalar m33);
    /*! \fn operator()
     *  \brief This method provides access to i,jth element.
     *  \param i Which row to select.
     *  \param j Which column to select.
     *  \return A reference to the i,jth element.
     */
    inline Scalar &operator()(const unsigned int i,
                              const unsigned int j);
    /*! \fn operator()
     *  \brief This method provides const access to i,jth element.
     *  \param i Which row to select.
     *  \param j Which column to select.
     *  \return A reference to the i,jth element.
     */
    inline const Scalar &operator()(const unsigned int i,
                                    const unsigned int j) const;
    /*! \fn operator const Scalar * ()
     *  \brief Cast to const Scalar * operator.
     *  \return Returns &mElements[0].
     */
    inline operator const Scalar *(void) const;
    /*! \fn operator Scalar *
     *  \brief Cast to Scalar * operator.
     *  \return Returns &mElements[0].
     */
    inline operator Scalar *(void);
    /*! \fn operator *
     *  \brief Matrix-Matrix multiplication.
     *  \param rhs The right hand side of the multiplication.
     *  \return (*this) * rhs
     */
    template<unsigned int P>
      inline Matrix<Scalar,N,P> operator*(const Matrix<Scalar,M,P> &rhs) const;
    /*! \fn operator/
     *  \brief Scalar division.
     *  \param rhs The right hand side of the operation.
     *  \return (*this) / rhs
     */
    This operator/(const Scalar &rhs) const;
    /*! \fn operator/=
     *  \brief Scalar divide equal.
     *  \param rhs The right hand side of the operation.
     *  \return *this
     */
    This &operator/=(const Scalar &rhs);
    /*! \fn transpose()
     *  \brief This method returns the transpose of this Matrix.
     *  \return The transpose of this Matrix.
     */
    inline Matrix<Scalar,M,N> transpose(void) const;
    /*! \fn inverse()
     *  \brief This method computes the inverse of this Matrix
     *         when N == M and N is small.
     *  \return The inverse of this Matrix.
     */
    inline This inverse(void) const;
    /*! \fn adjoint()
     *  \brief This method computes the adjoint of this Matrix
     *         when N == M and N is small.
     *  \return The adjoint of this Matrix.
     */
    inline This adjoint(void) const;
    /*! \fn operator*=
     *  \brief Scalar times equal.
     *  \param s The Scalar to multiply by.
     *  \return *this.
     */
    inline This &operator*=(const Scalar &s);
    /*! \fn identity
     *  \brief This static method returns an identity matrix.
     *  \return I
     */
    static This identity(void);
    /*! \fn minor
     *  \brief Helper function for adjoint when N is 3.
     *         (The glibc `minor` macro is #undef'd above so this name is usable.)
     */
    Scalar minor(const int r0, const int r1,
                 const int c0, const int c1) const;
    /*! \fn minor
     *  \brief Helper function for adjoint when N is 4.
     */
    Scalar minor(const int r0, const int r1, const int r2,
                 const int c0, const int c1, const int c2) const;
    /*! \typedef Array
     *  \brief Shorthand.
     */
    typedef Scalar Array[M*N];
    /*! This method provides access to the Array.
     *  \return mElements;
     *  FIXME: move this to the .inl.
     */
    inline Array &getData(void)
    {
      return mElements;
    } // end getArray()
    /*! This method multiplies a Vector by this Matrix.
     *  \param rhs The Vector to multiply by.
     *  \return (*this) * rhs
     */
    Vector<Scalar,N> operator*(const Vector<Scalar,M> &rhs) const;
  private:
    /*! \fn determinant
     *  \brief Helper function for determinant when N is small.
     *  \return det(*this)
     */
    Scalar determinant(void) const;
    Array mElements;
}; // end class Matrix
/*! Functor computing det(A) for small square matrices.
 *  The unspecialized primary template is intentionally empty: only
 *  N = 2, 3, 4 are supported, and any other N fails to compile.
 */
template<typename Scalar, unsigned int N>
  struct Determinant
{
};
template<typename Scalar>
  struct Determinant<Scalar,2>
{
  // 2x2 closed form: ad - bc.
  Scalar
    operator()(const Matrix<Scalar,2,2> &A) const
  {
    return A(0,0)*A(1,1) - A(0,1)*A(1,0);
  }
};
template<typename Scalar>
  struct Determinant<Scalar,3>
{
  // Cofactor expansion along row 0; minor(r0,r1,c0,c1) is the 2x2
  // determinant of the submatrix with rows {r0,r1} and columns {c0,c1}.
  Scalar
    operator()(const Matrix<Scalar,3,3> &A) const
  {
    return A(0,0) * A.minor(1, 2, 1, 2) -
           A(0,1) * A.minor(1, 2, 0, 2) +
           A(0,2) * A.minor(1, 2, 0, 1);
  }
};
template<typename Scalar>
  struct Determinant<Scalar,4>
{
  // Cofactor expansion along row 0 using 3x3 minors; signs alternate + - + -.
  Scalar
    operator()(const Matrix<Scalar,4,4> &A) const
  {
    return A(0,0) * A.minor(1, 2, 3, 1, 2, 3) -
           A(0,1) * A.minor(1, 2, 3, 0, 2, 3) +
           A(0,2) * A.minor(1, 2, 3, 0, 1, 3) -
           A(0,3) * A.minor(1, 2, 3, 0, 1, 2);
  }
};
/*! XXX Hack: C++ doesn't allow partial specialization of
* template methods. We work around it here.
*/
/*! Functor computing the adjugate (classical adjoint) of a small square
 *  matrix: result(i,j) is the (j,i) cofactor, i.e. the transposed cofactor
 *  matrix, so A * adj(A) = det(A) * I. Only 2x2/3x3/4x4 are specialized;
 *  any other size fails to compile via the empty primary template.
 */
template<typename Scalar, unsigned int N, unsigned int M>
  struct Adjoint
{
};
template<typename Scalar>
  struct Adjoint<Scalar,2,2>
{
  Matrix<Scalar,2,2>
    operator()(const Matrix<Scalar,2,2> &A) const
  {
    // 2x2 adjugate: swap the diagonal, negate the off-diagonal.
    Matrix<Scalar,2,2> result;
    result(0,0) =  A(1,1);
    result(0,1) = -A(0,1);
    result(1,0) = -A(1,0);
    result(1,1) =  A(0,0);
    return result;
  } // end operator()()
};
template<typename Scalar>
  struct Adjoint<Scalar,3,3>
{
  Matrix<Scalar,3,3>
    operator()(const Matrix<Scalar,3,3> &A) const
  {
    // result(i,j) = cofactor C(j,i): note the transposed row/column choice
    // in each minor, with the usual (-1)^(i+j) checkerboard of signs.
    Matrix<Scalar,3,3> result;
    result(0,0) =  A.minor(1, 2, 1, 2);
    result(0,1) = -A.minor(0, 2, 1, 2);
    result(0,2) =  A.minor(0, 1, 1, 2);
    result(1,0) = -A.minor(1, 2, 0, 2);
    result(1,1) =  A.minor(0, 2, 0, 2);
    result(1,2) = -A.minor(0, 1, 0, 2);
    result(2,0) =  A.minor(1, 2, 0, 1);
    result(2,1) = -A.minor(0, 2, 0, 1);
    result(2,2) =  A.minor(0, 1, 0, 1);
    return result;
  } // end operator()()
};
template<typename Scalar>
  struct Adjoint<Scalar,4,4>
{
  Matrix<Scalar,4,4>
    operator()(const Matrix<Scalar,4,4> &A) const
  {
    // Same transposed-cofactor pattern with 3x3 minors.
    Matrix<Scalar,4,4> result;
    result(0,0) =  A.minor(1, 2, 3, 1, 2, 3);
    result(0,1) = -A.minor(0, 2, 3, 1, 2, 3);
    result(0,2) =  A.minor(0, 1, 3, 1, 2, 3);
    result(0,3) = -A.minor(0, 1, 2, 1, 2, 3);
    result(1,0) = -A.minor(1, 2, 3, 0, 2, 3);
    result(1,1) =  A.minor(0, 2, 3, 0, 2, 3);
    result(1,2) = -A.minor(0, 1, 3, 0, 2, 3);
    result(1,3) =  A.minor(0, 1, 2, 0, 2, 3);
    result(2,0) =  A.minor(1, 2, 3, 0, 1, 3);
    result(2,1) = -A.minor(0, 2, 3, 0, 1, 3);
    result(2,2) =  A.minor(0, 1, 3, 0, 1, 3);
    result(2,3) = -A.minor(0, 1, 2, 0, 1, 3);
    result(3,0) = -A.minor(1, 2, 3, 0, 1, 2);
    result(3,1) =  A.minor(0, 2, 3, 0, 1, 2);
    result(3,2) = -A.minor(0, 1, 3, 0, 1, 2);
    result(3,3) =  A.minor(0, 1, 2, 0, 1, 2);
    return result;
  } // end operator()()
};
} // end namespace gpcpu
#include "Matrix.inl"
#endif // MATRIX_H
| 27.662295
| 90
| 0.571886
|
[
"vector"
] |
c74e33d0daa7e0e078823fdbe2d6c6c6b4ca62fb
| 15,596
|
c
|
C
|
src/uparser_core.c
|
issamsaid/uparser
|
e56780fc7e62b45aa8175e44af3ab33f0691e9ad
|
[
"BSD-3-Clause"
] | 2
|
2017-09-08T13:11:17.000Z
|
2018-04-04T02:58:17.000Z
|
src/uparser_core.c
|
issamsaid/uparser
|
e56780fc7e62b45aa8175e44af3ab33f0691e9ad
|
[
"BSD-3-Clause"
] | null | null | null |
src/uparser_core.c
|
issamsaid/uparser
|
e56780fc7e62b45aa8175e44af3ab33f0691e9ad
|
[
"BSD-3-Clause"
] | null | null | null |
///
/// @copyright Copyright (c) 2016-, Issam SAID <[email protected]>
/// All rights reserved.
///
/// Redistribution and use in source and binary forms, with or without
/// modification, are permitted provided that the following conditions
/// are met:
///
/// 1. Redistributions of source code must retain the above copyright
/// notice, this list of conditions and the following disclaimer.
/// 2. Redistributions in binary form must reproduce the above copyright
/// notice, this list of conditions and the following disclaimer in the
/// documentation and/or other materials provided with the distribution.
/// 3. Neither the name of the copyright holder nor the names of its
/// contributors may be used to endorse or promote products derived from
/// this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
/// INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
/// HOLDER OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
/// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
/// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
/// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
/// LIABILITY, WETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
/// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
/// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
///
/// @file src/uparser_core.c
/// @author Issam SAID
/// @brief Implementation of the core uparser functions which include the
/// initialization, help and the finalization.
///
#include <uparser/core.h>
#include <uparser/single/get.h>
#include <__uparser/types-inl.h>
#include <__uparser/util-inl.h>
#include <__uparser/error-inl.h>
#include <__uparser/config/util.h>
///
/// @brief A static uparser object that will be used internally.
///
__uparser_t *up = NULL;
///
/// @brief Allocates and initializes the global parser singleton (idempotent:
///        a second call is a no-op while 'up' is non-NULL).
/// @param argc The argument count from main().
/// @param argv The argument vector from main(); argv[0] is copied as the
///             executable name. NOTE(review): assumes argc >= 1 and a
///             non-NULL argv[0] — confirm callers guarantee this.
///
void uparser_init(int argc, char** argv) {
    if (up == NULL) {
        up = (__uparser_t *)malloc(sizeof(__uparser_t));
        /// Fix: +1 for the terminating NUL. The previous allocation of
        /// exactly strlen(argv[0]) bytes overflowed by one byte when
        /// sprintf appended '\0' below.
        up->exe_name = (char *)malloc(sizeof(char)*(strlen(argv[0])+1));
        /// Lookup trees start as empty red-black trees (sentinel-rooted).
        up->short_lookup = &urb_sentinel;
        up->long_lookup = &urb_sentinel;
        up->args_index = &urb_sentinel;
        up->argc = argc;
        up->nb_args = 0;
        up->nb_opts = 0;
        up->argv = argv;
        sprintf(up->exe_name, "%s", argv[0]);
        /// Single-process getters; a parallel build presumably swaps these.
        up->get_bool = &uparser_get_bool_single;
        up->get_char = &uparser_get_char_single;
        up->get_string = &uparser_get_string_single;
        up->get_int32 = &uparser_get_int32_single;
        up->get_int64 = &uparser_get_int64_single;
        up->get_float = &uparser_get_float_single;
        up->get_double = &uparser_get_double_single;
    }
}
///
/// @brief Parses the command line captured by uparser_init().
///        Accepts "-abc" boolean bundles, "-k=value", "--key" booleans,
///        "--key=value", "--<file keyword>=path" (delegates to uparser_load)
///        and bare positional arguments. Exits via UPARSER_EXIT* on any
///        unrecognized or malformed input.
///
/// Fixes in this revision:
///  - every value buffer is now allocated with strlen(...)+1 bytes so the
///    sprintf below has room for the terminating NUL (previously a 1-byte
///    heap overflow on every assignment);
///  - the reallocation test uses >= so an equal-length value also gets a
///    correctly-sized buffer;
///  - the copy of argv[i] into the fixed-size scratch buffer is bounded
///    with snprintf.
///
void uparser_parse() {
    char tmp[__UPARSER_STR_SIZE];
    char *key, *value, **saved_value;
    urb_t *n; int i, j, passed_args = 0; __uparser_map_t *entry;
    if (up != NULL) {
        for (i=1; i < up->argc; i++) {
            /// bounded copy: arguments longer than the scratch buffer are
            /// truncated instead of smashing the stack.
            snprintf(tmp, sizeof(tmp), "%s", up->argv[i]);
            key = strtok(tmp, "=");
            if (__uparser_key_isshort(key)) {
                if((value = strtok(NULL, "=")) == NULL) {
                    /// a series of boolean arguments: values are not needed,
                    /// we set them equal to true internally (false otherwise).
                    for (j=1; j<strlen(key); j++) {
                        if (key[j] == 'h') {
                            uparser_usage();
                            uparser_release();
                            exit(EXIT_SUCCESS);
                        } else {
                            UPARSER_EXIT_IF((n=urb_tree_find(&up->short_lookup,
                                            (void*)&key[j],
                                            __uparser_char_cmp)) == &urb_sentinel,
                                            "key '%c' is not recognized", key[j]);
                            entry = (__uparser_map_t*)n->value;
                            saved_value = &entry->value;
                            UPARSER_EXIT_IF(!__uparser_opt_isboolean(entry),
                                            "invalid boolean argument, found non boolean"
                                            "'%c' with no assigned value", key[1]);
                            if (strlen("true") >= strlen(*saved_value)) {
                                free(*saved_value);
                                *saved_value =
                                    (char*)malloc(sizeof(char)*(strlen("true")+1));
                            }
                            sprintf(*saved_value, "%s", "true");
                        }
                    }
                } else {
                    /// this should be a short_key=value statement.
                    UPARSER_EXIT_IF(strlen(key) !=2,
                                    "'%s' is an invalid short key", key);
                    UPARSER_EXIT_IF((n=urb_tree_find(&up->short_lookup,
                                    (void*)&key[1],
                                    __uparser_char_cmp)) == &urb_sentinel,
                                    "key '%c' is not recognized", key[1]);
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(value),
                                    "argument value '%s' is not valid", value);
                    entry = (__uparser_map_t*)n->value;
                    saved_value = &entry->value;
                    UPARSER_EXIT_IF(__uparser_opt_isboolean(entry),
                                    "boolean arguments don't take values, "
                                    "found boolean '%c=%s'", key[1], value);
                    if (strlen(value) >= strlen(*saved_value)) {
                        free(*saved_value);
                        *saved_value =
                            (char*)malloc(sizeof(char)*(strlen(value)+1));
                    }
                    sprintf(*saved_value, "%s", value);
                }
            } else if (__uparser_key_islong(key)) {
                if((value = strtok(NULL, "=")) == NULL) {
                    /// this should be a long boolean argument,
                    /// the value equal to true internally (false otherwise).
                    if (strcmp(&key[2], "help") == 0) {
                        uparser_usage();
                        uparser_release();
                        exit(EXIT_SUCCESS);
                    } else {
                        UPARSER_EXIT_IF((n=urb_tree_find(&up->long_lookup,
                                        (void*)&key[2],
                                        __uparser_str_cmp)) == &urb_sentinel,
                                        "key '%s' is not recognized", key);
                        entry = (__uparser_map_t*)n->value;
                        saved_value = &entry->value;
                        UPARSER_EXIT_IF(!__uparser_opt_isboolean(entry),
                                        "invalid boolean argument, found non boolean"
                                        "'%s' with no assigned value", &key[2]);
                        if (strlen("true") >= strlen(*saved_value)) {
                            free(*saved_value);
                            *saved_value =
                                (char*)malloc(sizeof(char)*(strlen("true")+1));
                        }
                        sprintf(*saved_value, "%s", "true");
                    }
                } else {
                    /// this should be a long_key=value statement.
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(value),
                                    "argument value '%s' is not valid", value);
                    if (__uparser_str_cmp(&key[2], __UPARSER_FILE_KEYWORD) == 0) {
                        uparser_load(value);
                    } else {
                        UPARSER_EXIT_IF((n=urb_tree_find(&up->long_lookup,
                                        (void*)&key[2],
                                        __uparser_str_cmp)) == &urb_sentinel,
                                        "key '%s' is not recognized", key);
                        entry = (__uparser_map_t*)n->value;
                        saved_value = &entry->value;
                        UPARSER_EXIT_IF(__uparser_opt_isboolean(entry),
                                        "boolean arguments don't take values, "
                                        "found boolean '%s=%s'",
                                        &key[2], value);
                        if (strlen(value) >= strlen(*saved_value)) {
                            free(*saved_value);
                            *saved_value =
                                (char*)malloc(sizeof(char)*(strlen(value)+1));
                        }
                        sprintf(*saved_value, "%s", value);
                    }
                }
            } else if (__uparser_arg_isvalid(up->argv[i])) {
                /// positional argument: looked up by its ordinal.
                /// NOTE(review): the key here is an int* but the comparator is
                /// __uparser_str_cmp — looks suspicious; verify against how
                /// args_index keys are inserted before changing.
                UPARSER_EXIT_IF((n=urb_tree_find(&up->args_index,
                                (void*)&passed_args,
                                __uparser_str_cmp)) == &urb_sentinel,
                                "argument '%s' is not recognized", up->argv[i]);
                entry = (__uparser_map_t*)n->value;
                entry->value = (char*)malloc(sizeof(char)*(strlen(up->argv[i])+1));
                sprintf(entry->value, "%s", up->argv[i]);
                passed_args++;
            } else { UPARSER_EXIT("invalid option or argument '%s'", key); }
        }
        if (up->nb_args != passed_args) {
            uparser_usage();
            UPARSER_EXIT("invalid number of arguments passed (%d/%d)",
                         passed_args, up->nb_args);
        }
    }
}
///
/// @details
/// When parsing from a file the syntax should be key=value.
/// "key" can be short or long but the dashes are not needed.
/// Boolean values should be explicit,i.e. key=true or key=false.
///
void uparser_load(const char *filename) {
    /// Reads the whole file, then processes it line by line; empty lines and
    /// comment lines are skipped. "help=true" or "h=true" prints the usage
    /// and stops; the file keyword recurses into another file.
    ///
    /// Fixes in this revision: value buffers are allocated with
    /// strlen(s)+1 bytes (room for the NUL written by sprintf, previously a
    /// 1-byte heap overflow) and the reallocation test uses >= so an
    /// equal-length value also gets a correctly sized buffer.
    urb_t *n;
    char *s, *ch, *line, *save_line, *save_param;
    size_t file_size;
    char *file_content, **saved_value;
    if (up != NULL) {
        UPARSER_EXIT_IF((NULL == filename) ||
                        (strlen(filename) == 0), "filename not valid");
        UPARSER_EXIT_IF(!__uparser_file_exists(filename),
                        "file '%s' not found", filename);
        file_size = __uparser_file_size(filename);
        file_content = (char*)malloc(file_size*sizeof(char)+1);
        __uparser_file_read(file_content, file_size+1, filename);
        for(ch = file_content; ;ch = NULL) {
            line = strtok_r(ch, "\n", &save_line);
            if (line == NULL) break;
            __uparser_line_trim(line);
            if ((__uparser_line_isempty(line)) ||
                (__uparser_line_iscomment(line))) continue;
            UPARSER_EXIT_IF((s=strtok_r(line, "=", &save_param)) == NULL,
                            "invalid argument '%s' (= not found)", line);
            if (strlen(s)==1) {
                /// this should be a short_key=value statement.
                if (s[0] == 'h') {
                    s = strtok_r(NULL, "=", &save_param);
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(s),
                                    "argument value '%s' is not valid", s);
                    if (__uparser_str_cmp(s, "true") == 0) {
                        uparser_usage();
                        break;
                    }
                }
                UPARSER_EXIT_IF((n=urb_tree_find(&up->short_lookup,
                                (void*)&s[0],
                                __uparser_char_cmp)) == &urb_sentinel,
                                "key '%c' is not recognized", s[0]);
                s = strtok_r(NULL, "=", &save_param);
                UPARSER_EXIT_IF(!__uparser_value_isvalid(s),
                                "argument value '%s' is not valid", s);
                saved_value = &((__uparser_map_t*)n->value)->value;
                if (strlen(s) >= strlen(*saved_value)) {
                    free(*saved_value);
                    *saved_value = (char*)malloc(sizeof(char)*(strlen(s)+1));
                }
                sprintf(*saved_value, "%s", s);
            } else {
                if (__uparser_str_cmp(s, __UPARSER_FILE_KEYWORD) == 0) {
                    /// nested parameter file: recurse.
                    s = strtok_r(NULL, "=", &save_param);
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(s),
                                    "argument value '%s' is not valid", s);
                    uparser_load(s);
                } else if (__uparser_str_cmp(s, "help") == 0) {
                    s = strtok_r(NULL, "=", &save_param);
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(s),
                                    "argument value '%s' is not valid", s);
                    if (__uparser_str_cmp(s, "true") == 0) {
                        uparser_usage();
                        break;
                    }
                } else {
                    UPARSER_EXIT_IF((n=urb_tree_find(&up->long_lookup,
                                    (void*)s,
                                    __uparser_str_cmp)) == &urb_sentinel,
                                    "key '%s' is not recognized", s);
                    /// this should be a long_key=value statement.
                    s = strtok_r(NULL, "=", &save_param);
                    UPARSER_EXIT_IF(!__uparser_value_isvalid(s),
                                    "argument value '%s' is not valid", s);
                    saved_value = &((__uparser_map_t*)n->value)->value;
                    if (strlen(s) >= strlen(*saved_value)) {
                        free(*saved_value);
                        *saved_value = (char*)malloc(sizeof(char)*(strlen(s)+1));
                    }
                    sprintf(*saved_value, "%s", s);
                }
            }
        }
        free(file_content);
    }
}
///
/// @brief Prints the usage banner: a USAGE line tailored to whether options
///        and/or positional arguments are registered, then the registered
///        options and arguments (walked from the lookup trees).
///
void uparser_usage() {
    if (up == NULL) return;
    const int has_opts = (up->nb_opts != 0);
    const int has_args = (up->nb_args != 0);
    UPARSER_PRINT("");
    /// One USAGE line, shaped by what was registered.
    if (has_opts && has_args) {
        UPARSER_PRINT("USAGE : %s [options] [arguments]", up->exe_name);
    } else if (has_opts) {
        UPARSER_PRINT("USAGE : %s [options]", up->exe_name);
    } else if (has_args) {
        UPARSER_PRINT("USAGE : %s [arguments]", up->exe_name);
    } else {
        UPARSER_PRINT("USAGE : %s", up->exe_name);
    }
    UPARSER_PRINT("");
    if (has_opts) {
        /// Built-in options first, then the user-registered long options.
        UPARSER_PRINT("OPTIONS:");
        UPARSER_PRINT("\t--%-12s%s%-10s %s",
                      "help", ", -h ", " ", "show this help message");
        UPARSER_PRINT("\t--%-12s%-10s %s",
                      __UPARSER_FILE_KEYWORD,
                      " = [value] ", "load parameters from file");
        urb_tree_walk(&up->long_lookup, NULL, __uparser_opt_print);
    }
    if (has_args) {
        UPARSER_PRINT("");
        UPARSER_PRINT("ARGUMENTS:");
        urb_tree_walk(&up->args_index, NULL, __uparser_arg_print);
    }
    UPARSER_PRINT("");
}
///
/// @brief Frees everything owned by the parser singleton and resets it to
///        NULL so a later uparser_init() can start fresh. Safe to call when
///        the parser was never initialized.
///
void uparser_release() {
    if (up == NULL) return;
    /// Tear the three lookup trees down first, then the singleton itself.
    urb_tree_delete( &up->long_lookup, NULL, __uparser_map_del);
    urb_tree_delete(&up->short_lookup, NULL, NULL);
    urb_tree_delete(&up->args_index, __uparser_ptr_del, NULL);
    free(up->exe_name); free(up); up = NULL;
}
| 47.69419
| 82
| 0.474801
|
[
"object"
] |
c754ebc7f8167f08b3900f436e0fcd084aaae34c
| 12,203
|
h
|
C
|
src/canoe/simple_overlay.h
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
src/canoe/simple_overlay.h
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
src/canoe/simple_overlay.h
|
nrc-cnrc/Portage-SMT-TAS
|
73f5a65de4adfa13008ea9a01758385c97526059
|
[
"MIT"
] | null | null | null |
/**
* @author Colin Cherry
* @file simple_overlay.h View a search graph as a lattice
*
* Canoe Decoder
*
* Technologies langagieres interactives / Interactive Language Technologies
* Inst. de technologie de l'information / Institute for Information Technology
* Conseil national de recherches Canada / National Research Council Canada
* Copyright 2012, Sa Majeste la Reine du Chef du Canada /
* Copyright 2012, Her Majesty in Right of Canada
*/
#ifndef SIMPLEOVERLAY_H
#define SIMPLEOVERLAY_H
#include <map>
#include "decoder.h"
#include "phrasedecoder_model.h"
#include "wordgraph.h"
#include <boost/operators.hpp>
namespace Portage {
class LatticeEdges;
class SimpleOutEdgeIterator;
typedef vector<DecoderState *>::const_iterator DecoderStateIterator;
typedef vector<DecoderState *>::const_reverse_iterator RDecoderStateIterator;
/**
* An edge in the lattice.
* Abstracts underlying decoder states into three pointers:
*
* From: The recombined original state ("prime state")
* To: The original back-pointer
* Info: The un-recombined state, contains all information about the edge
* from #from to #to.
* Note that we use parent->child terminology throughout this code, in analog
* to the java HyperGraph structure from which most of it is adapted.
* From is the parent, To is the child.
* Thus, FINAL is the root, and all true DecoderStates are its descendants
*/
class LatticeEdge {
   private:
      DecoderState* m_from;   // parent ("prime") state
      DecoderState* m_to;     // child (back-pointer) state
      DecoderState* m_info;   // un-recombined state carrying the edge data; NULL for root edges
      // Floor applied to edge scores; shared by every LatticeEdge (see setMinScore).
      static double m_min_score;
   public:
      /**
       * Constructor
       * @param from Prime state
       * @param to   Back-pointer
       * @param info Non-prime state that originally pointed to #to
       *             NULL if #from is the dummy root
       */
      LatticeEdge(DecoderState* from, DecoderState* to, DecoderState* info)
      {
         m_from = from;
         m_to = to;
         m_info = info;
      }

      // Accessors (non-owning raw pointers; lifetime managed by the decoder)
      DecoderState* from() const { return m_from; }
      DecoderState* to() const {return m_to; }
      DecoderState* info() const {return m_info; }

      /**
       * @return The log probability, or cost of this edge
       */
      double score() const;

      /**
       * @return True if #from is the dummy root
       */
      bool isInitial() const {return m_info==0;}

      /**
       * @return Node id, or FINAL for dummy root
       */
      std::string fromName() const;

      /**
       * @return Unique edge identifier, not necessarily sequential
       */
      Uint id() const;

      /**
       * Set the minscore value (class-wide: affects all edges)
       */
      static void setMinScore(double d) {m_min_score = d;}
};
/**
* The lattice overlay. Can answer questions about the lattice in general,
* enumerate its nodes and provide edge lists for each node.
*/
class SimpleOverlay {
DecoderStateIterator initial_states_begin;
DecoderStateIterator initial_states_end;
PhraseDecoderModel& model;
DecoderState* dummy_initial_state;
vector<DecoderState*>* states_inside_order;
bool lattice_log_prob;
public:
/**
* Constructor
* @param initial_states_begin_parm Begin iterator for list of initial states
* @param initial_states_end_parm End iterator for list of initial
* @param model_parm Model that produced the lattice, not currently used
*/
      SimpleOverlay(
         DecoderStateIterator initial_states_begin_parm,
         DecoderStateIterator initial_states_end_parm,
         PhraseDecoderModel & model_parm,
         bool latticeLogProb)
         : initial_states_begin( initial_states_begin_parm ),
           initial_states_end( initial_states_end_parm ),
           model( model_parm ),
           lattice_log_prob(latticeLogProb)
      {
         // Create the dummy root ("FINAL") that will point at every initial
         // state; owned by this object and deleted in the destructor.
         dummy_initial_state = new DecoderState;
         // Find the largest id used by any initial state or by any of the
         // states recombined into one, so the dummy root can take max_id+1
         // without colliding with a real state.
         Uint max_id = 0;
         for ( DecoderStateIterator vi = initial_states_begin;
               vi != initial_states_end;
               ++vi )
         {
            if ( max_id < (*vi)-> id ) {
               max_id = (*vi)-> id;
            }
            // max_id needs to be over both initial states and their
            // equivalents
            for( DecoderStateIterator ri = (*vi)->recomb.begin();
                 ri != (*vi)->recomb.end(); ++ri)
            {
               if ( max_id < (* ri)-> id ) {
                  max_id = (*ri)-> id;
               }
            }
         }
         // The dummy root carries no translation/back-pointer of its own.
         dummy_initial_state-> id = max_id+1;
         dummy_initial_state-> trans = NULL;
         dummy_initial_state-> back = NULL;
         dummy_initial_state-> refCount = 0;
         // Built lazily by buildInsideVector() on first use.
         states_inside_order = NULL;
      }
/**
* @return Naive estimate of number of states based on state ids
*/
Uint numStates() const {return dummy_initial_state->id+1;}
/**
* Destructor
*/
~SimpleOverlay() {
delete dummy_initial_state;
if(states_inside_order!=NULL) delete states_inside_order;
}
private:
/// Deactivated copy constructor.
SimpleOverlay( SimpleOverlay const & );
/// Deactivated copy assignment operator
SimpleOverlay operator=( SimpleOverlay const & );
//
// Utility
//
// Helper for various print functions
void insertEscapes(std::string &str, const char *charsToEscape = "\"\\", char escapeChar = '\\');
// Helper to build the inside-order list of nodes lazily
void buildInsideVector();
// Generic function to print only edges that satisfy a boolean predicate (pred)
template<typename Pred>
Uint print_lattice( ostream& file, PrintFunc & print, Pred pred);
//
// Viterbi inside-outside, good for pruning, n-best list construction
//
/**
* Viterbi inside. Find best score up to each node
* @return Map from node id to log inside score
*/
map<Uint,double> inside();
/**
* Viterbi outside. Find best completion score following this node
* @param inside Output from the inside algorithm
* @return Map from node id to log outside score
*/
map<Uint,double> outside(const map<Uint,double>& inside);
/**
* Viterbi inside-outside. For each edge, find the best scoring path
* that includes that edge. Calls inside and outside.
* @return Map from edge id to log inside-outside score
*/
map<Uint,double> insideOutside();
public:
// Accessors
DecoderState * get_initial_state() const { return dummy_initial_state; }
DecoderStateIterator get_initial_states_begin() const {return initial_states_begin;}
DecoderStateIterator get_initial_states_end() const {return initial_states_end;}
PhraseDecoderModel & get_model() const { return model; }
/**
* Is the input state the root?
* @param state State to be tested
* @return true if its the root
*/
bool is_initial_state( DecoderState * const state ) const {
return state == dummy_initial_state;
}
/**
* Is the input state "final"?
* @param state State to be tested
* @return true if it is node 0, which should be the only node with no back-pointers
*/
bool is_final_state( DecoderState * const state ) const {
return state-> id == 0;
}
/**
* Get the edges from an input state
* @param v State to use as from
* @return Iterable of outgoing edges
*/
LatticeEdges getEdges(DecoderState * const & v);
/**
* @return Begin iterator to enumerate the nodes of the lattice in inside order
*/
DecoderStateIterator statesInOrderBegin();
/**
* @return End iterator to enumerate the nodes of the lattice in inside order
*/
DecoderStateIterator statesInOrderEnd();
/**
* @return Begin iterator to enumerate the nodes of the lattice in outside order
*/
RDecoderStateIterator statesOutOrderBegin();
/**
* @return End iterator to enumerate the nodes of the lattice in outside order
*/
RDecoderStateIterator statesOutOrderEnd();
/**
* Print the lattice to a file
* @param file Output stream to print to
* @param print PrintFunc specifies what edge attributes to print
* @return Number of edges printed
*/
Uint print_lattice( ostream& file, PrintFunc & print);
/**
* Print a pruned lattice to a stream
* @param file Output stream
* @param print PrintFunc object specifies what edge attributes to print
* @param density Prune the lattice down to #density x #tgtlen edges
* @param tgtlen Calculate density using this number of tokens,
* sane choices are len of the Vertbi translation or source len
* @param verbosity Higher numbers = more debugging info
* @return Number of edges printed
*/
Uint print_pruned_lattice(ostream& file, PrintFunc & print, double density, Uint tgtlen, Uint verbosity);
/**
* @return The length of the source sentence for this lattice
*/
Uint sourceLen();
};
/**
* Here is where the magic happens. This OutEdgeIterator is where we hide all
* the ugliness of state recombination.
*
* Iterator structure copied from lattice_overlay, but the interaction with the
* underlying search graph is completely different, and results in much more compact
* lattices
*/
class SimpleOutEdgeIterator
    : public boost::forward_iterator_helper<SimpleOutEdgeIterator, DecoderState*>
{
    DecoderState* from_state;              // State whose outgoing edges are enumerated
    DecoderStateIterator state_iterator;   // Cursor over the recombined states of from_state
    // Which source of "next edge" the iterator is currently consuming.
    enum state_type {
        use__state,
        use__iterator
    };
    state_type state_finite_memory;        // Current phase of the enumeration
    bool is_initial;                       // True when iterating edges out of the dummy root
public:
    // Empty constructor — leaves members uninitialized; only valid as an
    // assignment target, per the forward-iterator requirements.
    SimpleOutEdgeIterator(){}
    /// Copy constructor.
    /// @param ei edge iterator we want to copy.
    SimpleOutEdgeIterator(SimpleOutEdgeIterator const & ei);
    /// Copy assignment operator.
    /// @param ei edge iterator we want to copy.
    SimpleOutEdgeIterator & operator=(SimpleOutEdgeIterator const & ei);
    /**
     * Swaps two edge iterator.
     * @param ei_1 left-hand side operand.
     * @param ei_2 right-hand side operand.
     */
    void swap(SimpleOutEdgeIterator & ei_1, SimpleOutEdgeIterator & ei_2);
    bool operator==(SimpleOutEdgeIterator const & ei) const;
    // Dereference builds the LatticeEdge on the fly (returns by value).
    LatticeEdge operator*();
    SimpleOutEdgeIterator & operator++();
    /**
     * Construct a Begin iterator
     * @param state State from which to build each edge
     * @param g The overlay representing the entire lattice
     */
    SimpleOutEdgeIterator(DecoderState * const & state, SimpleOverlay const & g);
    /**
     * Construct an End iterator.
     * @param state State from which to build each edge
     * @param g The overlay representing the entire lattice
     * @param construct_end_iterator Acts as a flag to differentiate between both constructors.
     */
    SimpleOutEdgeIterator(DecoderState * const & state, SimpleOverlay const & g, int construct_end_iterator);
};
/**
* Hey look! It's a begin iterator and an end iterator living together, but it's not a container
* I miss Java's Iterable
* This is how you should interact with SimpleOutEdgeIterator, rather than calling the Iterator
* constructor directly. See print_lattice or buildInsideVector for examples
*/
class LatticeEdges {
    SimpleOutEdgeIterator m_begin;  // First outgoing edge of the state
    SimpleOutEdgeIterator m_end;    // Past-the-end sentinel
public:
    // Build the begin/end pair for all edges leaving `state` in overlay `g`.
    LatticeEdges(DecoderState * const & state, SimpleOverlay const & g) :
        m_begin(state, g), m_end(state, g, 1) {}
    // NOTE(review): begin() returns by value while end() returns a const
    // reference, and neither is const-qualified — inconsistent, but changing
    // the return types could affect existing callers, so left as-is.
    SimpleOutEdgeIterator begin() { return m_begin; }
    const SimpleOutEdgeIterator& end() { return m_end; }
};
}
#endif // SIMPLEOVERLAY_H
| 33.250681
| 114
| 0.628698
|
[
"object",
"vector",
"model"
] |
c76399569f365092f05fd3b18cabb888f26fa727
| 11,435
|
h
|
C
|
Rumble3D/include/R3D/RigidBodyEngine/RigidBody.h
|
Nelaty/Rumble3D
|
801b9feec27ceeea91db3b759083f6351634e062
|
[
"MIT"
] | 1
|
2020-01-21T16:01:53.000Z
|
2020-01-21T16:01:53.000Z
|
Rumble3D/include/R3D/RigidBodyEngine/RigidBody.h
|
Nelaty/Rumble3D
|
801b9feec27ceeea91db3b759083f6351634e062
|
[
"MIT"
] | 1
|
2019-10-08T08:25:33.000Z
|
2019-10-09T06:39:06.000Z
|
Rumble3D/include/R3D/RigidBodyEngine/RigidBody.h
|
Nelaty/Rumble3D
|
801b9feec27ceeea91db3b759083f6351634e062
|
[
"MIT"
] | 1
|
2019-05-14T13:48:16.000Z
|
2019-05-14T13:48:16.000Z
|
#pragma once
#include "R3D/Common/Common.h"
#include "R3D/Common/Precision.h"
#include "R3D/Transform3D.h"
#include "R3D/RigidBodyEngine/CollisionObject.h"
#include "R3D/RigidBodyEngine/CollisionCallback.h"
#include "R3D/RigidBodyEngine/CollisionDetection/CollisionMask.h"
#include "R3D/RigidBodyEngine/RigidBodyDef.h"
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>
namespace r3
{
class PhysicsMaterial;
/**
* \brief A rigid body is a 3D object, which can be moved and
* rotated and has collision properties.
*/
class R3D_DECLSPEC RigidBody : public CollisionObject
{
public:
	/** \brief Default constructor. Members are set when init() is called. */
	explicit RigidBody();
	/**
	 * \brief RigidBody constructor.
	 * \param def Rigid body construction information.
	 */
	explicit RigidBody(const RigidBodyDef& def);
	~RigidBody() = default;
	/**
	 * \brief Initialize all attributes of this rigid body.
	 * \param definition Rigid body construction information.
	 */
	void init(const RigidBodyDef& definition);
	/**
	 * \brief Recompute cached data derived from the body's state
	 * (presumably the transformation matrix and world-space inertia
	 * tensor — confirm in the implementation).
	 */
	void calculateDerivedData();
	/**
	 * \brief Set the current inertia tensor.
	 * \param inertiaTensor The new inertia tensor.
	 */
	void setInertiaTensor(const glm::mat3& inertiaTensor);
	/**
	 * \brief Get the inverted inertia tensor.
	 * \return The inverse of the current inertia tensor.
	 */
	glm::mat3 getInverseTensor() const;
	/**
	 * \brief Set the mass of this rigid body.
	 * \param mass The new mass.
	 * \details Implicitly sets the inverse mass.
	 */
	void setMass(real mass);
	/**
	 * \brief Get the mass of this rigid body.
	 * \return The current mass.
	 */
	real getMass() const;
	/**
	 * \brief Set the inverse mass of this rigid body.
	 * \param inverseMass An inverse mass of zero equals a rigid body
	 * with infinite mass.
	 * \details Implicitly sets the mass.
	 */
	void setInverseMass(real inverseMass);
	/**
	 * \brief Get the inverse mass.
	 * \return The inverse of the current mass.
	 * \details An inverse mass of zero equals a rigid body
	 * with infinite mass.
	 */
	real getInverseMass() const;
	/**
	 * \brief Check if this rigid body has finite mass.
	 * \return True if the mass is finite, false otherwise.
	 */
	bool hasFiniteMass() const;
	/**
	 * \brief Get the current sum of linear forces.
	 * \return The force accumulator.
	 */
	glm::vec3 getForceAccumulated() const;
	/**
	 * \brief Get the current sum of rotational forces.
	 * \return The torque accumulator.
	 */
	glm::vec3 getTorqueAccumulated() const;
	/**
	 * \brief Set the position of this rigid body.
	 * \param centerOfMass The new position.
	 */
	void setCenterOfMass(const glm::vec3& centerOfMass);
	/**
	 * \brief Set the position of this rigid body.
	 * \param x The x-component of the new position.
	 * \param y The y-component of the new position.
	 * \param z The z-component of the new position.
	 */
	void setCenterOfMass(real x, real y, real z);
	/**
	 * \brief Get the current position.
	 * \return The position.
	 */
	glm::vec3 getCenterOfMass() const;
	/**
	 * \brief Set the current orientation.
	 * \param orientation The new orientation.
	 */
	void setOrientation(const glm::quat& orientation);
	/**
	 * \brief Set the current orientation.
	 * \param r The radius in radians
	 * \param i The x component of the rotation axis.
	 * \param j The y component of the rotation axis.
	 * \param k The z component of the rotation axis.
	 */
	void setOrientation(real r, real i, real j, real k);
	/**
	 * \brief Get the current orientation.
	 * \return The orientation.
	 */
	glm::quat getOrientation() const;
	/**
	 * \brief Set the current rotation (angular velocity).
	 * \param rotation The new rotation.
	 */
	void setRotation(const glm::vec3& rotation);
	/**
	 * \brief Set the current rotation (angular velocity).
	 * \param x The x-component of the new rotation.
	 * \param y The y-component of the new rotation.
	 * \param z The z-component of the new rotation.
	 */
	void setRotation(real x, real y, real z);
	/**
	 * \brief Get the current rotation (angular velocity).
	 * \return The rotation.
	 */
	glm::vec3 getRotation() const;
	/**
	 * \brief Set the current velocity.
	 * \param velocity The new velocity.
	 */
	void setVelocity(const glm::vec3& velocity);
	/**
	 * \brief Set the current velocity.
	 * \param x The x-component of the new velocity.
	 * \param y The y-component of the new velocity.
	 * \param z The z-component of the new velocity.
	 */
	void setVelocity(real x, real y, real z);
	/**
	 * \brief Get the current velocity.
	 * \return The velocity.
	 */
	glm::vec3 getVelocity() const;
	/**
	 * \brief Set the current acceleration.
	 * \param acceleration The new acceleration.
	 */
	void setAcceleration(const glm::vec3& acceleration);
	/**
	 * \brief Set the current acceleration.
	 * \param x The x-component of the new acceleration
	 * \param y The y-component of the new acceleration
	 * \param z The z-component of the new acceleration
	 */
	void setAcceleration(real x, real y, real z);
	/**
	 * \brief Get the current acceleration.
	 * \return The acceleration.
	 */
	const glm::vec3& getAcceleration() const;
	/**
	 * \brief Get the acceleration from the last update.
	 */
	const glm::vec3& getLastFrameAcceleration() const;
	/**
	 * \brief Set the linear damping coefficient.
	 * \param linearDamping The new linear damping coefficient.
	 */
	void setLinearDamping(real linearDamping);
	/**
	 * \brief Get the linear damping factor.
	 * \return The linear damping coefficient.
	 */
	real getLinearDamping() const;
	/**
	 * \brief Set the angular damping coefficient.
	 * \param angularDamping The new angular damping coefficient.
	 */
	void setAngularDamping(real angularDamping);
	/**
	 * \brief Get the angular damping coefficient.
	 * \return The angular damping coefficient.
	 */
	real getAngularDamping() const;
	/**
	 * \brief Get the current transformation matrix.
	 * \return The transformation matrix.
	 */
	const glm::mat4& getTransformationMatrix() const;
	/**
	 * \brief Get the inverted inertia tensor in world coordinates.
	 * \param[out] inverseInertiaTensorWorld Contains the inverted
	 * inertia tensor.
	 */
	void getInverseInertiaTensorWorld(glm::mat3* inverseInertiaTensorWorld) const;
	/**
	 * \brief Get the inverted inertia tensor in world coordinates.
	 */
	const glm::mat3& getInverseInertiaTensorWorld() const;
	/**
	 * \brief Additively increase velocity.
	 * \param deltaVelocity The summand.
	 */
	void addVelocity(const glm::vec3& deltaVelocity);
	/**
	 * \brief Additively increase rotation.
	 * \param deltaRotation The summand.
	 */
	void addRotation(const glm::vec3& deltaRotation);
	/**
	 * \brief Check if this rigid body is being simulated.
	 * \return True if it is being simulated, false otherwise.
	 */
	bool isAwake() const;
	/**
	 * \brief Start or stop simulating this rigid body.
	 * \param awake The new awake state.
	 */
	void setAwake(bool awake);
	/**
	 * \brief Check if this body can go to sleep at any time.
	 */
	bool canSleep() const;
	/**
	 * \brief Set whether the body can got to sleep at any time.
	 */
	void setCanSleep(bool canSleep);
	/**
	 * \brief Set the physics material.
	 * \param material The new physics material.
	 */
	void setPhysicsMaterial(const PhysicsMaterial& material);
	/**
	 * \brief Get the current physics material.
	 * \return The current physics material.
	 */
	const PhysicsMaterial& getPhysicsMaterial() const;
	/**
	 * \brief Reset accumulated linear and angular forces.
	 */
	void clearAccumulators();
	/**
	 * \brief Add a force whose attack point is the body's
	 * center of mass.
	 * \param force The force to be added.
	 */
	void addForce(const glm::vec3& force);
	/**
	 * \brief Add a force at a specified body point. Can create
	 * rotational forces.
	 * \param force The force to be applied
	 * \param point The attack point of the force in world coordinates.
	 */
	void addForceAtPoint(const glm::vec3& force, const glm::vec3& point);
	/**
	 * \brief Add a force at a specified body point. Can create
	 * rotational forces.
	 * \param force The force to be applied
	 * \param point The attack point of the force in local body
	 * coordinates.
	 */
	void addForceAtBodyPoint(const glm::vec3& force, const glm::vec3& point);
	/** \brief Additively increase the torque accumulator. */
	void addTorque(const glm::vec3& torque);
	/** \brief Set the currently used callback. */
	void setCollisionCallback(const CollisionCallback& callback);
	/** \brief Get the collision callback of this body. */
	const CollisionCallback& getCollisionCallback() const;
	/**
	 * \brief Convert a point into local body space.
	 * \param point The point to convert.
	 * \return The point in local body space.
	 */
	glm::vec3 getPointInLocalSpace(const glm::vec3& point) const;
	/**
	 * \brief Convert a point from local body space into world
	 * space.
	 * \param point A point in local body space.
	 * \return The converted point in world space.
	 */
	glm::vec3 getPointInWorldSpace(const glm::vec3& point) const;
	/**
	 * \brief Convert a direction from world space into local
	 * space.
	 * \param direction The direction in world space.
	 * \return The direction in local space.
	 */
	glm::vec3 getDirectionInLocalSpace(const glm::vec3& direction) const;
	/**
	 * \brief Convert a direction from local body space into
	 * world space.
	 * \param direction The direction in local body space.
	 * \return The direction in world space.
	 */
	glm::vec3 getDirectionInWorldSpace(const glm::vec3& direction) const;
	/**
	 * \brief Resolve position and rotation.
	 * \param timeDelta The duration to integrate over.
	 */
	virtual void integrate(real timeDelta);
	/**
	 * \brief Reset the rigid body.
	 * \param position The initial position
	 * \param rotation The initial rotation.
	 */
	void reset(const glm::vec3& position = glm::vec3(0),
			   const glm::vec3& rotation = glm::vec3(0));
protected:
	PhysicsMaterial m_physicsMaterial;      ///< Material of this body; semantics defined by PhysicsMaterial.
	CollisionMask m_collisionMask;          ///< Collision filtering mask — presumably controls which bodies may collide; confirm usage.
	CollisionCallback m_collisionCallback;  ///< Invoked on collisions (see setCollisionCallback).
	// NOTE(review): unlike the braced members below, m_mass is not
	// brace-initialized — confirm init() always sets it before use.
	real m_mass;
	real m_inverseMass{};                   ///< 1/mass; zero means infinite mass.
	real m_linearDamping{};                 ///< Damping applied to linear motion.
	real m_angularDamping{};                ///< Damping applied to angular motion.
	glm::vec3 m_velocity;                   ///< Linear velocity.
	glm::vec3 m_acceleration;               ///< Current acceleration.
	glm::vec3 m_lastFrameAcceleration;      ///< Acceleration from the previous update.
	glm::vec3 m_rotation;                   ///< Angular velocity.
	glm::mat4 m_transformationMatrix;       ///< Cached local-to-world transform.
	glm::mat3 m_inverseInertiaTensor;       ///< Inverse inertia tensor in body space.
	glm::mat3 m_inverseInertiaTensorWorld;  ///< Inverse inertia tensor in world space.
	/* Accumulators */
	glm::vec3 m_forceAccumulated;           ///< Sum of linear forces for this step.
	glm::vec3 m_torqueAccumulated;          ///< Sum of rotational forces for this step.
	bool m_awake{};                         ///< Whether the body is currently simulated.
	// NOTE(review): m_motion and m_sleepEpsilon are not brace-initialized —
	// presumably used for the sleep heuristic; confirm they are set in init().
	real m_motion;
	real m_sleepEpsilon;
	bool m_canSleep{};                      ///< Whether the body may be put to sleep.
	/**
	 * \brief Calculate a transformation matrix from given parameters.
	 * \param[out] transformationMatrix The calculated transformation
	 * matrix.
	 * \param position The translation applied to the matrix.
	 * \param orientation The orientation applied to the matrix.
	 */
	static void calculateTransformationMatrix(glm::mat4& transformationMatrix,
											  const glm::vec3& position,
											  const glm::mat3& orientation);
	/**
	 * \brief Transform the body-space inertia tensor into world space.
	 * \todo Refactor names and function implementation.
	 */
	static void transformInertiaTensor(glm::mat3& iitWorld,
									   const glm::mat3& iit,
									   const glm::mat4& rotMat);
};
}
| 28.949367
| 80
| 0.679318
|
[
"object",
"3d"
] |
c768e90687ca0b2b9fa1756bc256350a1be82ffb
| 1,655
|
c
|
C
|
examples/random_code_search.c
|
Alan-Robertson/qecode
|
fc01e6f66a7fbe4f6b2bda6b3874fd6e5b10c628
|
[
"MIT"
] | 4
|
2018-03-07T06:15:32.000Z
|
2021-11-17T10:50:15.000Z
|
examples/random_code_search.c
|
Alan-Robertson/qecode
|
fc01e6f66a7fbe4f6b2bda6b3874fd6e5b10c628
|
[
"MIT"
] | null | null | null |
examples/random_code_search.c
|
Alan-Robertson/qecode
|
fc01e6f66a7fbe4f6b2bda6b3874fd6e5b10c628
|
[
"MIT"
] | null | null | null |
//#include <iostream>
#include "../codes.h"
#include "../error_models.h"
#include "../destabilisers.h"
#include "../sym_iter.h"
#include "../tailored.h"
#include "../decoders.h"
#include "characterise.h"
#include "random_code_search.h"
#include "channel.h"
/*
IF YOU SEE A RECOVERABLE MEMORY LEAK of 72,704 bytes in 1 block, it's caused by iostream
*/
int main()
{
	/*
		Error Model
	*/
	// Problem size: search over random [[n_qubits, n_logicals, distance]]
	// candidate codes.
	unsigned n_qubits = 7, n_logicals = 1, distance = 3;
	unsigned n_codes_searched = 1000;
	// Setup the error model
	error_model_f error_model = error_model_iid;
	// Setup the model data
	iid_model_data model_data;
	model_data.n_qubits = n_qubits;
	model_data.p_error = 0.1;
	/*
		QECC
	*/
	// Search n_codes_searched random codes and keep the best performer
	// together with per-code statistics.
	struct random_search_results r = random_code_search_best_of_n_codes_with_stats(
		n_qubits,
		n_logicals,
		distance,
		error_model,
		(void*)&model_data,
		n_codes_searched);
	sym* code = r.code;
	sym* logicals = r.logicals;
	// Build a decoder tailored to the best code found.
	sym** decoder_data = tailor_decoder(code, logicals, error_model, (void*)&model_data);
	decoder_f decoder = decoder_tailored;
	/*
		Printing and Cleanup
	*/
	// Mean correction probability over all searched codes.
	double average = 0;
	for (size_t i = 0; i < n_codes_searched; i++)
	{
		average += r.probs[i];
	}
	average /= n_codes_searched;
	printf(" Codes Searched: %d\n Average correction probability: %e\n Best performance: %e\n", n_codes_searched, average, r.p_best);
	sym_print(code);
	sym_print(logicals);
	printf("Logical Error Channel\n");
	MatrixXcd lc = channel_logical(code, logicals, error_model, (void*)&model_data, decoder, (void*)&decoder_data);
	std::cout << lc << std::endl;
	// Free allocated objects
	sym_free(code);
	sym_free(logicals);
	free(r.probs);
	// NOTE(review): decoder_data (allocated by tailor_decoder) is never freed
	// here — looks like a leak; confirm the cleanup API in tailored.h.
	return 0;
}
| 21.776316
| 130
| 0.703927
|
[
"model"
] |
c787dffd0a8fba3bd0d4fec706ae4f2003c80bab
| 572
|
h
|
C
|
Game.h
|
koadma/robotization-dilemma
|
e7ecdb669346f39490de29d55bac99e25f7ab85f
|
[
"MIT"
] | 1
|
2017-02-12T13:23:24.000Z
|
2017-02-12T13:23:24.000Z
|
Game.h
|
koadma/robotization-dilemma
|
e7ecdb669346f39490de29d55bac99e25f7ab85f
|
[
"MIT"
] | 1
|
2017-02-15T23:25:49.000Z
|
2017-02-16T20:19:30.000Z
|
Game.h
|
koadma/robotization-dilemma
|
e7ecdb669346f39490de29d55bac99e25f7ab85f
|
[
"MIT"
] | null | null | null |
#ifndef __GAME_H__
#define __GAME_H__
#include <vector>
#include "Bubble.h"
// Top-level game controller: owns the ships, bubbles and projectiles and
// drives the round loop until a winner is determined.
class Game
{
private:
	unsigned int numOfShips;          // Number of ships participating
	std::vector<Ship> ships;          // All ships in play
	int roundNumber = 0;              // Current round counter
	WinManager winManager;            // Tracks win conditions — confirm semantics in WinManager
	BubbleManager bubbles;            // Manages bubbles (see Bubble.h)
	std::vector<int> projectiles;     // Active projectiles — presumably stored by id/index; confirm
	// Prompt the next player before handing over control.
	void askToContinue(int nextPlayer) const;
	// Present the current player with their information.
	void giveInformation(int currentPlayer) const;
	// Run rounds until the game ends.
	void mainGameLoop();
	// Play a single round.
	void playRound();
	void manageBubbles();
	void manageProjectiles();
	void moveShips();
	void manageDetections();
	// Display the end-of-game screen.
	void giveWinScreen();
public:
	// Construct a game for the given number of ships.
	Game(unsigned int numOfShips);
};
#endif
| 18.451613
| 48
| 0.734266
|
[
"vector"
] |
c78a4a75bd4beaf46b6af885de7f021172391e7a
| 693
|
h
|
C
|
leetcode-cpp/lib/Tree.h
|
emacslisp/cpp
|
8230f81117d6f64adaa1696b0943cdb47505335a
|
[
"Apache-2.0"
] | null | null | null |
leetcode-cpp/lib/Tree.h
|
emacslisp/cpp
|
8230f81117d6f64adaa1696b0943cdb47505335a
|
[
"Apache-2.0"
] | null | null | null |
leetcode-cpp/lib/Tree.h
|
emacslisp/cpp
|
8230f81117d6f64adaa1696b0943cdb47505335a
|
[
"Apache-2.0"
] | null | null | null |
#include <vector>
#include <iostream>
#include <queue>
#include <algorithm>
#include "TreeNode.h"
#include "Node.h"
using namespace std;
// Collection of binary/N-ary tree construction and query helpers
// (LeetCode-style exercises operating on TreeNode/Node).
class Tree
{
public:
	Tree();
	~Tree();
	// Build a binary tree from an int array — presumably a level-order
	// encoding; confirm how absent children are represented.
	TreeNode* buildNode(vector<int> a);
	// Build an N-ary tree from an int array encoding.
	Node* buildNaryTreeNode(vector<int> c);
	// Serialize a binary tree back into an int array.
	vector<int> buildTreeToArray(TreeNode* root);
	// Build a height-balanced tree from an array (delegates to
	// buildBalancedTree below).
	TreeNode* buildBalancedTreeFromArray(vector<int> a);
	// Height of a binary tree.
	int TreeHeight(TreeNode *root);
	// Height of an N-ary tree.
	int TreeHeight(Node *root);
	// Find the node holding value x, or presumably NULL if absent — confirm.
	TreeNode* searchNodeByValue(TreeNode *root, int x);
	// Length of the longest path in the tree (diameter).
	int longestPathInTree(TreeNode *root);
	// Count/locate paths summing to `sum` — confirm exact contract.
	int pathSum(TreeNode* root, int sum);
	// Demo driver for the helpers above.
	void MainEntry();
private:
	// Recursive worker over nums[start..end] for buildBalancedTreeFromArray.
	TreeNode* buildBalancedTree(vector<int>& nums, int start, int end);
};
| 23.1
| 71
| 0.695527
|
[
"vector"
] |
c78bc09f0e8d92ac329214ef28b7bdf90e458079
| 15,767
|
h
|
C
|
include/cutlass/epilogue/threadblock/convolution_epilogue_tensor_op.h
|
MegEngine/cutlass
|
31798848e40c2752d4b3db193491a63b77455029
|
[
"BSD-3-Clause"
] | 44
|
2020-09-15T05:31:25.000Z
|
2022-03-22T08:02:02.000Z
|
include/cutlass/epilogue/threadblock/convolution_epilogue_tensor_op.h
|
MegEngine/cutlass
|
31798848e40c2752d4b3db193491a63b77455029
|
[
"BSD-3-Clause"
] | null | null | null |
include/cutlass/epilogue/threadblock/convolution_epilogue_tensor_op.h
|
MegEngine/cutlass
|
31798848e40c2752d4b3db193491a63b77455029
|
[
"BSD-3-Clause"
] | 7
|
2020-09-16T15:18:21.000Z
|
2022-03-28T10:06:11.000Z
|
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Epilogue for threadblock scoped GEMMs using SIMT.
The epilogue rearranges the result of a matrix product through shared memory
to match canonical tensor layouts in global memory. Epilogues support
conversion and reduction operations.
*/
/**
* \file include/cutlass/epilogue/threadblock/convolution_epilogue_tensor_op.h
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#pragma once
#include "cutlass/array.h"
#include "cutlass/cutlass.h"
#include "cutlass/numeric_types.h"
#include "cutlass/gemm/gemm.h"
#include "cutlass/epilogue/thread/conversion_op.h"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/thread/linear_combination_clamp.h"
#include "cutlass/epilogue/thread/reduction_op.h"
#include "cutlass/transform/threadblock/regular_tile_iterator_pitch_linear.h"
#include "cutlass/epilogue/threadblock/convolution_thread_map_tensor_op.h"
#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h"
#include "cutlass/epilogue/warp/interleaved_tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/bias_tile_iterator.h"
#include "cutlass/epilogue/threadblock/convolution_epilogue.h"
#include "cutlass/epilogue/threadblock/epilogue.h"
#include "cutlass/epilogue/threadblock/interleaved_shared_load_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/tensor_predicated_tile_iterator_tensor_op.h"
#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace epilogue {
namespace threadblock {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Primary template: selects an epilogue configuration for convolution with
/// tensor-op warp MMAs, specialized below on the destination/bias layouts and
/// on whether the shared-memory load stage is elided.
template <typename Shape_, ///< Threadblock-level tile size (concept:
          ///< GemmShape)
          typename LayoutDst_, ///< Layout type for output tensor
          typename LayoutBias_, ///< Layout type for bias tensor
          typename WarpMmaTensorOp_, ///< Warp-level mma operator
          typename OutputOp_, ///< Thread-level epilogue operator
          int ElementsPerAccess, ///< Elements per access
          bool WithoutSharedLoad = false>
struct ConvolutionEpilogueTensorOp;
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Specialization: NHWC output and bias tensors, with the standard epilogue
/// that exchanges accumulators through shared memory (WithoutSharedLoad = false).
template <typename Shape_, typename WarpMmaTensorOp_, typename OutputOp_,
          int ElementsPerAccess>
struct ConvolutionEpilogueTensorOp<Shape_, layout::TensorNHWC,
                                   layout::TensorNHWC, WarpMmaTensorOp_,
                                   OutputOp_, ElementsPerAccess, false> {
    using Shape = Shape_;
    using WarpMmaTensorOp = WarpMmaTensorOp_;
    static const int kPartitionsK = Shape::kK / WarpMmaTensorOp::Shape::kK;
    using OutputOp = OutputOp_;
    static int const kElementsPerAccess = ElementsPerAccess;
    using ElementOutput = typename OutputOp::ElementOutput;
    using LayoutDst = layout::TensorNHWC;
    using ElementBias = typename OutputOp::ElementBias;
    using LayoutBias = layout::TensorNHWC;
    using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
    //
    // Thread map
    //
    using OutputTileThreadMap =
            typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
                    Shape, typename WarpMmaTensorOp::Shape, kPartitionsK,
                    ElementOutput, kElementsPerAccess>::Type;
    using OutputTileIterator =
            cutlass::epilogue::threadblock::PredicatedTileIterator<
                    OutputTileThreadMap, ElementOutput>;
    using AccumulatorFragmentIterator =
            cutlass::epilogue::warp::FragmentIteratorTensorOp<
                    typename WarpMmaTensorOp::Shape,
                    typename WarpMmaTensorOp::Policy::Operator::Shape,
                    typename WarpMmaTensorOp::Policy::Operator::ElementC,
                    typename WarpMmaTensorOp::Policy::Operator::FragmentC,
                    typename WarpMmaTensorOp::LayoutC>;
    /// Support several implementations depending on structure of epilogue
    using DefaultIterators = detail::DefaultIteratorsTensorOp<
            ElementOutput, ElementAccumulator, kElementsPerAccess, Shape,
            typename WarpMmaTensorOp::Shape,
            typename WarpMmaTensorOp::Policy::Operator::Shape,
            typename OutputTileThreadMap::CompactedThreadMap>;
    using WarpTileIterator = typename DefaultIterators::WarpTileIterator;
    using SharedLoadIterator = typename DefaultIterators::SharedLoadIterator;
    /// Hard-coded padding elements added
    using Padding = cutlass::MatrixShape<
            0, 64 / sizeof_bits<ElementAccumulator>::value * 4>;
    // Per-channel bias is streamed in alongside the output tile.
    using BiasTileIterator = cutlass::epilogue::threadblock::
            PerChannelBiasPredicatedTileIteratorTensorOp<
                    OutputTileThreadMap, LayoutBias, ElementBias,
                    OutputTileThreadMap::kElementsPerAccess, false>;
    //
    // Define the epilogue
    //
    using Epilogue = cutlass::epilogue::threadblock::ConvolutionEpilogue<
            Shape, LayoutDst, kPartitionsK, WarpMmaTensorOp, OutputTileIterator,
            AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator,
            BiasTileIterator, OutputOp, Padding>;
};
/// Specialization: interleaved NCxHWx output and bias tensors (e.g. NC4HW4 /
/// NC32HW32), using interleaved warp/shared-memory iterators.
template <typename Shape_, typename WarpMmaTensorOp_, typename OutputOp_,
          int Interleaved, int ElementsPerAccess>
struct ConvolutionEpilogueTensorOp<Shape_, layout::TensorNCxHWx<Interleaved>,
                                   layout::TensorNCxHWx<Interleaved>,
                                   WarpMmaTensorOp_, OutputOp_,
                                   ElementsPerAccess, false> {
    using Shape = Shape_;
    using WarpMmaTensorOp = WarpMmaTensorOp_;
    using OutputOp = OutputOp_;
    static int const kElementsPerAccess = ElementsPerAccess;
    static const int kPartitionsK = Shape::kK / WarpMmaTensorOp::Shape::kK;
    static int const kInterleaved = Interleaved;
    using ElementOutput = typename OutputOp::ElementOutput;
    using LayoutDst = layout::TensorNCxHWx<kInterleaved>;
    using ElementBias = typename OutputOp::ElementBias;
    using LayoutBias = layout::TensorNCxHWx<kInterleaved>;
    using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
    //
    // Thread map
    //
    using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
            ConvolutionThreadMapTensorOp<
                    Shape, typename WarpMmaTensorOp::Shape, LayoutDst,
                    typename WarpMmaTensorOp::Policy, ElementOutput,
                    kElementsPerAccess>::Type;
    using OutputTileIterator = cutlass::epilogue::threadblock::
            TensorPredicatedTileIteratorTensorOp<OutputTileThreadMap, LayoutDst,
                                                 ElementOutput>;
    using AccumulatorFragmentIterator =
            cutlass::epilogue::warp::FragmentIteratorTensorOp<
                    typename WarpMmaTensorOp::Shape,
                    typename WarpMmaTensorOp::Policy::Operator::Shape,
                    typename WarpMmaTensorOp::Policy::Operator::ElementC,
                    typename WarpMmaTensorOp::Policy::Operator::FragmentC,
                    typename WarpMmaTensorOp::LayoutC, LayoutDst>;
    using WarpTileIterator =
            cutlass::epilogue::warp::InterleavedTileIteratorTensorOp<
                    typename WarpMmaTensorOp::Shape,
                    typename WarpMmaTensorOp::Policy::Operator::Shape,
                    ElementAccumulator, typename WarpMmaTensorOp::LayoutC,
                    LayoutDst>;
    using SharedLoadIterator = cutlass::epilogue::threadblock::
            InterleavedSharedLoadIteratorTensorOp<
                    typename OutputTileThreadMap::CompactedThreadMap,
                    ElementAccumulator, kInterleaved>;
    using BiasTileIterator = cutlass::epilogue::threadblock::
            PerChannelBiasPredicatedTileIteratorTensorOp<
                    OutputTileThreadMap, LayoutBias, ElementBias,
                    OutputTileThreadMap::kElementsPerAccess>;
    /// Hard-coded padding elements added
    using Padding = typename WarpTileIterator::Padding;
    //
    // Define the epilogue
    //
    using Epilogue = cutlass::epilogue::threadblock::ConvolutionEpilogue<
            Shape, LayoutDst, kPartitionsK, WarpMmaTensorOp, OutputTileIterator,
            AccumulatorFragmentIterator, WarpTileIterator, SharedLoadIterator,
            BiasTileIterator, OutputOp, Padding, true>;
};
// Partial specialization for an NHWC-source / NHWC-destination convolution
// epilogue.  The trailing `true` template argument selects the variant that
// bypasses the shared-memory accumulator exchange, so no WarpTileIterator /
// SharedLoadIterator pair is defined here.
template <typename Shape_, typename WarpMmaTensorOp_, typename OutputOp_,
int ElementsPerAccess>
struct ConvolutionEpilogueTensorOp<Shape_, layout::TensorNHWC,
layout::TensorNHWC, WarpMmaTensorOp_,
OutputOp_, ElementsPerAccess, true> {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
// Number of warp-level K partitions spanned by the threadblock tile.
static const int kPartitionsK = Shape::kK / WarpMmaTensorOp::Shape::kK;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutDst = layout::TensorNHWC;
using ElementBias = typename OutputOp::ElementBias;
using LayoutBias = layout::TensorNHWC;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
// Maps threads of the threadblock onto the output tile for global stores.
using OutputTileThreadMap =
typename cutlass::epilogue::threadblock::DefaultThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK,
ElementOutput, kElementsPerAccess>::Type;
// Guarded (predicated) iterator used to write the output tensor.
using OutputTileIterator =
cutlass::epilogue::threadblock::PredicatedTileIterator<
OutputTileThreadMap, ElementOutput>;
// Walks the warp-level accumulator fragments produced by the MMA.
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
typename WarpMmaTensorOp::LayoutC>;
// Per-channel bias loads; final `false` flag — presumably disables a
// transpose/interleave path, TODO confirm against the iterator definition.
using BiasTileIterator = cutlass::epilogue::threadblock::
PerChannelBiasPredicatedTileIteratorTensorOp<
OutputTileThreadMap, LayoutBias, ElementBias,
OutputTileThreadMap::kElementsPerAccess, false>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::
ConvolutionEpilogueWithoutSharedLoad<
Shape, LayoutDst, kPartitionsK, WarpMmaTensorOp,
OutputTileIterator, AccumulatorFragmentIterator,
BiasTileIterator, OutputOp>;
};
// Partial specialization for interleaved NCxHWx source/destination layouts,
// again in the "without shared load" configuration (trailing `true`).
template <typename Shape_, typename WarpMmaTensorOp_, typename OutputOp_,
int Interleaved, int ElementsPerAccess>
struct ConvolutionEpilogueTensorOp<Shape_, layout::TensorNCxHWx<Interleaved>,
layout::TensorNCxHWx<Interleaved>,
WarpMmaTensorOp_, OutputOp_,
ElementsPerAccess, true> {
using Shape = Shape_;
using WarpMmaTensorOp = WarpMmaTensorOp_;
using OutputOp = OutputOp_;
static int const kElementsPerAccess = ElementsPerAccess;
// Number of warp-level K partitions spanned by the threadblock tile.
static const int kPartitionsK = Shape::kK / WarpMmaTensorOp::Shape::kK;
// Channel-interleave factor of the NCxHWx tensor layout.
static int const kInterleaved = Interleaved;
using ElementOutput = typename OutputOp::ElementOutput;
using LayoutDst = layout::TensorNCxHWx<kInterleaved>;
using ElementBias = typename OutputOp::ElementBias;
using LayoutBias = layout::TensorNCxHWx<kInterleaved>;
using ElementAccumulator = typename WarpMmaTensorOp::ElementC;
//
// Thread map
//
// Interleave-aware thread mapping for the output tile.
using OutputTileThreadMap = typename cutlass::epilogue::threadblock::
InterleavedConvolutionThreadMapTensorOp<
Shape, typename WarpMmaTensorOp::Shape, kPartitionsK,
ElementOutput, kElementsPerAccess, kInterleaved>::Type;
// Predicated iterator that understands the interleaved destination layout.
using OutputTileIterator = cutlass::epilogue::threadblock::
InterleavedConvPredicatedTileIterator<OutputTileThreadMap,
ElementOutput, kInterleaved>;
using AccumulatorFragmentIterator =
cutlass::epilogue::warp::FragmentIteratorTensorOp<
typename WarpMmaTensorOp::Shape,
typename WarpMmaTensorOp::Policy::Operator::Shape,
typename WarpMmaTensorOp::Policy::Operator::ElementC,
typename WarpMmaTensorOp::Policy::Operator::FragmentC,
// can reuse the gemm version here to do element selection
layout::ColumnMajorInterleaved<kInterleaved>>;
using BiasTileIterator = cutlass::epilogue::threadblock::
PerChannelBiasPredicatedTileIteratorTensorOp<
OutputTileThreadMap, LayoutBias, ElementBias,
OutputTileThreadMap::kElementsPerAccess, false>;
//
// Define the epilogue
//
using Epilogue = cutlass::epilogue::threadblock::
ConvolutionEpilogueWithoutSharedLoad<
Shape, LayoutDst, kPartitionsK, WarpMmaTensorOp,
OutputTileIterator, AccumulatorFragmentIterator,
BiasTileIterator, OutputOp>;
};
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace threadblock
} // namespace epilogue
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| 46.373529
| 100
| 0.658337
|
[
"shape",
"transform"
] |
c78e20f88abc7ce5e3444e7cf889bb57bcd9e622
| 3,455
|
h
|
C
|
component/oai-udr/src/api_server/model/NetworkAreaInfo_2.h
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
component/oai-udr/src/api_server/model/NetworkAreaInfo_2.h
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
component/oai-udr/src/api_server/model/NetworkAreaInfo_2.h
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* [email protected]
*/
/**
* Nudr_DataRepository API OpenAPI file
* Unified Data Repository Service. © 2020, 3GPP Organizational Partners (ARIB,
* ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* The version of the OpenAPI document: 2.1.2
*
*
* NOTE: This class is auto generated by OpenAPI Generator
* (https://openapi-generator.tech). https://openapi-generator.tech Do not edit
* the class manually.
*/
/*
* NetworkAreaInfo_2.h
*
* Describes a network area information in which the NF service consumer
* requests the number of UEs.
*/
#ifndef NetworkAreaInfo_2_H_
#define NetworkAreaInfo_2_H_
#include <nlohmann/json.hpp>
#include <vector>
#include "Ecgi.h"
#include "GlobalRanNodeId.h"
#include "Ncgi.h"
#include "Tai.h"
namespace oai::udr::model {
/// <summary>
/// Describes a network area information in which the NF service consumer
/// requests the number of UEs.
/// </summary>
class NetworkAreaInfo_2 {
public:
NetworkAreaInfo_2();
virtual ~NetworkAreaInfo_2();
// Validates the object's members against the OpenAPI schema constraints.
void validate();
/////////////////////////////////////////////
/// NetworkAreaInfo_2 members
/// <summary>
/// Contains a list of E-UTRA cell identities.
/// </summary>
std::vector<Ecgi> &getEcgis();
void setEcgis(std::vector<Ecgi> const &value);
bool ecgisIsSet() const;
void unsetEcgis();
/// <summary>
/// Contains a list of NR cell identities.
/// </summary>
std::vector<Ncgi> &getNcgis();
void setNcgis(std::vector<Ncgi> const &value);
bool ncgisIsSet() const;
void unsetNcgis();
/// <summary>
/// Contains a list of NG RAN nodes.
/// </summary>
std::vector<GlobalRanNodeId> &getGRanNodeIds();
void setGRanNodeIds(std::vector<GlobalRanNodeId> const &value);
bool gRanNodeIdsIsSet() const;
void unsetGRanNodeIds();
/// <summary>
/// Contains a list of tracking area identities.
/// </summary>
std::vector<Tai> &getTais();
void setTais(std::vector<Tai> const &value);
bool taisIsSet() const;
void unsetTais();
// JSON (de)serialization hooks used by nlohmann::json via ADL.
friend void to_json(nlohmann::json &j, const NetworkAreaInfo_2 &o);
friend void from_json(const nlohmann::json &j, NetworkAreaInfo_2 &o);
protected:
// Each optional field is paired with an "IsSet" flag so that unset
// members can be omitted when serializing.
std::vector<Ecgi> m_Ecgis;
bool m_EcgisIsSet;
std::vector<Ncgi> m_Ncgis;
bool m_NcgisIsSet;
std::vector<GlobalRanNodeId> m_GRanNodeIds;
bool m_GRanNodeIdsIsSet;
std::vector<Tai> m_Tais;
bool m_TaisIsSet;
};
} // namespace oai::udr::model
#endif /* NetworkAreaInfo_2_H_ */
| 30.575221
| 81
| 0.693198
|
[
"vector",
"model"
] |
c79727a050f0f36ec7f21ab177fdc4460a89351d
| 33,294
|
c
|
C
|
Swoosh.c
|
pyroticinsanity/tachyon
|
fbde4cecb8e4f6707f458c1f51b7d0b82e8fcde5
|
[
"MIT"
] | 3
|
2019-08-26T18:12:24.000Z
|
2020-10-25T21:14:16.000Z
|
Swoosh.c
|
pyroticinsanity/tachyon
|
fbde4cecb8e4f6707f458c1f51b7d0b82e8fcde5
|
[
"MIT"
] | null | null | null |
Swoosh.c
|
pyroticinsanity/tachyon
|
fbde4cecb8e4f6707f458c1f51b7d0b82e8fcde5
|
[
"MIT"
] | 1
|
2020-05-07T14:59:02.000Z
|
2020-05-07T14:59:02.000Z
|
#include "stdafx.h"
#include "UI.h"
#include "Objects.h"
#include "Dispatch.h"
#include "CSB.h"
#include "Transition.h"
#include <string.h>
//#include <TextUtils.h>
#define A0B(x) ( (unsigned char*)A0)[x]
#define A0W(x) ((unsigned short*)(((unsigned char*)A0)+x))[0]
#define A0L(x) ((unsigned long*)(((unsigned char*)A0)+x))[0]
#define A1B(x) ( (unsigned char*)A1)[x]
#define A1W(x) ((unsigned short*)(((unsigned char*)A1)+x))[0]
#define A1L(x) ((unsigned long*)(((unsigned char*)A1)+x))[0]
#define A5B(x) ( (unsigned char*)A5)[x]
#define A5W(x) ((unsigned short*)(((unsigned char*)A5)+x))[0]
#define A5L(x) ((unsigned long*)(((unsigned char*)A5)+x))[0]
#define A6B(x) ( (unsigned char*)A6)[x]
#define A6W(x) ((unsigned short*)(((unsigned char*)A6)+x))[0]
#define A6L(x) ((unsigned long*)(((unsigned char*)A6)+x))[0]
static unsigned char sectors3_3 [1536] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x55,0x50,0x45,
0x52,0x47,0x41,0x55,0x30,0x30,0x30,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x41,0x55,0x54,0x4F,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF4,0x01,
0x00,0x02,0x02,0x00,0x00,0x00,0x00,0x00,0xE5,0x55,0x4E,0x47,0x45,0x4F,0x4E,0x20,0x50,0x52,0x47,0x20,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x57,0x00,0x74,0x0B,0x09,0x00,0xAC,0x29,0x02,0x00,0x44,0x55,0x4E,0x47,0x45,0x4F,0x4E,0x20,0x43,0x46,0x50,0x20,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x39,0x01,0x47,0x90,0x40,0x00,0xC6,0x4A,0x00,0x00,0x44,0x55,0x4E,0x47,0x45,0x4F,
0x4E,0x20,0x44,0x41,0x54,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3C,0x01,0x47,0x90,0x53,0x00,0xA2,0x82,0x00,0x00,
0x44,0x55,0x4E,0x47,0x45,0x4F,0x4E,0x20,0x45,0x58,0x45,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x43,0x01,0x47,0x90,
0x74,0x00,0xAC,0x29,0x02,0x00,0x47,0x52,0x41,0x50,0x48,0x49,0x43,0x53,0x44,0x41,0x54,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x51,0x01,0x47,0x90,0xFF,0x00,0x2D,0x26,0x04,0x00,0x53,0x45,0x43,0x54,0x4F,0x52,0x53,0x20,0x20,0x20,0x20,0x20,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x6F,0x01,0x47,0x90,0x09,0x02,0x00,0x10,0x00,0x00,0x53,0x57,0x4F,0x4F,0x53,0x48,0x20,0x20,
0x49,0x4D,0x47,0x20,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x71,0x01,0x47,0x90,0x0D,0x02,0xC8,0x0B,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00};
static char bootsector[512] = {0x60,0x38,0x4C,0x6F,0x61,0x64,0x65,0x72,0x00,0x00,0x00,0x00,0x02,0x02,0x01,0x00,0x02,0x10,0x00,0x24,0x03,0xF8,0x02,0x00,0x0A,0x00,
0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x57,0x4F,0x4F,0x53,0x48,
0x20,0x20,0x49,0x4D,0x47,0x00,0x70,0x07,0x32,0x3C,0x07,0x77,0x48,0xE7,0xC0,0x00,0x3F,0x3C,0x00,0x25,0x4E,0x4E,0x54,0x8F,0x4C,0xDF,
0x00,0x03,0x20,0x7C,0xFF,0xFF,0x82,0x40,0x30,0x81,0x04,0x41,0x01,0x11,0x51,0xC8,0xFF,0xE2,0x20,0x7C,0xFF,0xFF,0x82,0x40,0x70,0x0F,
0x42,0x58,0x51,0xC8,0xFF,0xFC,0x33,0xFA,0xFF,0xAE,0x00,0x00,0x04,0x82,0x3F,0x39,0x00,0x00,0x04,0x46,0x3F,0x3C,0x00,0x07,0x4E,0x4D,
0x58,0x4F,0x4A,0x80,0x67,0x00,0x00,0xF6,0x2A,0x40,0x41,0xFA,0xFF,0x9C,0x4A,0x90,0x66,0x06,0x20,0xB9,0x00,0x00,0x04,0x32,0x30,0x2D,
0x00,0x08,0xE1,0x48,0xD0,0x80,0x38,0x40,0xD9,0xFA,0xFF,0x84,0x30,0x3A,0xFF,0x76,0x67,0x10,0x3C,0x3A,0xFF,0x72,0x38,0x3A,0xFF,0x70,
0x26,0x7A,0xFF,0x6E,0x60,0x00,0x00,0xB4,0x3C,0x2D,0x00,0x0A,0x38,0x2D,0x00,0x08,0xD8,0x6D,0x00,0x06,0x26,0x7A,0xFF,0x5E,0x61,0x00,
0x00,0xB0,0x66,0x00,0x00,0xAA,0x20,0x4C,0x30,0x2D,0x00,0x06,0xE1,0x48,0xE3,0x48,0x41,0xF0,0x00,0x00,0x43,0xFA,0xFF,0x48,0x90,0xFC,
0x00,0x20,0xB1,0xCC,0x6D,0x00,0x00,0x8E,0x70,0x0A,0x12,0x30,0x00,0x00,0xB2,0x31,0x00,0x00,0x66,0xEA,0x51,0xC8,0xFF,0xF4,0x7E,0x00,
0x1E,0x28,0x00,0x1B,0xE1,0x4F,0x1E,0x28,0x00,0x1A,0x2C,0x7A,0xFF,0x1A,0x26,0x7A,0xFF,0x12,0x42,0x84,0x0C,0x47,0x0F,0xF0,0x6C,0x52,
0x36,0x07,0x55,0x43,0xC6,0xED,0x00,0x02,0xD6,0x6D,0x00,0x0C,0x0C,0x44,0x00,0x40,0x6C,0x08,0x4A,0x44,0x67,0x0E,0xB6,0x45,0x67,0x10,
0x61,0x46,0x66,0x42,0xE1,0x8C,0xE3,0x8C,0xD7,0xC4,0x3C,0x03,0x3A,0x03,0x42,0x84,0xD8,0x6D,0x00,0x02,0xDA,0x6D,0x00,0x02,0x34,0x07,
0xE2,0x4A,0xD4,0x47,0x12,0x36,0x20,0x01,0xE1,0x49,0x12,0x36,0x20,0x00,0x08,0x07,0x00,0x00,0x67,0x02,0xE8,0x49,0x02,0x41,0x0F,0xFF,
0x3E,0x01,0x60,0xA8,0x4A,0x44,0x67,0x04,0x61,0x0A,0x66,0x06,0x2F,0x3A,0xFE,0xAC,0x4E,0x75,0x60,0xFE,0x3F,0x39,0x00,0x00,0x04,0x46,
0x3F,0x06,0x3F,0x04,0x2F,0x0B,0x42,0x67,0x3F,0x3C,0x00,0x04,0x4E,0x4D,0xDE,0xFC,0x00,0x0E,0x4A,0x40,0x4E,0x75,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x24,0x7D};
/* BIOS Parameter Block as returned by Getbpb() on Atari TOS:
 * describes the on-disk geometry of a FAT floppy. */
struct bpb
{
WORD recsiz; // sector size in bytes //
WORD clsiz; // cluster size in sectors //
WORD clsizb; // cluster size in bytes //
WORD rdlen; // root directory length in records //
WORD fsiz; // fat size in records //
WORD fatrec; // first fat record (of last fat) //
WORD datrec; // first data record //
WORD numcl; // number of data clusters available //
WORD b_flags; // flags; 0 here means FAT12 //
};
/* Per-drive state for the two emulated floppy units (A: and B:). */
static struct flop_info {
WORD cur_track; // track the head currently sits on //
WORD rate; // step rate //
WORD spt; // number of sectors per track //
WORD sides; // number of sides, or -1 if geometry not inited //
BYTE serial[3]; // the serial number taken from the bootsector //
BYTE wp; // != 0 means write protected //
} finfo[2];
static struct bpb flop_bpb[2];
/* FAT boot-sector layout (offsets in hex); multi-byte fields are stored
 * little-endian and must be read with getiword(). */
struct bs {
/* 0 */ UBYTE bra[2];
/* 2 */ UBYTE loader[6];
/* 8 */ UBYTE serial[3];
/* b */ UBYTE bps[2]; // bytes per sector
/* d */ UBYTE spc; // sectors per cluster
/* e */ UBYTE res[2]; // number of reserved sectors
/* 10 */ UBYTE fat; // number of FATs
/* 11 */ UBYTE dir[2]; // number of DIR root entries
/* 13 */ UBYTE sec[2]; // total number of sectors
/* 15 */ UBYTE media; // media descriptor
/* 16 */ UBYTE spf[2]; // sectors per FAT
/* 18 */ UBYTE spt[2]; // sectors per track
/* 1a */ UBYTE sides[2]; // number of sides
/* 1c */ UBYTE hid[2]; // number of hidden sectors
/* 1e */ UBYTE data[0x1e0];
/* 1fe */ UBYTE cksum[2];
};
typedef struct bpb BPB;
static char memory[8192]={'m','e','m','o','r','y',0,};
static unsigned char _bsdata[] = { /*0xA10*/0x00,0x00,
/*0xA12*/0x00,0x00,0x00,0x00,
/*0xA16*/0x00,0x04,0x00,0x00,
/*0xA1A*/0x00,0x00,0x00,0x00,
/*0xA1E*/'S','W','O','O','S','H',' ',' ','I','M','G',0x00};
static unsigned short* A10=(unsigned short*)&_bsdata[0];
static unsigned short* A12=(unsigned short*)&_bsdata[2];
static unsigned short* A14=(unsigned short*)&_bsdata[4];
static unsigned long* A16=(unsigned long*)&_bsdata[6];
static unsigned long* A1A=(unsigned long*)&_bsdata[10];
static unsigned char* A1E=&_bsdata[14];
static long _432 = (long)memory;
i32 _0DD78B70(i16,i16,i32);
void A16LDCode();
/*
0- 1 bra
2- 7 loader
8- a serial number
b- c bytes per sector
d sectors per cluster
e- f number of reserved sectors
10 number of fatek
11-12 number of root dir entries
13-14 total number of sectors
15 media descriptor
16-17 sectors per fat
18-19 sectors per track
1a-1b number of sides
1c-1d number of hidden sectors
1e-1fd data
1fe-1ff checksum
*/
/* Read a 16-bit little-endian value from a (possibly unaligned) byte
 * pointer, as used for the multi-byte fields of struct bs. */
static UWORD getiword(UBYTE *addr)
{
return (UWORD)(((UWORD)addr[1] << 8) | addr[0]);
}
/* Build the BIOS Parameter Block for floppy `dev` (0 or 1) from the
 * in-memory boot-sector image.  Returns a pointer to the drive's BPB
 * (cast to LONG, Getbpb-style), or 0 for an invalid drive number. */
static LONG flop_getbpb(WORD dev)
{
struct bs *boot;
struct bpb *pb;
struct flop_info *fi;
LONG direntries;
if (dev < 0 || dev > 1)
return 0;
/* parse the (already loaded) boot sector */
boot = (struct bs *)bootsector;
pb = &flop_bpb[dev];
fi = &finfo[dev];
pb->recsiz = getiword(boot->bps);
pb->clsiz = boot->spc;
pb->clsizb = pb->clsiz * pb->recsiz;
direntries = getiword(boot->dir);
pb->rdlen = (direntries * 32) / pb->recsiz;
pb->fsiz = getiword(boot->spf);
/* the structure of the floppy is assumed to be:
 *   bootsector, FATs, root directory, data clusters.
 * TODO: understand what to do with reserved or hidden sectors. */
pb->fatrec = 1 + pb->fsiz;
pb->datrec = pb->fatrec + pb->fsiz + pb->rdlen;
pb->numcl = (getiword(boot->sec) - pb->datrec) / boot->spc;
pb->b_flags = 0; // assume floppies are always in FAT12 //
// remember geometry and serial number //
fi->sides = getiword(boot->sides);
fi->spt = getiword(boot->spt);
fi->serial[0] = boot->serial[0];
fi->serial[1] = boot->serial[1];
fi->serial[2] = boot->serial[2];
return (LONG)pb;
}
/* 0DD78B70 MOVE.W $00000446,-(A7) | 3F39 0000 0446
0DD78B76 MOVE.W D6,-(A7) | 3F06
0DD78B78 MOVE.W D4,-(A7) | 3F04
0DD78B7A MOVE.L A3,-(A7) | 2F0B
0DD78B7C CLR.W -(A7) | 4267
0DD78B7E MOVE.W #$0004,-(A7) | 3F3C 0004
*/
/* Minimal Rwabs() stand-in.  The only request the translated boot code
 * ever issues here is a 3-sector read starting at sector 3, which is
 * satisfied from the embedded `sectors3_3` image; every other request
 * is a successful no-op.  Always returns 0 (E_OK). */
static short RWABS(short dev, short sector, short count, long addr, short flag, short sel)
{
(void)dev;
(void)flag;
(void)sel;
if (sector == 3 && count == 3)
memmove((void *)addr, sectors3_3, 1536);
return 0;
}
static i8 SWOOSHScreen[320*200/2];
static i16 FFFF8240[16] = { 0x777, 0x700, 0x070, 0x770, 0x007, 0x707, 0x077, 0x555,
0x333, 0x733, 0x373, 0x773, 0x337, 0x737, 0x377, 0x000};
void swoosh_bootsector();
extern void UpdateScreenArea(
i8 *STScreen,
int x0,
int y0,
int width,
int height,
i16 *palette,
bool paletteChanged,
i32 *pOldChecksum);
// Emulates the ST VSYNC wait: rather than waiting for a vertical blank,
// push the whole emulated screen (320x200) and the current palette
// (FFFF8240 shadow registers) to the host display.
void VSYNC()
{
i32 ignore; // checksum out-param from UpdateScreenArea; value unused
UpdateScreenArea(SWOOSHScreen,0,0,320,200,FFFF8240,TRUE,&ignore);
}
/* Entry point for the swoosh boot animation: paint the current frame
 * once, then execute the translated boot-sector code. */
void swoosh()
{
VSYNC(); /* same full-screen refresh the boot code itself performs */
swoosh_bootsector();
}
// Line-by-line C translation of the SWOOSH floppy boot sector (68000
// machine code; original disassembly retained in the comments).  The
// emulated data/address registers are modeled with the dReg type and
// the A?B/A?W/A?L accessor macros; control flow mirrors the original
// via gotos to labels named after the 68k addresses.  Overall flow:
// fade the palette, clear it, read the BPB, locate the "SWOOSH  IMG"
// directory entry, then follow its FAT12 cluster chain loading the
// file via RWABS, and finally jump to the loaded code (A16LDCode).
void swoosh_bootsector()
{
dReg D0,D1,D2,D3,D4,D5,D6,D7;
unsigned long A0,A1,A2,A3,A4,A5,A6,A7;
/* 0DD789F0 BRA.S *+$003A ; 0DD78A2A | 6038
*/ goto _0DD78A2A;
/* 0DD789F2 TDIVU.L $6572(A7),D4:D6 | 4C6F 6164 6572
0DD789F8 ORI.B #$00,D0 | 0000 0000
0DD789FC ANDI.B #$0100,D2 | 0202 0100
0DD78A00 ANDI.B #$24,(A0) ; '$' | 0210 0024
0DD78A04 BSET D1,SPAlarm | 03F8 0200
0DD78A08 EORI.B #$0100,D0 | 0A00 0100
0DD78A0C ORI.B #$00,D0 | 0000 0000
0DD78A10 ORI.B #$00,D0 | 0000 0000
0DD78A14 ORI.B #$04,D0 | 0000 0004
0DD78A18 ORI.B #$00,D0 | 0000 0000
0DD78A1C ORI.B #$5357,D0 ; 'W' | 0000 5357
0DD78A20 DC.W $4F4F ; ???? | 4F4F
0DD78A22 SUBQ.W #$1,A0 | 5348
0DD78A24 MOVE.L -(A0),D0 | 2020
0DD78A26 DC.W $494D ; ???? | 494D
0DD78A28 CHK.L D0,D3 | 4700
*/
// Fade: step palette register 0 from white ($777) down over 8 VSYNCs.
_0DD78A2A:
// 0DD78A2A MOVEQ #$07,D0 | 7007
// 0DD78A2C MOVE.W #$0777,D1 | 323C 0777
D0L = 7;
D1W = 0x777;
// 0DD78A30 MOVEM.L D0/D1,-(A7) | 48E7 C000
// 0DD78A34 MOVE.W #$0025,-(A7) | 3F3C 0025
// 0DD78A38 TRAP #$E | 4E4E
// 0DD78A3A ADDQ.L #$2,A7 | 548F
_0DD78A30:
VSYNC();
// 0DD78A3C MOVEM.L (A7)+,D0/D1 | 4CDF 0003
// 0DD78A40 MOVEA.L #$FFFF8240,A0 | 207C FFFF 8240
A0 = (long)FFFF8240;
// 0DD78A46 MOVE.W D1,(A0) | 3081
// 0DD78A48 SUBI.W #$0111,D1 | 0441 0111
// 0DD78A4C DBF D0,*-$001C ; 0DD78A30 | 51C8 FFE2
A0W(0) = D1W;
D1W -= 0x111;
if(D0W-->=0) goto _0DD78A30;
// 0DD78A50 MOVEA.L #$FFFF8240,A0 | 207C FFFF 8240
// 0DD78A56 MOVEQ #$0F,D0 | 700F
// 0DD78A58 CLR.W (A0)+ | 4258
// 0DD78A5A DBF D0,*-$0002 ; 0DD78A58 | 51C8 FFFC
// Clear all 16 palette registers.
A0 = (long)FFFF8240;
D0L = 0xF;
_0DD78A58:
A0W(0) = 0; A0+=2;
if(D0W-->=0) goto _0DD78A58;
// 0DD78A5E MOVE.W *-$0050,$00000482 ; 0DD78A0E | 33FA FFAE 0000
// 0DD78A66 MOVE.W $00000446,-(A7) | 3F39 0000 0446
// 0DD78A6C MOVE.W #$0007,-(A7) | 3F3C 0007
// 0DD78A70 TRAP #$D | 4E4D
// 0DD78A72 ADDQ.W #$4,A7 | 584F
// 0DD78A74 TST.L D0 | 4A80
// 0DD78A76 BEQ *+$00F8 ; 0DD78B6E | 6700 00F6
//
// reading bios parameter block. we already have it in memory :-)
D0L = flop_getbpb(0);
if(!D0L) goto _0DD78B6E;
// 0DD78A7A MOVEA.L D0,A5 | 2A40
// 0DD78A7C LEA *-$0062,A0 ; 0DD78A1A | 41FA FF9C
// 0DD78A80 TST.L (A0) | 4A90
// 0DD78A82 BNE.S *+$0008 ; 0DD78A8A | 6606
// 0DD78A84 MOVE.L $00000432,(A0) | 20B9 0000 0432
// A5 = BPB pointer; default the load address to _membot ($432).
A5 = D0L;
A0 = (long)A1A;
if(A0L(0)!=0) goto _0DD78A8A;
A0L(0) = _432;
_0DD78A8A:
// 0DD78A8A MOVE.W $0008(A5),D0 | 302D 0008
// 0DD78A8E LSL.W #$8,D0 | E148
// 0DD78A90 ADD.L D0,D0 | D080
// 0DD78A92 MOVEA.W D0,A4 | 3840
D0L = A5W(8);
D0W = D0W << 8;
D0L = D0L + D0L;
A4 = D0W;
// 0DD78A94 ADDA.L *-$007A,A4 ; 0DD78A1A | D9FA FF84
// 0DD78A98 MOVE.W *-$0088,D0 ; 0DD78A10 | 303A FF76
// 0DD78A9C BEQ.S *+$0012 ; 0DD78AAE | 6710
A4 += A1A[0];
D0L = A10[0];
if(!D0W) goto _0DD78AAE;
// 0DD78A9E MOVE.W *-$008C,D6 ; 0DD78A12 | 3C3A FF72
// 0DD78AA2 MOVE.W *-$008E,D4 ; 0DD78A14 | 383A FF70
// 0DD78AA6 MOVEA.L *-$0090,A3 ; 0DD78A16 | 267A FF6E
// 0DD78AAA BRA *+$00B6 ; 0DD78B60 | 6000 00B4
D6L = A12[0];
D4L = A14[0];
A3 = (long)A16[0];
goto _0DD78B60;
_0DD78AAE:
// 0DD78AAE MOVE.W $000A(A5),D6 | 3C2D 000A
// 0DD78AB2 MOVE.W $0008(A5),D4 | 382D 0008
// 0DD78AB6 ADD.W $0006(A5),D4 | D86D 0006
// 0DD78ABA MOVEA.L *-$00A0,A3 ; 0DD78A1A | 267A FF5E
// 0DD78ABE BSR *+$00B2 ; 0DD78B70 | 6100 00B0
// 0DD78AC2 BNE *+$00AC ; 0DD78B6E | 6600 00AA
// Read FAT + root directory sectors into the buffer at A3.
D6L = A5W(0xA);
D4L = A5W(0x8);
D4W += A5W(0x6);
A3 = (long)A1A[0];
D0L = _0DD78B70(D6W,D4W,A3);
if(D0L) goto _0DD78B6E;
// 0DD78AC6 MOVEA.L A4,A0 | 204C
// 0DD78AC8 MOVE.W $0006(A5),D0 | 302D 0006
// 0DD78ACC LSL.W #$8,D0 | E148
// 0DD78ACE LSL.W #$1,D0 | E348
// 0DD78AD0 LEA $00(A0,D0.W),A0 | 41F0 0000
// 0DD78AD4 LEA *-$00B6,A1 ; 0DD78A1E | 43FA FF48
// Scan the root directory backwards for the "SWOOSH  IMG" entry.
A0 = A4;
D0L = A5W(6);
D0W <<= 9;
A0 = A0 + D0W;
A1 = (long)A1E;
_0DD78AD8:
// 0DD78AD8 SUBA.W #$0020,A0 | 90FC 0020
// 0DD78ADC CMPA.L A4,A0 | B1CC
// 0DD78ADE BLT *+$0090 ; 0DD78B6E | 6D00 008E
A0 -= 0x20;
if(A0 < A4) goto _0DD78B6E;
// 0DD78AE2 MOVEQ #$0A,D0 | 700A
D0L = 0xA;
_0DD78AE4:
// 0DD78AE4 MOVE.B $00(A0,D0.W),D1 | 1230 0000
// 0DD78AE8 CMP.B $00(A1,D0.W),D1 | B231 0000
// 0DD78AEC BNE.S *-$0014 ; 0DD78AD8 | 66EA
// 0DD78AEE DBF D0,*-$000A ; 0DD78AE4 | 51C8 FFF4
D1B = A0B(D0W);
if(A1B(D0W) != D1B) goto _0DD78AD8;
if(D0W-->=0) goto _0DD78AE4;
// 0DD78AF2 MOVEQ #$00,D7 | 7E00
// 0DD78AF4 MOVE.B $001B(A0),D7 | 1E28 001B
// 0DD78AF8 LSL.W #$8,D7 | E14F
// 0DD78AFA MOVE.B $001A(A0),D7 | 1E28 001A
// 0DD78AFE MOVEA.L *-$00E4,A6 ; 0DD78A1A | 2C7A FF1A
// 0DD78B02 MOVEA.L *-$00EC,A3 ; 0DD78A16 | 267A FF12
// 0DD78B06 CLR.L D4 | 4284
// D7 = starting cluster from the directory entry (little-endian word).
D7L = 0;
D7B = A0B(0x1B);
D7W <<= 8;
D7B = A0B(0x1A);
A6 = (long)A1A[0];
A3 = (long)A16[0];
D4L = 0;
// FAT12 cluster-chain walk: coalesce contiguous clusters and read them
// in one RWABS call per contiguous run.
_0DD78B08:
// 0DD78B08 CMPI.W #$0FF0,D7 | 0C47 0FF0
// 0DD78B0C BGE.S *+$0054 ; 0DD78B60 | 6C52
// 0DD78B0E MOVE.W D7,D3 | 3607
// 0DD78B10 SUBQ.W #$2,D3 | 5543
// 0DD78B12 MULU.W $0002(A5),D3 | C6ED 0002
// 0DD78B16 ADD.W $000C(A5),D3 | D66D 000C
// 0DD78B1A CMPI.W #$0040,D4 | 0C44 0040
// 0DD78B1E BGE.S *+$000A ; 0DD78B28 | 6C08
// 0DD78B20 TST.W D4 | 4A44
// 0DD78B22 BEQ.S *+$0010 ; 0DD78B32 | 670E
// 0DD78B24 CMP.W D5,D3 | B645
// 0DD78B26 BEQ.S *+$0012 ; 0DD78B38 | 6710
if(D7W >= 0xFF0) goto _0DD78B60;
D3L = D7W;
D3W-=2;
D3W*=A5W(2);
D3W+=A5W(0xC);
if(D4W >= 0x40) goto _0DD78B28;
if(D4W == 0) goto _0DD78B32;
if(D5W == D3W) goto _0DD78B38;
_0DD78B28:
// 0DD78B28 BSR.S *+$0048 ; 0DD78B70 | 6146
// 0DD78B2A BNE.S *+$0044 ; 0DD78B6E | 6642
// 0DD78B2C LSL.L #$8,D4 | E18C
// 0DD78B2E LSL.L #$1,D4 | E38C
// 0DD78B30 ADDA.L D4,A3 | D7C4
D0L = _0DD78B70(D6W,D4W,A3);
if(D0W) goto _0DD78B6E;
D4L <<= 9;
A3 += D4L;
_0DD78B32:
// 0DD78B32 MOVE.W D3,D6 | 3C03
// 0DD78B34 MOVE.W D3,D5 | 3A03
// 0DD78B36 CLR.L D4 | 4284
D6W = D3W;
D5W = D3W;
D4L = 0;
_0DD78B38:
// 0DD78B38 ADD.W $0002(A5),D4 | D86D 0002
// 0DD78B3C ADD.W $0002(A5),D5 | DA6D 0002
// 0DD78B40 MOVE.W D7,D2 | 3407
// 0DD78B42 LSR.W #$1,D2 | E24A
// 0DD78B44 ADD.W D7,D2 | D447
// 0DD78B46 MOVE.B $01(A6,D2.W),D1 | 1236 2001
// 0DD78B4A LSL.W #$8,D1 | E149
// 0DD78B4C MOVE.B $00(A6,D2.W),D1 | 1236 2000
// 0DD78B50 BTST #$00,D7 | 0807 0000
// 0DD78B54 BEQ.S *+$0004 ; 0DD78B58 | 6702
// 0DD78B56 LSR.W #$4,D1 | E849
// Fetch the next 12-bit FAT entry (index * 1.5 bytes into the FAT).
D4W += A5W(2);
D5W += A5W(2);
D2W = D7W;
D2W >>= 1;
D2W += D7W;
D1L = A6B(1+D2W);
D1W <<= 8;
D1L = A6B(D2W);
if(!(D7W&1)) goto _0DD78B58;
D1W >>= 4;
_0DD78B58:
// 0DD78B58 ANDI.W #$0FFF,D1 | 0241 0FFF
// 0DD78B5C MOVE.W D1,D7 | 3E01
// 0DD78B5E BRA.S *-$0056 ; 0DD78B08 | 60A8
D1W &= 0xFFF;
D7W = D1W;
goto _0DD78B08;
_0DD78B60:
// 0DD78B60 TST.W D4 | 4A44
// 0DD78B62 BEQ.S *+$0006 ; 0DD78B68 | 6704
// 0DD78B64 BSR.S *+$000C ; 0DD78B70 | 610A
// 0DD78B66 BNE.S *+$0008 ; 0DD78B6E | 6606
// Flush the final pending run of sectors, then jump to the loaded code.
if(!D4W) goto _0DD78B68;
D0L = _0DD78B70(D6W,D4W,A3);
if(D0L) goto _0DD78B6E;
_0DD78B68:
// 0DD78B68 MOVE.L *-$0152,-(A7) ; 0DD78A16 | 2F3A FEAC
// 0DD78B6C RTS | 4E75
A16LDCode();
_0DD78B6E: //DebugStr("\pST Code hanged here!");
/*0DD78B6E BRA.S *+$0000 ; 0DD78B6E | 60FE
code here converted to subroutine _0DD78B70
0DD78B8C ORI.B #$00,D0 | 0000 0000
0DD78B90 ORI.B #$00,D0 | 0000 0000
0DD78B94 ORI.B #$00,D0 | 0000 0000
0DD78B98 ORI.B #$00,D0 | 0000 0000
0DD78B9C ORI.B #$00,D0 | 0000 0000
0DD78BA0 ORI.B #$00,D0 | 0000 0000
0DD78BA4 ORI.B #$00,D0 | 0000 0000
0DD78BA8 ORI.B #$00,D0 | 0000 0000
0DD78BAC ORI.B #$00,D0 | 0000 0000
0DD78BB0 ORI.B #$00,D0 | 0000 0000
0DD78BB4 ORI.B #$00,D0 | 0000 0000
0DD78BB8 ORI.B #$00,D0 | 0000 0000
0DD78BBC ORI.B #$00,D0 | 0000 0000
0DD78BC0 ORI.B #$00,D0 | 0000 0000
0DD78BC4 ORI.B #$00,D0 | 0000 0000
0DD78BC8 ORI.B #$00,D0 | 0000 0000
0DD78BCC ORI.B #$00,D0 | 0000 0000
0DD78BD0 ORI.B #$00,D0 | 0000 0000
0DD78BD4 ORI.B #$00,D0 | 0000 0000
0DD78BD8 ORI.B #$00,D0 | 0000 0000
0DD78BDC ORI.B #$00,D0 | 0000 0000
0DD78BE0 ORI.B #$00,D0 | 0000 0000
0DD78BE4 ORI.B #$00,D0 | 0000 0000
0DD78BE8 ORI.B #$00,D0 | 0000 0000
0DD78BEC ORI.B #$247D,D0 ; '}' | 0000 247D
*/
{}
}
// Translated boot-sector subroutine: issues a Rwabs(4, ...) read of d4
// sectors starting at sector d6 into the buffer at a3 on the boot drive.
// Returns the RWABS status (0 on success).
i32 _0DD78B70(i16 d6, i16 d4, i32 a3)
{
// 0DD78B70 MOVE.W $00000446,-(A7) | 3F39 0000 0446
// 0DD78B76 MOVE.W D6,-(A7) | 3F06
// 0DD78B78 MOVE.W D4,-(A7) | 3F04
// 0DD78B7A MOVE.L A3,-(A7) | 2F0B
// 0DD78B7C CLR.W -(A7) | 4267
// 0DD78B7E MOVE.W #$0004,-(A7) | 3F3C 0004
// 0DD78B82 TRAP #$D | 4E4D
// 0DD78B84 ADDA.W #$000E,A7 | DEFC 000E
// 0DD78B88 TST.W D0 | 4A40
// 0DD78B8A RTS | 4E75
return RWABS(0,d6,d4,a3,0,4);
}
// Stub invoked where the 68K original jumped back through a saved code
// pointer (MOVE.L *-$0152,-(A7); RTS -- see the disassembly above the
// call site). Re-enable the Debugger() call below to trap entry here
// while debugging the translated code.
void A16LDCode()
{
    //Debugger();
}
| 53.613527
| 174
| 0.51655
|
[
"geometry"
] |
c7998e9f0bccf8a305980d420c9caa1d8f1ef0d2
| 819
|
h
|
C
|
Source/Runtime/Engine/Public/ModelAsset.h
|
RadTuna/SoftRenderer
|
a2b2f652d13f80dc967279426ccadf0232a6b512
|
[
"MIT"
] | 2
|
2019-11-01T02:08:24.000Z
|
2021-08-06T03:42:52.000Z
|
Source/Runtime/Engine/Public/ModelAsset.h
|
RadTuna/SoftRenderer
|
a2b2f652d13f80dc967279426ccadf0232a6b512
|
[
"MIT"
] | null | null | null |
Source/Runtime/Engine/Public/ModelAsset.h
|
RadTuna/SoftRenderer
|
a2b2f652d13f80dc967279426ccadf0232a6b512
|
[
"MIT"
] | null | null | null |
#pragma once
#include <memory>
#include <vector>
#include <sstream>
#include "Asset.h"
// Per-vertex record produced by the OBJ loader (see ModelAsset below).
// Member order defines the in-memory vertex layout consumed by the
// renderer -- do not reorder.
struct VertexDataType
{
    Vector4 Position; // 4-component vertex position
    Vector2 UV;       // texture coordinate
    Vector3 Normal;   // surface normal
};
// Use OBJ File
// Mesh asset backed by an OBJ file. Owns flat vertex and index arrays
// ("without IBO" -- indices are plain UINTs, not a GPU index buffer).
class ModelAsset final : public Asset
{
public:

    explicit ModelAsset() noexcept = default;
    virtual ~ModelAsset() noexcept = default;

    // Parses the OBJ file at InPath and fills the vertex/index arrays.
    // without IBO
    void Load(const std::string& InPath) override;

private:

    std::unique_ptr<VertexDataType[]> mVertices;
    std::unique_ptr<UINT[]> mIndices;
    // Zero-initialized so the length getters are safe to call on a
    // default-constructed asset (before Load() has run); previously
    // these were left indeterminate.
    UINT mVerticesLength = 0;
    UINT mIndicesLength = 0;

public:

    // The pointer accessors return borrowed pointers owned by this
    // asset; they are NULL until a successful Load().
    FORCEINLINE VertexDataType* GetVertices() const { return mVertices.get(); }
    FORCEINLINE UINT* GetIndices() const { return mIndices.get(); }
    FORCEINLINE UINT GetVerticesLength() const { return mVerticesLength; }
    FORCEINLINE UINT GetIndicesLength() const { return mIndicesLength; }
};
| 19.046512
| 76
| 0.74359
|
[
"vector"
] |
c7bb25f3d7f1fc96695df2268d1dfd5929b885fd
| 6,304
|
h
|
C
|
3rdparty/aws-sdk-cpp-master/aws-cpp-sdk-logs/include/aws/logs/model/MetricTransformation.h
|
prateek-s/mesos
|
4b81147797e4d9a45e0b2f5e5634d4a214dbc4e8
|
[
"Apache-2.0"
] | 2
|
2019-02-08T21:29:57.000Z
|
2021-07-27T06:59:19.000Z
|
3rdparty/aws-sdk-cpp-master/aws-cpp-sdk-logs/include/aws/logs/model/MetricTransformation.h
|
prateek-s/mesos
|
4b81147797e4d9a45e0b2f5e5634d4a214dbc4e8
|
[
"Apache-2.0"
] | null | null | null |
3rdparty/aws-sdk-cpp-master/aws-cpp-sdk-logs/include/aws/logs/model/MetricTransformation.h
|
prateek-s/mesos
|
4b81147797e4d9a45e0b2f5e5634d4a214dbc4e8
|
[
"Apache-2.0"
] | null | null | null |
/*
* Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#pragma once
#include <aws/logs/CloudWatchLogs_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>

#include <utility>
namespace Aws
{
namespace Utils
{
namespace Json
{
class JsonValue;
} // namespace Json
} // namespace Utils
namespace CloudWatchLogs
{
namespace Model
{
class AWS_CLOUDWATCHLOGS_API MetricTransformation
{
public:
MetricTransformation();
MetricTransformation(const Aws::Utils::Json::JsonValue& jsonValue);
MetricTransformation& operator=(const Aws::Utils::Json::JsonValue& jsonValue);
Aws::Utils::Json::JsonValue Jsonize() const;
/**
* <p>Name of the metric.</p>
*/
inline const Aws::String& GetMetricName() const{ return m_metricName; }
/**
* <p>Name of the metric.</p>
*/
inline void SetMetricName(const Aws::String& value) { m_metricNameHasBeenSet = true; m_metricName = value; }
/**
* <p>Name of the metric.</p>
*/
inline void SetMetricName(Aws::String&& value) { m_metricNameHasBeenSet = true; m_metricName = value; }
/**
* <p>Name of the metric.</p>
*/
inline void SetMetricName(const char* value) { m_metricNameHasBeenSet = true; m_metricName.assign(value); }
/**
* <p>Name of the metric.</p>
*/
inline MetricTransformation& WithMetricName(const Aws::String& value) { SetMetricName(value); return *this;}
/**
* <p>Name of the metric.</p>
*/
inline MetricTransformation& WithMetricName(Aws::String&& value) { SetMetricName(value); return *this;}
/**
* <p>Name of the metric.</p>
*/
inline MetricTransformation& WithMetricName(const char* value) { SetMetricName(value); return *this;}
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline const Aws::String& GetMetricNamespace() const{ return m_metricNamespace; }
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline void SetMetricNamespace(const Aws::String& value) { m_metricNamespaceHasBeenSet = true; m_metricNamespace = value; }
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline void SetMetricNamespace(Aws::String&& value) { m_metricNamespaceHasBeenSet = true; m_metricNamespace = value; }
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline void SetMetricNamespace(const char* value) { m_metricNamespaceHasBeenSet = true; m_metricNamespace.assign(value); }
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline MetricTransformation& WithMetricNamespace(const Aws::String& value) { SetMetricNamespace(value); return *this;}
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline MetricTransformation& WithMetricNamespace(Aws::String&& value) { SetMetricNamespace(value); return *this;}
/**
* <p>Namespace to which the metric belongs.</p>
*/
inline MetricTransformation& WithMetricNamespace(const char* value) { SetMetricNamespace(value); return *this;}
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline const Aws::String& GetMetricValue() const{ return m_metricValue; }
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline void SetMetricValue(const Aws::String& value) { m_metricValueHasBeenSet = true; m_metricValue = value; }
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline void SetMetricValue(Aws::String&& value) { m_metricValueHasBeenSet = true; m_metricValue = value; }
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline void SetMetricValue(const char* value) { m_metricValueHasBeenSet = true; m_metricValue.assign(value); }
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline MetricTransformation& WithMetricValue(const Aws::String& value) { SetMetricValue(value); return *this;}
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline MetricTransformation& WithMetricValue(Aws::String&& value) { SetMetricValue(value); return *this;}
/**
* <p> A string representing a value to publish to this metric when a filter
* pattern matches a log event. </p>
*/
inline MetricTransformation& WithMetricValue(const char* value) { SetMetricValue(value); return *this;}
/**
* <p>(Optional) A default value to emit when a filter pattern does not match a log
* event. Can be null.</p>
*/
inline double GetDefaultValue() const{ return m_defaultValue; }
/**
* <p>(Optional) A default value to emit when a filter pattern does not match a log
* event. Can be null.</p>
*/
inline void SetDefaultValue(double value) { m_defaultValueHasBeenSet = true; m_defaultValue = value; }
/**
* <p>(Optional) A default value to emit when a filter pattern does not match a log
* event. Can be null.</p>
*/
inline MetricTransformation& WithDefaultValue(double value) { SetDefaultValue(value); return *this;}
private:
Aws::String m_metricName;
bool m_metricNameHasBeenSet;
Aws::String m_metricNamespace;
bool m_metricNamespaceHasBeenSet;
Aws::String m_metricValue;
bool m_metricValueHasBeenSet;
double m_defaultValue;
bool m_defaultValueHasBeenSet;
};
} // namespace Model
} // namespace CloudWatchLogs
} // namespace Aws
| 34.075676
| 127
| 0.67703
|
[
"model"
] |
c7c5332dea226f72e35879aff9c92a244e16594e
| 28,599
|
h
|
C
|
src/surface.h
|
pyokagan/pycsdl2
|
4bd77699d753e53dc01f3f488f924e3262a3bd26
|
[
"Zlib"
] | null | null | null |
src/surface.h
|
pyokagan/pycsdl2
|
4bd77699d753e53dc01f3f488f924e3262a3bd26
|
[
"Zlib"
] | null | null | null |
src/surface.h
|
pyokagan/pycsdl2
|
4bd77699d753e53dc01f3f488f924e3262a3bd26
|
[
"Zlib"
] | null | null | null |
/*
* pycsdl2
* Copyright (c) 2015 Paul Tan <[email protected]>
*
* This software is provided 'as-is', without any express or implied warranty.
* In no event will the authors be held liable for any damages arising from
* the use of this software.
*
* Permission is granted to anyone to use this software for any purpose,
* including commercial applications, and to alter it and redistribute it
* freely, subject to the following restrictions:
*
* 1. The origin of this software must not be misrepresented; you must
* not claim that you wrote the original software. If you use this
* software in a product, an acknowledgment in the product
* documentation would be appreciated but is not required.
* 2. Altered source versions must be plainly marked as such, and must
* not be misrepresented as being the original software.
* 3. This notice may not be removed or altered from any source
* distribution.
*/
/**
* \file surface.h
* \brief Bindings for SDL_surface.h
*
* Implements bindings for SDL's surface creation and drawing API
* (SDL_surface.h)
*/
#ifndef _PYCSDL2_SURFACE_H_
#define _PYCSDL2_SURFACE_H_
#include <Python.h>
#include <SDL_surface.h>
#include "../include/pycsdl2.h"
#include "util.h"
#include "error.h"
#include "pixels.h"
#include "rwops.h"
/** \brief Instance data for PyCSDL2_SurfacePixelsType */
typedef struct PyCSDL2_SurfacePixels {
    PyCSDL2_BufferHEAD
    /** \brief Head of weak reference list */
    PyObject *in_weakreflist;
    /** \brief Pointer to the SDL_Surface that owns the pixels buffer.
     *         A reference is held (refcount bumped in
     *         PyCSDL2_SurfacePixelsCreate); NULL marks the object invalid. */
    SDL_Surface *surface;
} PyCSDL2_SurfacePixels;
/** \brief Destructor for PyCSDL2_SurfacePixelsType */
static void
PyCSDL2_SurfacePixelsDealloc(PyCSDL2_SurfacePixels *self)
{
    /* Invalidate outstanding weak references before the memory goes away. */
    PyObject_ClearWeakRefs((PyObject*) self);
    /* Release the surface reference taken in PyCSDL2_SurfacePixelsCreate. */
    if (self->surface)
        SDL_FreeSurface(self->surface);
    Py_TYPE(self)->tp_free((PyObject*) self);
}
/**
* \brief Validates the PyCSDL2_SurfacePixels object.
*
* A PyCSDL2_SurfacePixels object is valid if self->pixels and self->surface
* are not NULL.
*
* \returns 1 if the object is valid, 0 with an exception set otherwise.
*/
/*
 * Report whether the pixels object is still usable: non-NULL, attached
 * to an SDL_Surface, and backed by a valid buffer. Returns 1 when valid,
 * 0 with an exception set otherwise.
 */
static int
PyCSDL2_SurfacePixelsValid(PyCSDL2_SurfacePixels *self)
{
    if (!PyCSDL2_Assert(self))
        return 0;

    if (self->surface == NULL) {
        PyErr_SetString(PyExc_ValueError, "invalid SDL_SurfacePixels");
        return 0;
    }

    /* Delegate the final check to the generic buffer validator. */
    return PyCSDL2_BufferValid((PyCSDL2_Buffer*) self) ? 1 : 0;
}
/** \brief getbufferproc implementation for PyCSDL2_SurfacePixelsType */
/* Validate, then hand the buffer request to the shared implementation. */
static int
PyCSDL2_SurfacePixelsGetBuffer(PyCSDL2_SurfacePixels *self, Py_buffer *view,
                               int flags)
{
    if (PyCSDL2_SurfacePixelsValid(self))
        return PyCSDL2_BufferGetBuffer((PyCSDL2_Buffer*) self, view, flags);

    return -1;
}
/** \brief Buffer protocol definition for PyCSDL2_SurfacePixelsType */
/* Buffer protocol: getbuffer validates then delegates; release is the
 * shared PyCSDL2_Buffer implementation. */
static PyBufferProcs PyCSDL2_SurfacePixelsBufferProcs = {
    (getbufferproc) PyCSDL2_SurfacePixelsGetBuffer,
    (releasebufferproc) PyCSDL2_BufferReleaseBuffer
};
/** \brief Type definition for csdl2.SDL_SurfacePixels */
/* Internal type: exposes a surface's pixel storage as a writable buffer
 * through the shared PyCSDL2_Buffer sequence/mapping protocols. */
static PyTypeObject PyCSDL2_SurfacePixelsType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    /* tp_name */ "csdl2.SDL_SurfacePixels",
    /* tp_basicsize */ sizeof(PyCSDL2_SurfacePixels),
    /* tp_itemsize */ 0,
    /* tp_dealloc */ (destructor) PyCSDL2_SurfacePixelsDealloc,
    /* tp_print */ 0,
    /* tp_getattr */ 0,
    /* tp_setattr */ 0,
    /* tp_reserved */ 0,
    /* tp_repr */ 0,
    /* tp_as_number */ 0,
    /* tp_as_sequence */ &PyCSDL2_BufferAsSequence,
    /* tp_as_mapping */ &PyCSDL2_BufferAsMapping,
    /* tp_hash */ 0,
    /* tp_call */ 0,
    /* tp_str */ 0,
    /* tp_getattro */ 0,
    /* tp_setattro */ 0,
    /* tp_as_buffer */ &PyCSDL2_SurfacePixelsBufferProcs,
    /* tp_flags */ Py_TPFLAGS_DEFAULT,
    /* tp_doc */ "Owner of SDL_Surface pixels buffer",
    /* tp_traverse */ 0,
    /* tp_clear */ 0,
    /* tp_richcompare */ 0,
    /* tp_weaklistoffset */ offsetof(PyCSDL2_SurfacePixels, in_weakreflist)
};
/**
* \brief Creates an instance of PyCSDL2_SurfacePixelsType
*
* This will steal the reference to the SDL_Surface.
* */
static PyCSDL2_SurfacePixels *
PyCSDL2_SurfacePixelsCreate(SDL_Surface *surface)
{
    PyCSDL2_SurfacePixels *self;
    PyTypeObject *type = &PyCSDL2_SurfacePixelsType;

    if (!PyCSDL2_Assert(surface))
        return NULL;
    if (!PyCSDL2_Assert(surface->pixels))
        return NULL;
    if (!(self = (PyCSDL2_SurfacePixels*)type->tp_alloc(type, 0)))
        return NULL;
    /* Expose the storage as a flat unsigned-char buffer of h * pitch bytes. */
    PyCSDL2_BufferInit((PyCSDL2_Buffer*) self, CTYPE_UCHAR, surface->pixels,
                       surface->h * surface->pitch, 0);
    /* Take our own SDL reference so the pixel memory stays alive for the
     * lifetime of this object; released in PyCSDL2_SurfacePixelsDealloc. */
    surface->refcount += 1;
    self->surface = surface;
    return self;
}
/** \brief Instance data for PyCSDL2_SurfaceRectType */
typedef struct PyCSDL2_SurfaceRect {
    PyObject_HEAD
    /** \brief Head of weak reference list */
    PyObject *in_weakreflist;
    /** \brief The SDL_Rect. Points into memory owned by \c surface
     *         (e.g. its clip_rect), not a standalone allocation. */
    SDL_Rect *rect;
    /** \brief Pointer to the SDL_Surface that owns the rect (reference held) */
    SDL_Surface *surface;
} PyCSDL2_SurfaceRect;
/** \brief Destructor for PyCSDL2_SurfaceRectType */
static void
PyCSDL2_SurfaceRectDealloc(PyCSDL2_SurfaceRect *self)
{
    /* Invalidate outstanding weak references first. */
    PyObject_ClearWeakRefs((PyObject*) self);
    /* Release the surface reference taken in PyCSDL2_SurfaceRectCreate;
     * this also keeps self->rect valid up to this point. */
    if (self->surface)
        SDL_FreeSurface(self->surface);
    Py_TYPE(self)->tp_free((PyObject*) self);
}
/**
* \brief Validates the PyCSDL2_SurfaceRect object.
*
* A PyCSDL2_SurfaceRect object is valid if self->rect and self->surface are
* not NULL.
*
* \returns 1 if the object is valid, 0 with an exception set otherwise.
*/
/*
 * Report whether the rect object is still usable: non-NULL, attached to
 * a surface, and carrying a rect pointer. Returns 1 when valid, 0 with
 * an exception set otherwise.
 */
static int
PyCSDL2_SurfaceRectValid(PyCSDL2_SurfaceRect *self)
{
    if (!PyCSDL2_Assert(self))
        return 0;

    if (self->surface == NULL) {
        PyErr_SetString(PyExc_ValueError, "invalid SDL_SurfaceRect");
        return 0;
    }

    return PyCSDL2_Assert(self->rect) ? 1 : 0;
}
/** \brief getbufferproc implementation for PyCSDL2_SurfaceRectType */
static int
PyCSDL2_SurfaceRectGetBuffer(PyCSDL2_SurfaceRect *self, Py_buffer *view,
                             int flags)
{
    if (!PyCSDL2_SurfaceRectValid(self))
        return -1;
    /* The rect lives inside surface-owned memory; only read access is safe. */
    if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE) {
        PyErr_SetString(PyExc_BufferError, "Object is not writable.");
        return -1;
    }
    view->buf = self->rect;
    /* view->obj keeps us (and thus the surface) alive while the view exists;
     * PyBuffer_Release drops this reference. */
    Py_INCREF((PyObject*) self);
    view->obj = (PyObject*) self;
    view->len = sizeof(SDL_Rect);
    view->readonly = 1;
    /* Exposed as a single 0-dimensional item of four C ints (x, y, w, h). */
    view->itemsize = sizeof(SDL_Rect);
    view->format = "iiii";
    view->ndim = 0;
    view->shape = NULL;
    view->strides = NULL;
    view->suboffsets = NULL;
    view->internal = NULL;
    return 0;
}
/** \brief Buffer protocol definition for PyCSDL2_SurfaceRectType */
/* No releasebuffer needed: nothing was allocated in getbuffer, and
 * PyBuffer_Release drops the view->obj reference on its own. */
static PyBufferProcs PyCSDL2_SurfaceRectBufferProcs = {
    (getbufferproc) PyCSDL2_SurfaceRectGetBuffer,
    (releasebufferproc) NULL
};
/** \brief Getter for SDL_SurfaceRect.x */
/* Returns rect->x as a Python int, or NULL (with exception) if invalid. */
static PyObject *
PyCSDL2_SurfaceRectGetX(PyCSDL2_SurfaceRect *self, void *closure)
{
    return PyCSDL2_SurfaceRectValid(self) ? PyLong_FromLong(self->rect->x)
                                          : NULL;
}
/** \brief Getter for SDL_SurfaceRect.y */
/* Returns rect->y as a Python int, or NULL (with exception) if invalid. */
static PyObject *
PyCSDL2_SurfaceRectGetY(PyCSDL2_SurfaceRect *self, void *closure)
{
    return PyCSDL2_SurfaceRectValid(self) ? PyLong_FromLong(self->rect->y)
                                          : NULL;
}
/** \brief Getter for SDL_SurfaceRect.w */
/* Returns rect->w as a Python int, or NULL (with exception) if invalid. */
static PyObject *
PyCSDL2_SurfaceRectGetW(PyCSDL2_SurfaceRect *self, void *closure)
{
    return PyCSDL2_SurfaceRectValid(self) ? PyLong_FromLong(self->rect->w)
                                          : NULL;
}
/** \brief Getter for SDL_SurfaceRect.h */
/* Returns rect->h as a Python int, or NULL (with exception) if invalid. */
static PyObject *
PyCSDL2_SurfaceRectGetH(PyCSDL2_SurfaceRect *self, void *closure)
{
    return PyCSDL2_SurfaceRectValid(self) ? PyLong_FromLong(self->rect->h)
                                          : NULL;
}
/** \brief List of properties for PyCSDL2_SurfaceRectType */
/* Read-only x/y/w/h attributes backed directly by the wrapped SDL_Rect. */
static PyGetSetDef PyCSDL2_SurfaceRectGetSetters[] = {
    {"x",
     (getter) PyCSDL2_SurfaceRectGetX,
     (setter) NULL,
     "(readonly) The x location of the rectangle's upper left corner.",
     NULL},
    {"y",
     (getter) PyCSDL2_SurfaceRectGetY,
     (setter) NULL,
     "(readonly) The y location of the rectangle's upper left corner.",
     NULL},
    {"w",
     (getter) PyCSDL2_SurfaceRectGetW,
     (setter) NULL,
     "(readonly) The width of the rectangle.",
     NULL},
    {"h",
     (getter) PyCSDL2_SurfaceRectGetH,
     (setter) NULL,
     "(readonly) The height of the rectangle.",
     NULL},
    {NULL}
};
/** \brief Type definition for csdl2.SDL_SurfaceRect */
/* Read-only rect view; also exports a 0-d read-only buffer (see
 * PyCSDL2_SurfaceRectGetBuffer). */
static PyTypeObject PyCSDL2_SurfaceRectType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    /* tp_name */ "csdl2.SDL_SurfaceRect",
    /* tp_basicsize */ sizeof(PyCSDL2_SurfaceRect),
    /* tp_itemsize */ 0,
    /* tp_dealloc */ (destructor) PyCSDL2_SurfaceRectDealloc,
    /* tp_print */ 0,
    /* tp_getattr */ 0,
    /* tp_setattr */ 0,
    /* tp_reserved */ 0,
    /* tp_repr */ 0,
    /* tp_as_number */ 0,
    /* tp_as_sequence */ 0,
    /* tp_as_mapping */ 0,
    /* tp_hash */ 0,
    /* tp_call */ 0,
    /* tp_str */ 0,
    /* tp_getattro */ 0,
    /* tp_setattro */ 0,
    /* tp_as_buffer */ &PyCSDL2_SurfaceRectBufferProcs,
    /* tp_flags */ Py_TPFLAGS_DEFAULT,
    /* tp_doc */
    "A 2d rectangle with its origin at the upper left corner.\n",
    /* tp_traverse */ 0,
    /* tp_clear */ 0,
    /* tp_richcompare */ 0,
    /* tp_weaklistoffset */ offsetof(PyCSDL2_SurfaceRect, in_weakreflist),
    /* tp_iter */ 0,
    /* tp_iternext */ 0,
    /* tp_methods */ 0,
    /* tp_members */ 0,
    /* tp_getset */ PyCSDL2_SurfaceRectGetSetters
};
/** \brief Creates an instance of PyCSDL2_SurfaceRect */
PyCSDL2_SurfaceRect *
PyCSDL2_SurfaceRectCreate(SDL_Surface *surface, SDL_Rect *rect)
{
    PyCSDL2_SurfaceRect *self;
    PyTypeObject *type = &PyCSDL2_SurfaceRectType;

    if (!PyCSDL2_Assert(surface))
        return NULL;
    if (!PyCSDL2_Assert(rect))
        return NULL;
    if (!(self = (PyCSDL2_SurfaceRect*)type->tp_alloc(type, 0)))
        return NULL;
    /* rect points into surface-owned memory, so take a surface reference
     * to keep it alive; released in PyCSDL2_SurfaceRectDealloc. */
    surface->refcount += 1;
    self->surface = surface;
    self->rect = rect;
    return self;
}
/** \brief Instance data for PyCSDL2_SurfaceType */
typedef struct PyCSDL2_Surface {
    PyObject_HEAD
    /** \brief Head of weak reference list */
    PyObject *in_weakreflist;
    /** \brief Pointer to the SDL_Surface that this instance owns.
     *         NULL after PyCSDL2_SurfaceClear -- the wrapper is then invalid. */
    SDL_Surface *surface;
    /** \brief stores "format" object for Python access */
    PyCSDL2_PixelFormat *format;
    /** \brief stores "pixels" object for Python access */
    PyObject *pixels;
    /** \brief holds the buffer to the "pixels" object. Only filled in when
     *         an external pixels object was supplied at creation time. */
    Py_buffer pixels_buf;
    /** \brief stores "userdata" attribute for Python access */
    PyObject *userdata;
    /** \brief stores "clip_rect" object for Python access */
    PyCSDL2_SurfaceRect *clip_rect;
} PyCSDL2_Surface;
static PyTypeObject PyCSDL2_SurfaceType;
/** \brief Traversal function for PyCSDL2_SurfaceType */
static int
PyCSDL2_SurfaceTraverse(PyCSDL2_Surface *self, visitproc visit, void *arg)
{
    Py_VISIT(self->format);
    Py_VISIT(self->pixels);
    /* pixels_buf.obj is only set when an external pixels object was
     * passed to PyCSDL2_SurfaceCreate. */
    if (self->pixels_buf.obj)
        Py_VISIT(self->pixels_buf.obj);
    Py_VISIT(self->userdata);
    Py_VISIT(self->clip_rect);
    return 0;
}
/** \brief Clear function for PyCSDL2_SurfaceType */
static int
PyCSDL2_SurfaceClear(PyCSDL2_Surface *self)
{
    /* Order matters: detach the borrowed pixel pointer from the SDL
     * surface BEFORE releasing the buffer, so the surface never points
     * at memory whose backing object has been released. */
    if (self->pixels_buf.obj) {
        if (self->surface)
            self->surface->pixels = NULL;
        PyBuffer_Release(&self->pixels_buf);
    }
    Py_CLEAR(self->format);
    Py_CLEAR(self->pixels);
    Py_CLEAR(self->userdata);
    Py_CLEAR(self->clip_rect);
    /* Drop the SDL reference last; NULL marks the wrapper invalid. */
    if (self->surface)
        SDL_FreeSurface(self->surface);
    self->surface = NULL;
    return 0;
}
/** \brief Destructor for PyCSDL2_SurfaceType */
static void
PyCSDL2_SurfaceDealloc(PyCSDL2_Surface *self)
{
    /* Release owned references and the SDL surface via the shared clear. */
    PyCSDL2_SurfaceClear(self);
    PyObject_ClearWeakRefs((PyObject*) self);
    Py_TYPE(self)->tp_free((PyObject*) self);
}
/**
* \brief Validates a PyCSDL2_Surface object.
*
* A PyCSDL2_Surface object is valid if its internal SDL_Surface pointer is not
* NULL. Otherwise, raise a ValueError;
*
* \param surface PyCSDL2_Surface object to validate.
* \returns 1 if surface is valid, 0 with an exception set otherwise.
*/
static int
PyCSDL2_SurfaceValid(PyCSDL2_Surface *surface)
{
    if (!PyCSDL2_Assert(surface))
        return 0;
    /* Exact type check -- subclasses are not accepted. */
    if (Py_TYPE(surface) != &PyCSDL2_SurfaceType) {
        PyCSDL2_RaiseTypeError(NULL, "SDL_Surface", (PyObject*)surface);
        return 0;
    }
    /* NULLed by PyCSDL2_SurfaceClear (e.g. after csdl2.SDL_FreeSurface). */
    if (!surface->surface) {
        PyErr_SetString(PyExc_ValueError, "Invalid SDL_Surface");
        return 0;
    }
    return 1;
}
/**
* \brief Borrow the SDL_Surface managed by the PyCSDL2_Surface object.
*
* \param obj The SDL_Surface object
* \param[out] out Output pointer.
* \returns 1 on success, 0 if an exception occurred.
*/
/*
 * Borrow the wrapped SDL_Surface pointer. Stores it through `out` when
 * out is non-NULL and returns 1; returns 0 with an exception set when
 * obj is not a valid SDL_Surface wrapper.
 */
static int
PyCSDL2_SurfacePtr(PyObject *obj, SDL_Surface **out)
{
    PyCSDL2_Surface *surface_obj = (PyCSDL2_Surface*)obj;

    if (!PyCSDL2_SurfaceValid(surface_obj))
        return 0;

    if (out != NULL)
        *out = surface_obj->surface;
    return 1;
}
/** \brief Getter for SDL_Surface.flags */
/* SDL_Surface.flags as an unsigned Python int, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetFlags(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyLong_FromUnsignedLong(self->surface->flags) : NULL;
}
/** \brief Getter for SDL_Surface.format */
/* New reference to the cached pixel-format wrapper, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetFormat(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyCSDL2_Get((PyObject*) self->format) : NULL;
}
/** \brief Getter for SDL_Surface.w */
/* Surface width in pixels as a Python int, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetW(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyLong_FromLong(self->surface->w) : NULL;
}
/** \brief Getter for SDL_Surface.h */
/* Surface height in pixels as a Python int, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetH(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyLong_FromLong(self->surface->h) : NULL;
}
/** \brief Getter for SDL_Surface.pitch */
/* Row length in bytes as a Python int, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetPitch(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyLong_FromLong(self->surface->pitch) : NULL;
}
/** \brief Getter for SDL_Surface.pixels */
/* New reference to the cached pixels object, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetPixels(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self) ? PyCSDL2_Get(self->pixels) : NULL;
}
/** \brief Getter for SDL_Surface.userdata */
/* New reference to the stored userdata object, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetUserdata(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self) ? PyCSDL2_Get(self->userdata) : NULL;
}
/** \brief Setter for SDL_Surface.userdata */
static int
PyCSDL2_SurfaceSetUserdata(PyCSDL2_Surface *self, PyObject *value,
                           void *closure)
{
    if (!PyCSDL2_SurfaceValid(self))
        return -1;
    /* PyCSDL2_Set swaps in the new reference, releasing the old one. */
    PyCSDL2_Set(self->userdata, value);
    return 0;
}
/** \brief Getter for SDL_Surface.locked */
/* Surface lock state as a Python bool, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetLocked(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyBool_FromLong(self->surface->locked) : NULL;
}
/** \brief Getter for SDL_Surface.refcount */
/* SDL's own reference count of the surface, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetRefcount(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyLong_FromLong(self->surface->refcount) : NULL;
}
/** \brief Getter for SDL_Surface.clip_rect */
/* New reference to the cached clip_rect wrapper, or NULL if invalid. */
static PyObject *
PyCSDL2_SurfaceGetClipRect(PyCSDL2_Surface *self, void *closure)
{
    return PyCSDL2_SurfaceValid(self)
           ? PyCSDL2_Get((PyObject*) self->clip_rect) : NULL;
}
/** \brief List of properties for PyCSDL2_SurfaceType */
/* Attribute table: everything is read-only except "userdata". */
static PyGetSetDef PyCSDL2_SurfaceGetSetters[] = {
    {"flags",
     (getter) PyCSDL2_SurfaceGetFlags,
     (setter) NULL,
     "(readonly) Flags set on the surface. For internal use.",
     NULL},
    {"format",
     (getter) PyCSDL2_SurfaceGetFormat,
     (setter) NULL,
     "(readonly) Format of pixels stored in the surface.",
     NULL},
    {"w",
     (getter) PyCSDL2_SurfaceGetW,
     (setter) NULL,
     "(readonly) Width of surface in pixels.",
     NULL},
    {"h",
     (getter) PyCSDL2_SurfaceGetH,
     (setter) NULL,
     "(readonly) Height of surface in pixels.",
     NULL},
    {"pitch",
     (getter) PyCSDL2_SurfaceGetPitch,
     (setter) NULL,
     "(readonly) The length of a row of pixels in bytes.",
     NULL},
    {"pixels",
     (getter) PyCSDL2_SurfaceGetPixels,
     (setter) NULL,
     "The actual pixel data.",
     NULL},
    {"userdata",
     (getter) PyCSDL2_SurfaceGetUserdata,
     (setter) PyCSDL2_SurfaceSetUserdata,
     "Application-specific data associated with the surface.",
     NULL},
    {"locked",
     (getter) PyCSDL2_SurfaceGetLocked,
     (setter) NULL,
     "(readonly) True if the surface is locked.",
     NULL},
    {"clip_rect",
     (getter) PyCSDL2_SurfaceGetClipRect,
     (setter) NULL,
     "(readonly) Clipping information for the surface.",
     NULL},
    {"refcount",
     (getter) PyCSDL2_SurfaceGetRefcount,
     (setter) NULL,
     "(readonly) SDL's reference count of the surface.",
     NULL},
    {NULL}
};
/** \brief Type definition for csdl2.SDL_Surface */
/* Participates in GC (traverse/clear) because it can own arbitrary
 * Python objects via "pixels" and "userdata". */
static PyTypeObject PyCSDL2_SurfaceType = {
    PyVarObject_HEAD_INIT(NULL, 0)
    /* tp_name */ "csdl2.SDL_Surface",
    /* tp_basicsize */ sizeof(PyCSDL2_Surface),
    /* tp_itemsize */ 0,
    /* tp_dealloc */ (destructor) PyCSDL2_SurfaceDealloc,
    /* tp_print */ 0,
    /* tp_getattr */ 0,
    /* tp_setattr */ 0,
    /* tp_reserved */ 0,
    /* tp_repr */ 0,
    /* tp_as_number */ 0,
    /* tp_as_sequence */ 0,
    /* tp_as_mapping */ 0,
    /* tp_hash */ 0,
    /* tp_call */ 0,
    /* tp_str */ 0,
    /* tp_getattro */ 0,
    /* tp_setattro */ 0,
    /* tp_as_buffer */ 0,
    /* tp_flags */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    /* tp_doc */
    "SDL_Surface",
    /* tp_traverse */ (traverseproc) PyCSDL2_SurfaceTraverse,
    /* tp_clear */ (inquiry) PyCSDL2_SurfaceClear,
    /* tp_richcompare */ 0,
    /* tp_weaklistoffset */ offsetof(PyCSDL2_Surface, in_weakreflist),
    /* tp_iter */ 0,
    /* tp_iternext */ 0,
    /* tp_methods */ 0,
    /* tp_members */ 0,
    /* tp_getset */ PyCSDL2_SurfaceGetSetters,
    /* tp_base */ 0,
    /* tp_dict */ 0,
    /* tp_descr_get */ 0,
    /* tp_descr_set */ 0,
    /* tp_dictoffset */ 0,
    /* tp_init */ 0,
    /* tp_alloc */ 0,
    /* tp_new */ 0
};
/**
* \brief Creates a new instance of PyCSDL2_SurfaceType.
*
* This will steal the reference to the SDL_Surface.
*
* \param surface The SDL_Surface to wrap. The reference to the SDL_Surface
* will be stolen.
* \param pixels Python object owning the surface->pixels buffer. It must
* implement the buffer protocol. If NULL, it is assumed that the
* SDL surface owns the pixel buffer, and a memoryview will be
* created.
*/
static PyObject *
PyCSDL2_SurfaceCreate(SDL_Surface *surface, PyObject *pixels)
{
    PyCSDL2_Surface *self;
    PyTypeObject *type = &PyCSDL2_SurfaceType;

    if (!PyCSDL2_Assert(surface))
        return NULL;
    if (!PyCSDL2_Assert(surface->format))
        return NULL;
    if (!(self = (PyCSDL2_Surface*)type->tp_alloc(type, 0)))
        return NULL;
    /* The format wrapper needs its own SDL reference; roll it back if
     * wrapper creation fails. */
    surface->format->refcount += 1;
    self->format = (PyCSDL2_PixelFormat*)PyCSDL2_PixelFormatCreate(surface->format);
    if (!self->format) {
        surface->format->refcount -= 1;
        Py_DECREF(self);
        return NULL;
    }
    if (pixels) {
        /* Caller-supplied pixel storage: point the SDL surface at the
         * external buffer for the lifetime of this wrapper. */
        if (PyObject_GetBuffer(pixels, &self->pixels_buf,
                               PyBUF_WRITABLE | PyBUF_ND)) {
            Py_DECREF(self);
            return NULL;
        }
        Py_INCREF(pixels);
        self->pixels = pixels;
        surface->pixels = self->pixels_buf.buf;
    } else if (surface->pixels) {
        /* SDL owns the pixel storage: wrap it in a buffer-owner object. */
        PyCSDL2_SurfacePixels *pixels;
        if (!(pixels = PyCSDL2_SurfacePixelsCreate(surface))) {
            Py_DECREF(self);
            return NULL;
        }
        self->pixels = (PyObject*) pixels;
    }
    if (!(self->clip_rect = PyCSDL2_SurfaceRectCreate(surface,
                                                      &surface->clip_rect))) {
        Py_DECREF(self);
        return NULL;
    }
    /* Assigned last: on the failure paths above self->surface is still
     * NULL, so the caller keeps responsibility for freeing `surface`. */
    self->surface = surface;
    return (PyObject*)self;
}
/**
* \brief Implements csdl2.SDL_MUSTLOCK()
*
* \code{.py}
* SDL_MUSTLOCK(surface: SDL_Surface) -> bool
* \endcode
*/
static PyObject *
PyCSDL2_MUSTLOCK(PyObject *module, PyObject *args, PyObject *kwds)
{
    PyCSDL2_Surface *surface;
    static char *kwlist[] = {"surface", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist,
                                     &PyCSDL2_SurfaceType, &surface))
        return NULL;
    /* "O!" only checks the type; the wrapper may have been invalidated
     * by csdl2.SDL_FreeSurface. */
    if (!PyCSDL2_SurfaceValid(surface))
        return NULL;
    return PyBool_FromLong(SDL_MUSTLOCK(surface->surface));
}
/**
* \brief Implements csdl2.SDL_CreateRGBSurface()
*
* \code{.py}
* SDL_CreateRGBSurface(flags: int, width: int, height: int, depth: int,
* Rmask: int, Gmask: int, Bmask: int, Amask: int)
* -> SDL_Surface
* \endcode
*/
static PyObject *
PyCSDL2_CreateRGBSurface(PyObject *module, PyObject *args, PyObject *kwds)
{
    Uint32 flags, Rmask, Gmask, Bmask, Amask;
    int width, height, depth;
    SDL_Surface *ret;
    PyObject *out;
    static char *kwlist[] = {"flags", "width", "height", "depth", "Rmask",
                             "Gmask", "Bmask", "Amask", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, Uint32_UNIT "iii" Uint32_UNIT
                                     Uint32_UNIT Uint32_UNIT Uint32_UNIT,
                                     kwlist, &flags, &width, &height, &depth,
                                     &Rmask, &Gmask, &Bmask, &Amask))
        return NULL;
    if (!(ret = SDL_CreateRGBSurface(flags, width, height, depth, Rmask, Gmask,
                                     Bmask, Amask)))
        return PyCSDL2_RaiseSDLError();
    /* The wrapper steals the surface reference on success; on failure
     * we still own it and must free it. */
    if (!(out = PyCSDL2_SurfaceCreate(ret, NULL))) {
        SDL_FreeSurface(ret);
        return NULL;
    }
    return out;
}
/**
* \brief Implements csdl2.SDL_CreateRGBSurfaceFrom()
*
* \code{.py}
* SDL_CreateRGBSurfaceFrom(pixels: buffer, width: int, height: int,
* depth: int, pitch: int, Rmask: int, Gmask: int,
* Bmask: int, Amask: int) -> SDL_Surface
* \endcode
*/
static PyObject *
PyCSDL2_CreateRGBSurfaceFrom(PyObject *module, PyObject *args, PyObject *kwds)
{
    int width, height, depth, pitch;
    Uint32 Rmask, Gmask, Bmask, Amask;
    PyObject *pixels;
    Py_buffer buf;
    SDL_Surface *surface;
    PyObject *out;
    static char *kwlist[] = {"pixels", "width", "height", "depth", "pitch",
                             "Rmask", "Gmask", "Bmask", "Amask", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "Oiiii" Uint32_UNIT
                                     Uint32_UNIT Uint32_UNIT Uint32_UNIT,
                                     kwlist, &pixels, &width, &height, &depth,
                                     &pitch, &Rmask, &Gmask, &Bmask, &Amask))
        return NULL;
    /* pixels may be None: SDL is then given a NULL pixel pointer. */
    if (pixels == Py_None) {
        buf.obj = NULL;
        buf.buf = NULL;
    } else if (PyObject_GetBuffer(pixels, &buf, PyBUF_WRITABLE | PyBUF_ND)) {
        return NULL;
    } else if (!PyBuffer_IsContiguous(&buf, 'C')) {
        PyErr_SetString(PyExc_BufferError, "Pixels buffer is not "
                        "C Contiguous");
        PyBuffer_Release(&buf);
        return NULL;
    } else if (buf.len != pitch * height) {
        Py_ssize_t expected = pitch * height;
        PyErr_Format(PyExc_BufferError, "Invalid pixels buffer size. "
                     "Expected: %zd. Got: %zd.", expected, buf.len);
        PyBuffer_Release(&buf);
        return NULL;
    }
    surface = SDL_CreateRGBSurfaceFrom(buf.buf, width, height, depth, pitch,
                                       Rmask, Gmask, Bmask, Amask);
    if (!surface) {
        PyBuffer_Release(&buf);
        return PyCSDL2_RaiseSDLError();
    }
    /* SDL accepts any pitch; reject pitches too small to hold a row so
     * that row indexing cannot run past the buffer. */
    if (pitch < surface->format->BytesPerPixel * surface->w) {
        int expected = surface->format->BytesPerPixel * surface->w;
        SDL_FreeSurface(surface);
        PyBuffer_Release(&buf);
        PyErr_Format(PyExc_ValueError, "Invalid pitch. "
                     "Expected at least: %d. Got: %d", expected, pitch);
        return NULL;
    }
    /* BUGFIX: when pixels is None, pass NULL rather than Py_None --
     * PyCSDL2_SurfaceCreate treats any non-NULL object as a pixel buffer
     * and PyObject_GetBuffer(Py_None) always fails, which previously made
     * the documented None case error out unconditionally. */
    out = PyCSDL2_SurfaceCreate(surface, pixels == Py_None ? NULL : pixels);
    if (!out) {
        SDL_FreeSurface(surface);
        PyBuffer_Release(&buf);
        return NULL;
    }
    /* PyCSDL2_SurfaceCreate took its own view on `pixels`, so our local
     * view can be dropped now. */
    PyBuffer_Release(&buf);
    return out;
}
/**
* \brief Implements csdl2.SDL_FreeSurface()
*
* \code{.py}
* SDL_FreeSurface(surface: SDL_Surface) -> NULL
* \endcode
*/
static PyObject *
PyCSDL2_FreeSurface(PyObject *module, PyObject *args, PyObject *kwds)
{
    PyCSDL2_Surface *surface;
    static char *kwlist[] = {"surface", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist,
                                     &PyCSDL2_SurfaceType, &surface))
        return NULL;
    if (!PyCSDL2_SurfaceValid(surface))
        return NULL;
    /* Invalidate the wrapper immediately (drops the SDL reference and
     * all cached sub-objects) instead of waiting for garbage collection. */
    PyCSDL2_SurfaceClear(surface);
    Py_RETURN_NONE;
}
/**
* \brief Implements csdl2.SDL_LoadBMP_RW()
*
* \code{.py}
* SDL_LoadBMP_RW(src: SDL_RWops, freesrc: bool) -> SDL_Surface
* \endcode
*/
static PyObject *
PyCSDL2_LoadBMP_RW(PyObject *module, PyObject *args, PyObject *kwds)
{
    PyCSDL2_RWops *src_obj;
    SDL_RWops *src = NULL;
    int freesrc;
    SDL_Surface *ret;
    static char *kwlist[] = {"src", "freesrc", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!p", kwlist,
                                     &PyCSDL2_RWopsType, &src_obj, &freesrc))
        return NULL;
    if (!PyCSDL2_RWopsPtr((PyObject*)src_obj, &src))
        return NULL;
    /* Release the GIL around the (possibly slow) read. */
    Py_BEGIN_ALLOW_THREADS
    ret = SDL_LoadBMP_RW(src, freesrc);
    Py_END_ALLOW_THREADS
    /* With freesrc true, SDL frees the RWops regardless of outcome, so
     * detach the Python wrapper from the now-invalid pointer. */
    if (freesrc)
        PyCSDL2_RWopsDetach(src_obj);
    if (!ret)
        return PyCSDL2_RaiseSDLError();
    return PyCSDL2_SurfaceCreate(ret, NULL);
}
/**
* \brief Implements csdl2.SDL_LoadBMP()
*
* \code{.py}
* SDL_LoadBMP(file: str) -> SDL_Surface
* \endcode
*/
static PyObject *
PyCSDL2_LoadBMP(PyObject *module, PyObject *args, PyObject *kwds)
{
    PyObject *file_obj;
    const char *file;
    SDL_Surface *ret;
    static char *kwlist[] = {"file", NULL};

    /* PyUnicode_FSConverter yields a new bytes object holding the
     * filesystem-encoded path; it must be released on every exit path. */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&", kwlist,
                                     PyUnicode_FSConverter, &file_obj))
        return NULL;
    file = PyBytes_AsString(file_obj);
    if (!file) {
        Py_DECREF(file_obj);
        return NULL;
    }
    /* Release the GIL around the file read; file_obj keeps the C string
     * alive meanwhile. */
    Py_BEGIN_ALLOW_THREADS
    ret = SDL_LoadBMP(file);
    Py_END_ALLOW_THREADS
    Py_DECREF(file_obj);
    if (!ret)
        return PyCSDL2_RaiseSDLError();
    return PyCSDL2_SurfaceCreate(ret, NULL);
}
/**
* \brief Initializes bindings to SDL_surface.h
*
* Adds constants defined in SDL_surface.h to module.
*
* \param module csdl2 module object
* \returns 1 on success, 0 if an exception occurred.
*/
/*
 * Registers the SDL_surface.h bindings on the csdl2 module: the surface
 * flag constants and the surface-related types.
 *
 * Returns 1 on success, 0 if a Python exception has been set.
 */
static int
PyCSDL2_initsurface(PyObject *module)
{
    static const PyCSDL2_Constant surface_constants[] = {
        {"SDL_SWSURFACE", SDL_SWSURFACE},
        {"SDL_PREALLOC", SDL_PREALLOC},
        {"SDL_RLEACCEL", SDL_RLEACCEL},
        {"SDL_DONTFREE", SDL_DONTFREE},
        {NULL, 0}
    };

    if (PyCSDL2_PyModuleAddConstants(module, surface_constants) < 0)
        return 0;

    /* Helper types are readied but not exposed as module attributes. */
    if (PyType_Ready(&PyCSDL2_SurfacePixelsType) != 0)
        return 0;
    if (PyType_Ready(&PyCSDL2_SurfaceRectType) != 0)
        return 0;

    return PyCSDL2_PyModuleAddType(module, &PyCSDL2_SurfaceType) < 0 ? 0 : 1;
}
#endif /* _PYCSDL2_SURFACE_H_ */
| 29.574974
| 84
| 0.631875
|
[
"object",
"shape"
] |
c7ce870d0ab04cce77d0e43b8369bf57f375fdd0
| 5,552
|
h
|
C
|
sqldata/sqlasaapi.h
|
strk/sqlines
|
82676a3b672bc218a024712abeb85279ae75495c
|
[
"Apache-2.0"
] | null | null | null |
sqldata/sqlasaapi.h
|
strk/sqlines
|
82676a3b672bc218a024712abeb85279ae75495c
|
[
"Apache-2.0"
] | null | null | null |
sqldata/sqlasaapi.h
|
strk/sqlines
|
82676a3b672bc218a024712abeb85279ae75495c
|
[
"Apache-2.0"
] | null | null | null |
// SqlAsaApi Sybase SQL Anywhere API
// Copyright (c) 2012 SQLines. All rights reserved
#ifndef sqlines_sqlasaapi_h
#define sqlines_sqlasaapi_h
#if defined(WIN32) || defined(WIN64)
#include <windows.h>
#endif
#include <sql.h>
#include <sqlext.h>
#include <sqltypes.h>
#include <oci.h>
#include <ctpublic.h>
#include "sqlapibase.h"
#include "sqldb.h"
// ODBC-based database API implementation for Sybase SQL Anywhere (ASA).
// Overrides the SqlApiBase interface; SQL statements themselves live in the
// corresponding .cpp file.
class SqlAsaApi : public SqlApiBase
{
    // Environment and connection handles
    SQLHENV _henv;
    SQLHDBC _hdbc;
    SQLHANDLE _hstmt_cursor;   // statement handle of the currently open cursor

    // Connection information
    std::string _user;
    std::string _pwd;
    std::string _dsn;

    // Attribute to store last number of fetched rows (SQL_ATTR_ROWS_FETCHED_PTR)
    SQLULEN _cursor_fetched;

    // Extract all character data as 2-byte Unicode (UTF-16/UCS-2)
    bool _char_as_wchar;

public:
    SqlAsaApi();
    ~SqlAsaApi();

    // Initialize API
    virtual int Init();

    // Set additional information about the driver type (no-op for ASA)
    virtual void SetDriverType(const char * /*info*/) {}

    // Set the connection string in the API object
    virtual void SetConnectionString(const char *conn);

    // Connect to the database
    virtual int Connect(size_t *time_spent);

    // Disconnect from the database
    virtual void Disconnect();
    // Deallocate the driver
    virtual void Deallocate();

    // Get row count for the specified object
    virtual int GetRowCount(const char *object, long *count, size_t *time_spent);

    // Execute the statement and get scalar result
    virtual int ExecuteScalar(const char *query, long *result, size_t *time_spent);

    // Execute the statement
    virtual int ExecuteNonQuery(const char *query, size_t *time_spent);

    // Open cursor and allocate fetch buffers sized by buffer_rows/buffer_memory
    virtual int OpenCursor(const char *query, long buffer_rows, long buffer_memory, long *col_count, long *allocated_array_rows,
        long *rows_fetched, SqlCol **cols, size_t *time_spent, bool catalog_query = false,
        std::list<SqlDataTypeMap> *dtmap = NULL);

    // Fetch next portion of data to allocate buffers
    virtual int Fetch(long *rows_fetched, size_t *time_spent);

    // Close the cursor and deallocate buffers
    virtual int CloseCursor();

    // Initialize the bulk copy from one database into another
    virtual int InitBulkTransfer(const char *table, long col_count, long allocated_array_rows, SqlCol *s_cols, SqlCol **t_cols);

    // Transfer rows between databases
    virtual int TransferRows(SqlCol *s_cols, long rows_fetched, long *rows_written, size_t *bytes_written,
        size_t *time_spent);

    // Whether this API allows reading from it in parallel with writing to another API
    virtual bool CanParallelReadWrite();

    // Complete bulk transfer
    virtual int CloseBulkTransfer();

    // Drop the table
    virtual int DropTable(const char* table, size_t *time_spent, std::string &drop_stmt);
    // Remove foreign key constraints referencing the parent table
    virtual int DropReferences(const char* table, size_t *time_spent);

    // Get the length of LOB column in the open cursor
    virtual int GetLobLength(long row, long column, long *length);
    // Get LOB content
    virtual int GetLobContent(long row, long column, void *data, long length, long *len_ind);
    // Get partial LOB content
    virtual int GetLobPart(long row, long column, void *data, long length, long *len_ind);

    // Get the list of available tables
    virtual int GetAvailableTables(std::string &select, std::string &exclude, std::list<std::string> &tables);

    // Read schema information
    virtual int ReadSchema(const char *select, const char *exclude, bool read_cns = true, bool read_idx = true);

    // Read specific catalog information
    virtual int ReadConstraintTable(const char *schema, const char *constraint, std::string &table);
    virtual int ReadConstraintColumns(const char *schema, const char *table, const char *constraint, std::string &cols);

    // Get a list of columns for specified primary or unique key
    virtual int GetKeyConstraintColumns(SqlConstraints &cns, std::list<std::string> &output, std::list<std::string> * = NULL);

    // Get a list of columns for specified foreign key
    virtual int GetForeignKeyConstraintColumns(SqlConstraints &cns, std::list<std::string> &fcols, std::list<std::string> &pcols, std::string &ptable);

    // Get a list of columns for specified index
    virtual int GetIndexColumns(SqlIndexes &idx, std::list<std::string> &idx_cols, std::list<std::string> &idx_sorts);

    // Get database type and subtype
    virtual short GetType() { return SQLDATA_ASA; }
    virtual short GetSubType() { return 0; }

    // Get data type name by its native ODBC code
    void GetDataTypeName(SQLSMALLINT native_dt, std::string &name);

    // Check whether identifier must be quoted
    bool IsQuoteRequired(const char *name);

private:
    int InitSession();

    // Set session attributes
    int SetSession();

    // Set version of the connected database
    void SetVersion();

    // Metadata reading functions
    int ReadTableColumns(std::string &condition);
    int ReadTableConstraints(std::string &condition);
    int ReadIndexes(std::string &condition);
    int ReadIndexColumns(std::string &condition);
    int ReadReferences(std::string &condition);
    int ReadForeignKeyColumns(std::string &condition);
    int ReadForeignKeyActions();
    int ReadCheckConstraints(std::string &condition);
    int ReadReservedWords();

    void SetPrimaryKeyColumn(SqlColMeta &col_meta);
    void SetUniqueConstraintColumn(SqlIndColumns &idx_col);
    void SetDefault(SqlColMeta &col_meta, const char *value, int len);
    void SetIdentity(SqlColMeta &col_meta, int id_max);

    // Set error code and message for the last ODBC API call
    void SetError(SQLSMALLINT handle_type, SQLHANDLE handle);
};
#endif // sqlines_sqlasaapi
| 34.7
| 148
| 0.758465
|
[
"object"
] |
c7e2aa82b1bce35eedc996f1877ab8ed9a971bfb
| 8,501
|
h
|
C
|
inc/tree/tree.h
|
ancientlore/hermit
|
0b6b5b3e364fe9a7080517a7f3ddb8cdb03f5b14
|
[
"MIT"
] | 1
|
2020-07-15T19:39:49.000Z
|
2020-07-15T19:39:49.000Z
|
inc/tree/tree.h
|
ancientlore/hermit
|
0b6b5b3e364fe9a7080517a7f3ddb8cdb03f5b14
|
[
"MIT"
] | null | null | null |
inc/tree/tree.h
|
ancientlore/hermit
|
0b6b5b3e364fe9a7080517a7f3ddb8cdb03f5b14
|
[
"MIT"
] | null | null | null |
/* ---------------------------------------------------------------------------
AVL Tree Object
An AVL tree (Adel'son-Vel'skii & Landis) is a binary search
tree in which the heights of the left and right subtrees
of the root differ by at most one and in which the left
and right subtrees are again AVL trees.
With each node of an AVL tree is associated a balance factor
that is Left High, Equal, or Right High according,
respectively, as the left subtree has height greater than,
equal to, or less than that of the right subtree.
The AVL tree is, in practice, balanced quite well. It can
(at the worst case) become skewed to the left or right,
but never so much that it becomes inefficient. The
balancing is done as items are added or deleted.
You will use two classes:
Tree
TreeIterator
See also: Robert L. Kruse, Data Structures and Program Design, 2nd Ed.
Prentice-Hall
Copyright (C) 1993 Michael D. Lore
All Rights Reserved.
--------------------------------------------------------------------------- */
#ifndef TREE_H
#define TREE_H
#include "common.h"
#include "tree/heap.h"
#include "tree/sortable.h"
// NOTE: The default heap page size works well for the Tree. The node
// size is currently 14 bytes, which results in pages of 64 * 14 bytes
// (896 bytes). This is not overly wasteful for small trees (less than
// 300 elements) and still improves performance significantly on large
// trees (up to 5000 elements). You may use the following constants to
// determine the correct heap page size:
const int HEAP_TREE_PAGE_SIZE = HEAP_DEF_PAGE_SIZE; // default, < 5,000 elem.
const int HEAP_TREE_GT_5K = 256; // 5,000 to 50,000 elements
const int HEAP_TREE_GT_50K = 512; // more than 50,000 elements
// --------------------------------------------------------------------------
// Node Object
// --------------------------------------------------------------------------
// Balance state of an AVL node: which subtree (if any) is the taller one.
enum BalanceFactor { LeftHigh, Equal, RightHigh };

// One AVL tree node: a non-owning pointer to the user's Sortable element
// plus the bookkeeping the tree needs for rebalancing and order statistics
// (per-subtree node counts).
struct Node
{
    const Sortable *ptr;   // element stored at this node (not owned here)
    unsigned char bal;     // holds a BalanceFactor value
    unsigned long size;    // number of nodes in the subtree rooted here
    Node *left;            // left child, or 0
    Node *right;           // right child, or 0

    // Node cannot get an error so no Error() is provided.

    // Size of the left subtree; 0 when there is no left child.
    unsigned long leftSize()  { return left  ? left->size  : 0; }

    // Size of the right subtree; 0 when there is no right child.
    unsigned long rightSize() { return right ? right->size : 0; }

    void init(const Sortable *obj);
    void kill();
};
// --------------------------------------------------------------------------
// Function Types
// --------------------------------------------------------------------------
// Ordering callback for tree elements -- presumably strcmp-style
// (negative/zero/positive); confirm against Tree::SortableCompare.
typedef int (*CompareFunc)(const Sortable&, const Sortable&);
// Visitor callback invoked once per element by Tree::iterate().
typedef void (*IterateFunc)(const Sortable&);
// --------------------------------------------------------------------------
// Tree Object
// --------------------------------------------------------------------------
// AVL (height-balanced binary search) tree of non-owned Sortable pointers.
// Ownership of elements is controlled by the OwnsElements flag; nodes are
// allocated from an internal Heap for locality and low overhead.
class Tree
{
    // Pointer to data set:
    Node *root;                   // root node
    Heap nodeHeap;                // for node allocation
    int treeFlags;                // OwnsElements / AllowDuplicates bits

    // These help us keep from passing lots of stuff on the stack:
    const Sortable *lookingFor;   // pointer to search/insert data
    CompareFunc compare;          // compare function
    IterateFunc _iterate;         // iterate function
    unsigned lineDepth;           // used in printing the tree
    unsigned itemSize;            // used in printing the tree
    char *array;                  // used in printing the tree
    const Sortable *duplicate;    // points to duplicate found in add

    // Maintain maximum level we encounter during add:
    unsigned currentLevel;        // current level number (root = 1)
    unsigned maxLevel;            // maximum encountered level

    // Internal routines used to add to the tree:
    Node *newNode(const Sortable *obj);
    const Sortable *add(Node *&node, int& taller);
    void leftBalance(Node *&node, int& taller);
    void rightBalance(Node *&node, int& taller);

    // Internal rotation primitives used by the balancing routines
    void rotateRight(Node *&node);
    void rotateLeft(Node *&node);

    // Internal routines used to delete from the tree:
    Sortable *remove(Node *&node, int& shorter);
    Node *findPredecessor(Node *node);
    void removePredecessor(Node *&node, int& shorter);
    void remLeftSubBalance(Node *&node, int& shorter);
    void remRightSubBalance(Node *&node, int& shorter);
    void remLeftBalance(Node *&node, int& shorter);
    void remRightBalance(Node *&node, int& shorter);
    void remNode(Node *&node);
    Sortable *remove(Node *&node, unsigned long index, int& shorter);

    // Internal routines used for search/iterate/flush:
    Node *finder(Node *node) const;
    void iterator(Node *node) const;
    void flusher(Node *node);
    void printer(Node *node) const;
    void levelCalc(Node *node) const;
    Node *indexer(Node *node, unsigned long index) const;

    // standard Sortable compare function
    static int SortableCompare(const Sortable&, const Sortable&);

    // No copy of a tree (declared, never defined)
    Tree (const Tree&);
    Tree& operator = (const Tree&);

public:
    enum { OwnsElements = 1, AllowDuplicates = 2 };

    // Public Routines:
    Tree(int flags = Tree::OwnsElements,
         CompareFunc func = Tree::SortableCompare,
         unsigned pageCnt = HEAP_TREE_PAGE_SIZE);
    ~Tree();

    // You must give Add a pointer that will exist for the lifetime of the tree
    // If Add returns 0, it failed. if you set dupl, it will point to the
    // node where we would have added the ptr but found a duplicate (useful
    // for updating records)
    const Sortable *add(const Sortable *ptr, const Sortable **dupl = 0);

    // Remove returns a pointer to the object removed which YOU MUST DELETE!
    // The ptr is just used for comparison
    Sortable *remove(const Sortable *ptr);
    Sortable *remove(unsigned long index);

    // Flush will delete elements if ownsElements is true
    void flush();

    // Find returns a pointer to an element in the tree; do not delete it
    // key is just used for comparison
    const Sortable *find(const Sortable *key) const;

    // Order-statistic access: element at 0-based in-order position 'index'
    const Sortable *operator [] (unsigned long index) const;

    // In-order traversal calling func on each element
    void iterate(IterateFunc func) const;

    // determine number of levels in tree and number of elements
    unsigned getMaxLevel() const;
    unsigned long size() const
    {
        if (root) return root->size;
        return 0;
    }
    unsigned long getMaxItems() const;

    // determine amount of node space wasted
    unsigned long nodeUsed() const { return nodeHeap.used(); }
    unsigned long nodeAvail() const { return nodeHeap.avail(); }
    unsigned nodePages() const { return nodeHeap.pageCount(); }

    // print the tree format (for debugging)
    void printTree(IterateFunc func, unsigned itemSiz) const;

    friend class TreeIterator;
};
// --------------------------------------------------------------------------
// TreeIterator Object
// --------------------------------------------------------------------------
// ----- Node visited enumeration -----
// Bit flags recording which parts of a node the iterator has visited so far
// (left subtree, the node itself, right subtree).
enum NodeVisited
{
    VisitNone = 0,
    VisitLeft = 1,
    VisitThis = 2,
    VisitRight = 4
};

// ----- Node Holder (item on the node stack) -----
struct NodeHolder
{
    Node *node;      // node being traversed
    int visited;     // NodeVisited bits for this node
    NodeHolder(Node *nod = 0);
    // NodeHolder cannot get an error so no Error() is provided.
};

// ----- Tree Iterator -----
// In-order (or reverse in-order) traversal of a Tree using an explicit
// stack of NodeHolder entries instead of recursion.
class TreeIterator
{
    Tree& tree;          // tree to iterate
    NodeHolder *stack;   // stack
    unsigned stackSize;  // size of stack
    unsigned stackPtr;   // ptr to next item to add to stack
    NodeHolder empty;    // returned for errors in stack
    int direction;       // direction of iteration

    void push(NodeHolder& node);
    NodeHolder& pop();
    NodeHolder& top() const;

    // No copy of a tree iterator (declared, never defined)
    TreeIterator (const TreeIterator&);
    TreeIterator& operator = (const TreeIterator&);

public:
    enum { Forward, Reverse };

    TreeIterator(Tree& aTree, int direc = Forward); // throws ERR_OUT_OF_MEMORY
    ~TreeIterator();

    // int Error() { return (stack == 0 && tree.GetMaxLevel() > 0); }

    void reset(); // throws OutOfMemory
    int done() const;

    // do not delete elements returned from operator const Sortable * ; they
    // are in the tree
    operator const Sortable * () const;
    TreeIterator& operator ++ (int);
};
#endif
/* End */
| 29.313793
| 80
| 0.607811
|
[
"object"
] |
b5131331f559446647d112d912ffe161ddd11194
| 3,486
|
h
|
C
|
hcp/src/general/sets/hcp_static_polytope.h
|
bgaldrikian/homogeneous_closest_point
|
2d27c9a1710b0513be63e33a53527737776f349e
|
[
"MIT"
] | null | null | null |
hcp/src/general/sets/hcp_static_polytope.h
|
bgaldrikian/homogeneous_closest_point
|
2d27c9a1710b0513be63e33a53527737776f349e
|
[
"MIT"
] | null | null | null |
hcp/src/general/sets/hcp_static_polytope.h
|
bgaldrikian/homogeneous_closest_point
|
2d27c9a1710b0513be63e33a53527737776f349e
|
[
"MIT"
] | null | null | null |
// Copyright (c) 2014-2018 NVIDIA Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef _HCP_STATIC_POLYTOPE_H_
#define _HCP_STATIC_POLYTOPE_H_
#include "math/linalg.h"
#include <limits>
/**
Implementation of HCP_Halfspace_Set, an explicit representation of a finite number of half-spaces.
The shape represented is the interesection of the half-spaces.
*/
/**
    An HCP_Halfspace_Set backed by an explicit, finite array of half-spaces.
    The represented shape is the intersection of all half-spaces.  The
    half-space array is referenced, not copied, so it must outlive this object.
*/
struct HCP_Static_Polytope : public virtual HCP_Halfspace_Set
{
    /** Default ctor: zero spatial dimensions and an empty half-space list. */
    HCP_Static_Polytope() : m_D(0), m_halfspace_count(0), m_halfspaces(nullptr) {}

    /**
        Create with D spatial dimensions and an empty half-space list.

        \param[in] D the number of spatial dimensions.
    */
    HCP_Static_Polytope(uint D) : m_D(D), m_halfspace_count(0), m_halfspaces(nullptr) {}

    /**
        Create with D spatial dimensions and a half-space list.

        \param[in] D               the number of spatial dimensions.
        \param[in] halfspace_count the number of half-spaces.
        \param[in] halfspaces      array of half-space vectors, stored as contiguous
                                   (D+1)-vectors holding the bounding-plane equations.
    */
    HCP_Static_Polytope(uint D, size_t halfspace_count, const real* halfspaces) : m_D(D)
    {
        set_halfspaces(halfspace_count, halfspaces);
    }

    /**
        Replace the half-space list represented by this object.

        \param[in] halfspace_count the number of half-spaces.
        \param[in] halfspaces      array of contiguous (D+1)-vector plane equations.
    */
    void
    set_halfspaces(size_t halfspace_count, const real* halfspaces)
    {
        m_halfspace_count = halfspace_count;
        m_halfspaces = halfspaces;
    }

    /**
        HCP_Halfspace_Set interface: find the half-space whose plane equation has
        the greatest dot product with 'point', copy its plane into 'plane', and
        return that dot product.
    */
    real
    farthest_halfspace(real* plane, const real* point) const override
    {
        const size_t stride = m_D + 1;
        const real* best = m_halfspaces;
        real best_s = -std::numeric_limits<real>().max();
        for (size_t i = 0; i < m_halfspace_count; ++i)
        {
            const real* candidate = m_halfspaces + i*stride;
            const real s = la::dot(point, candidate, stride);
            if (s > best_s)
            {
                best_s = s;
                best = candidate;
            }
        }
        if (best) la::cpy(plane, best, stride);
        else la::zero(plane, stride);
        return best_s;
    }

protected:
    uint m_D;
    size_t m_halfspace_count;
    const real* m_halfspaces;
};
#endif // #ifndef _HCP_STATIC_POLYTOPE_H_
| 35.212121
| 161
| 0.742972
|
[
"object",
"shape"
] |
b51532d533c1767227cbc8952129dc6843270ba7
| 4,185
|
c
|
C
|
src/tss2-init-helper.c
|
flihp/gio-tss2-async-example
|
d39773b5df439d2e90f2c4c3b558b487622e5142
|
[
"BSD-2-Clause"
] | 1
|
2018-07-05T22:04:54.000Z
|
2018-07-05T22:04:54.000Z
|
src/tss2-init-helper.c
|
flihp/gio-tss2-async-example
|
d39773b5df439d2e90f2c4c3b558b487622e5142
|
[
"BSD-2-Clause"
] | null | null | null |
src/tss2-init-helper.c
|
flihp/gio-tss2-async-example
|
d39773b5df439d2e90f2c4c3b558b487622e5142
|
[
"BSD-2-Clause"
] | null | null | null |
/*
* Copyright (c) 2018, Intel Corporation
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <dlfcn.h>
#include <errno.h>
#include <glib.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tss2/tss2_tpm2_types.h>
#include <tss2/tss2_tcti.h>
#include "tss2-init-helper.h"
/*
* This function does all the dl* magic required to get a reference to a TCTI
* modules TSS2_TCTI_INFO structure. A successful call will return
* TSS2_RC_SUCCESS, a reference to the info structure in the 'info' parameter
* and a reference to the dlhandle returned by dlopen. The caller will need
* to close this handle after they're done using the TCTI.
*/
/*
 * Resolve a TCTI module's TSS2_TCTI_INFO structure.
 *
 * 'filename' is first passed verbatim to dlopen; if that fails it is
 * transformed into the conventional "libtss2-tcti-<name>.so.0" form and
 * retried.  On TSS2_RC_SUCCESS, *info references the module's info
 * structure and *tcti_dl_handle holds the dlopen handle, which the caller
 * must dlclose once done using the TCTI.  On failure *tcti_dl_handle is
 * NULL (fix: previously a closed handle could be left behind after a
 * dlsym failure).
 */
TSS2_RC
tcti_get_info (const char *filename,
               const TSS2_TCTI_INFO **info,
               void **tcti_dl_handle)
{
    TSS2_TCTI_INFO_FUNC info_func;
    gchar filename_xfrm [PATH_MAX];
    size_t size;

    g_debug ("%s", __func__);
    *tcti_dl_handle = dlopen (filename, RTLD_LAZY);
    if (*tcti_dl_handle == NULL) {
        /* Fall back to the standard TCTI library naming convention. */
        size = snprintf (filename_xfrm,
                         sizeof (filename_xfrm),
                         "libtss2-tcti-%s.so.0",
                         filename);
        if (size >= sizeof (filename_xfrm)) {
            g_critical ("TCTI name truncated in transform.");
            return TSS2_TCTI_RC_BAD_VALUE;
        }
        g_debug ("dlopen failed on \"%s\", trying \"%s\"",
                 filename, filename_xfrm);
        *tcti_dl_handle = dlopen (filename_xfrm, RTLD_LAZY);
        if (*tcti_dl_handle == NULL) {
            g_warning ("failed to dlopen library: %s", filename);
            return TSS2_TCTI_RC_BAD_VALUE;
        }
    }
    info_func = dlsym (*tcti_dl_handle, TSS2_TCTI_INFO_SYMBOL);
    if (info_func == NULL) {
        g_warning ("Failed to get reference to symbol: %s", dlerror ());
        /* Don't hand the caller a closed (dangling) handle. */
        dlclose (*tcti_dl_handle);
        *tcti_dl_handle = NULL;
        return TSS2_TCTI_RC_BAD_VALUE;
    }
    *info = info_func ();
    return TSS2_RC_SUCCESS;
}
/*
* This function allocates and initializes a TCTI context structure using the
* initialization function in the provide 'info' parameter according to the
* provided configuration string. The caller must deallocate the reference
* returned in the 'context' parameter when TSS2_RC_SUCCESS is returned.
*/
/*
 * Allocate and initialize a TCTI context using the init function from the
 * provided 'info' structure and the TCTI-specific configuration string
 * 'conf'.  On TSS2_RC_SUCCESS the caller owns *context and must release it
 * (it is allocated with g_malloc0) after finalizing the TCTI.
 */
TSS2_RC
tcti_init_from_info (const TSS2_TCTI_INFO *info,
                     const char *conf,
                     TSS2_TCTI_CONTEXT **context)
{
    TSS2_RC rc = TSS2_RC_SUCCESS;
    size_t ctx_size;

    g_debug ("%s", __func__);
    if (info == NULL || info->init == NULL) {
        g_warning ("%s: TCTI_INFO structure or init function pointer is NULL, "
                   "cannot initialize context.", __func__);
        return TSS2_TCTI_RC_BAD_VALUE;
    }
    /* Standard TSS2 two-call pattern: a NULL context queries the size. */
    rc = info->init (NULL, &ctx_size, NULL);
    if (rc != TSS2_RC_SUCCESS) {
        g_warning ("failed to get size for device TCTI context structure: "
                   "0x%x", rc);
        goto out;
    }
    *context = g_malloc0 (ctx_size);
    /* NOTE(review): g_malloc0 aborts on allocation failure rather than
     * returning NULL, so this branch looks unreachable -- kept as
     * defensive code. */
    if (*context == NULL) {
        g_warning ("failed to allocate memory");
        rc = TSS2_TCTI_RC_GENERAL_FAILURE;
        goto out;
    }
    /* Second call performs the actual initialization. */
    rc = info->init (*context, &ctx_size, conf);
    if (rc != TSS2_RC_SUCCESS) {
        g_warning ("failed to initialize device TCTI context: 0x%x", rc);
        /* Don't leak or expose a half-initialized context. */
        g_free (*context);
        *context = NULL;
    }
out:
    return rc;
}
/*
 * Allocate and initialize a TSS2 System API (SAPI) context bound to the
 * given TCTI context.  Returns NULL on failure.  On success the caller owns
 * the returned context and must release it with free() after finalizing.
 *
 * Fix: the log messages previously ended in a literal "n" ("contextn",
 * "0x%xn") -- the newline escapes were missing their backslashes.
 */
TSS2_SYS_CONTEXT*
sys_init_from_tcti (TSS2_TCTI_CONTEXT *tcti_ctx)
{
    TSS2_SYS_CONTEXT *sapi_ctx;
    TSS2_RC rc;
    size_t size;
    TSS2_ABI_VERSION abi_version = TSS2_ABI_VERSION_CURRENT;

    g_debug ("%s", __func__);
    /* Query the required allocation size for the SAPI context. */
    size = Tss2_Sys_GetContextSize (0);
    g_debug ("TSS2 SYS context size: %zu", size);
    sapi_ctx = (TSS2_SYS_CONTEXT*)calloc (1, size);
    if (sapi_ctx == NULL) {
        g_critical ("Failed to allocate 0x%zx bytes for the SAPI context\n",
                    size);
        return NULL;
    }
    rc = Tss2_Sys_Initialize (sapi_ctx, size, tcti_ctx, &abi_version);
    if (rc != TSS2_RC_SUCCESS) {
        g_critical ("Failed to initialize SAPI context: 0x%x\n", rc);
        free (sapi_ctx);
        return NULL;
    }
    return sapi_ctx;
}
| 32.192308
| 79
| 0.624134
|
[
"transform"
] |
b51f35e84eeec8f88d7242ce956c1672775fe721
| 1,141
|
h
|
C
|
cpp/include/krypto/instruments/server.h
|
krypto-org/krypto
|
13cf9fd70df5da9f4d804d02a0a93c5ebf7142f2
|
[
"Apache-2.0"
] | 1
|
2019-04-09T22:37:46.000Z
|
2019-04-09T22:37:46.000Z
|
cpp/include/krypto/instruments/server.h
|
kapilsh/krypto
|
13cf9fd70df5da9f4d804d02a0a93c5ebf7142f2
|
[
"Apache-2.0"
] | 19
|
2019-06-19T01:15:10.000Z
|
2019-09-06T00:27:20.000Z
|
cpp/include/krypto/instruments/server.h
|
krypto-org/krypto
|
13cf9fd70df5da9f4d804d02a0a93c5ebf7142f2
|
[
"Apache-2.0"
] | null | null | null |
#pragma once
#include <unordered_set>
#include <boost/algorithm/string.hpp>
#include <krypto/utils/common.h>
#include <krypto/network/rpc/worker.h>
#include <krypto/config.h>
#include <krypto/instruments/loader.h>
#include <krypto/serialization/serialization_generated.h>
#include <krypto/utils/types.h>
namespace krypto::instruments {
    // Serves instrument reference data to clients over a ZMQ socket.
    // Request handling and the serve loop are implemented in the .cpp file.
    class Server final {
    private:
        InstrumentLoader store_;                                // backing loader for instrument definitions
        std::vector<krypto::utils::Instrument> cache_;          // instruments held in memory
        std::unique_ptr<zmq::socket_t> socket_;                 // socket used to answer requests
        std::string endpoint_;                                  // address the socket is bound/connected to
        std::atomic_bool running_;                              // loop flag -- presumably cleared by stop(); confirm in .cpp
        flatbuffers::FlatBufferBuilder fb_builder_;             // builder reused across responses
        std::unordered_set<std::string> active_instruments_;    // symbols tracked as active
        std::unordered_set<std::string> sandbox_instruments_;   // symbols tracked for sandbox use

        // Handle one incoming message of the given type (semantics in .cpp).
        bool process(const zmq::message_t &msg, krypto::utils::MsgType msg_type);

        // Build and send the response for an instrument request.
        void process_request(const krypto::serialization::InstrumentRequest *request);

        // Split a delimited symbol-list string into individual symbols
        // (delimiter defined by the boost::split usage in the .cpp -- confirm).
        static std::vector<std::string> parse_symbols(std::string list);

    public:
        Server(zmq::context_t &context, const krypto::Config &config);

        void start();

        void stop();
    };
}
| 86
| 0.693252
|
[
"vector"
] |
b5285bd48c71cbe3b25241c1bc23d54ec52150c3
| 3,317
|
h
|
C
|
pxr/imaging/hdx/task.h
|
yurivict/USD
|
3b097e3ba8fabf1777a1256e241ea15df83f3065
|
[
"Apache-2.0"
] | 1
|
2022-03-16T01:40:10.000Z
|
2022-03-16T01:40:10.000Z
|
pxr/imaging/hdx/task.h
|
yurivict/USD
|
3b097e3ba8fabf1777a1256e241ea15df83f3065
|
[
"Apache-2.0"
] | null | null | null |
pxr/imaging/hdx/task.h
|
yurivict/USD
|
3b097e3ba8fabf1777a1256e241ea15df83f3065
|
[
"Apache-2.0"
] | 1
|
2018-10-03T19:08:33.000Z
|
2018-10-03T19:08:33.000Z
|
//
// Copyright 2018 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
// names, trademarks, service marks, or product names of the Licensor
// and its affiliates, except as required to comply with Section 4(c) of
// the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#ifndef PXR_IMAGING_HDX_TASK_H
#define PXR_IMAGING_HDX_TASK_H
#include "pxr/pxr.h"
#include "pxr/imaging/hdx/api.h"
#include "pxr/imaging/hd/task.h"
PXR_NAMESPACE_OPEN_SCOPE
class Hgi;
/// \class HdxTask
///
/// Base class for (some) tasks in Hdx that provides common progressive
/// rendering and Hgi functionality.
///
/// Tasks that require neither progressive rendering nor Hgi can continue to
/// derive directly from HdTask.
///
class HdxTask : public HdTask
{
public:
    HDX_API
    HdxTask(SdfPath const& id);

    HDX_API
    ~HdxTask() override;

    /// This function returns true when a (progressive) task considers its
    /// execution results converged. Usually this means that a progressive
    /// render delegate is finished rendering into the HdRenderBuffers used by
    /// this task.
    /// Returns true by default which is a good default for rasterizers.
    ///
    /// Applications with data-driven task lists can determine their convergence
    /// state by determining which tasks are HdxTasks and then querying
    /// specifically those tasks for IsConverged.
    HDX_API
    virtual bool IsConverged() const;

    /// We override HdTask::Sync, but make it 'final' to make sure derived
    /// classes can't override it and instead override _Sync.
    /// This 'non-virtual interface'-like pattern allows us to ensure we always
    /// initialized Hgi during the Sync task so derived classes don't have to.
    void Sync(
        HdSceneDelegate* delegate,
        HdTaskContext* ctx,
        HdDirtyBits* dirtyBits) override final;

protected:
    // This is called during the hydra Sync Phase via HdxTask::Sync.
    // Please see HdTask::Sync for Sync Phase documentation.
    virtual void _Sync(
        HdSceneDelegate* delegate,
        HdTaskContext* ctx,
        HdDirtyBits* dirtyBits) = 0;

    // Swaps the color target and colorIntermediate target.
    // This is used when a task wishes to read from the color and also write
    // to it. We use two color targets and ping-pong between them.
    void _ToggleRenderTarget(HdTaskContext* ctx);

    // Return pointer to Hydra Graphics Interface.
    HDX_API
    Hgi* _GetHgi() const;

    // Cached Hgi pointer exposed through _GetHgi(); presumably initialized
    // during Sync and not owned by this task -- confirm in the .cpp.
    Hgi* _hgi;
};
PXR_NAMESPACE_CLOSE_SCOPE
#endif
| 33.846939
| 80
| 0.720832
|
[
"render"
] |
b53222d9a6cc967c14a07e3c8c615996f18c4592
| 2,953
|
h
|
C
|
warzone usermode/DirectOverlay.h
|
ALEHACKsp/Warzone-Cheat
|
ed3796bee212aa3229c33ebc562f05d6d4e6815c
|
[
"MIT"
] | 6
|
2021-09-08T17:37:10.000Z
|
2022-01-04T17:17:08.000Z
|
warzone usermode/DirectOverlay.h
|
xEnething/Warzone-Cheat
|
ed3796bee212aa3229c33ebc562f05d6d4e6815c
|
[
"MIT"
] | null | null | null |
warzone usermode/DirectOverlay.h
|
xEnething/Warzone-Cheat
|
ed3796bee212aa3229c33ebc562f05d6d4e6815c
|
[
"MIT"
] | 5
|
2021-09-24T17:10:59.000Z
|
2022-01-26T05:20:50.000Z
|
#include "driver.h"
#include <iostream>
#include "xor.hpp"
#include <vector>
#include <string>
#include <d3d9.h>
#include <d3dx9.h>
// Link the static library (make sure that file is in the same directory as this file)
#pragma comment(lib, "D2DOverlay.lib")

// Option flags for DirectOverlaySetOption():

// Requires the targeted window to be active and the foreground window to draw.
#define D2DOV_REQUIRE_FOREGROUND (1 << 0)
// Draws the FPS of the overlay in the top-right corner
#define D2DOV_DRAW_FPS (1 << 1)
// Attempts to limit the frametimes so you don't render at 500fps
#define D2DOV_VSYNC (1 << 2)
// Sets the text font to Calibri
#define D2DOV_FONT_CALIBRI (1 << 3)
// Sets the text font to Arial
#define D2DOV_FONT_ARIAL (1 << 4)
// Sets the text font to Courier
#define D2DOV_FONT_COURIER (1 << 5)
// Sets the text font to Gabriola
#define D2DOV_FONT_GABRIOLA (1 << 6)
// Sets the text font to Impact
#define D2DOV_FONT_IMPACT (1 << 7)

// Sets the above options. Make sure it is called before DirectOverlaySetup.
void DirectOverlaySetOption(DWORD option);

// Callback type in which you do the drawing; receives the overlay's width and height.
typedef void(*DirectOverlayCallback)(int width, int height);

// Initializes the overlay window and the thread that runs it. Pass in your callback function.
// Targets the first window of the current process. If you're external, use the next overload.
void DirectOverlaySetup(DirectOverlayCallback callbackFunction);

// Same as above, but the target window is specified manually (for external use).
void DirectOverlaySetup(DirectOverlayCallback callbackFunction, HWND targetWindow);

// Draws a line from (x1, y1) to (x2, y2), with a specified thickness.
// Specify the color, and optionally an alpha, for the line.
void DrawLine(float x1, float y1, float x2, float y2, float thickness, float r, float g, float b, float a = 1);

// Draws a rectangle on the screen. Width and height are relative to (x, y).
// Pass filled=true for a solid rectangle (thickness is then ignored);
// pass filled=false and a thickness to draw only the border.
void DrawBox(float x, float y, float width, float height, float thickness, float r, float g, float b, float a, bool filled);

// Draws a circle. As with DrawBox, filled=true makes it solid; thickness is only used when filled=false.
void DrawCircle(float x, float y, float radius, float thickness, float r, float g, float b, float a, bool filled);

// Draws an ellipse. Same as a circle, except with separate radii for width and height.
void DrawEllipse(float x, float y, float width, float height, float thickness, float r, float g, float b, float a, bool filled);

// Draws a string on the screen. Input is in the form of an std::string.
void DrawString(std::string str, float fontSize, float x, float y, float r, float g, float b, float a = 1);
| 45.430769
| 133
| 0.740941
|
[
"render",
"vector",
"solid"
] |
b53e8c25b5fa02f9f46c675c9e80a60be003f9bc
| 3,361
|
h
|
C
|
GCanvas/ios/Classes/GCVCommon.h
|
nicholasalx/GCanvas
|
b65dc955e62cad69b65cad35c1e7c920eaf9cdfd
|
[
"Apache-2.0"
] | 1,780
|
2018-02-26T12:39:01.000Z
|
2022-03-31T05:26:52.000Z
|
GCanvas/ios/Classes/GCVCommon.h
|
nicholasalx/GCanvas
|
b65dc955e62cad69b65cad35c1e7c920eaf9cdfd
|
[
"Apache-2.0"
] | 163
|
2018-02-28T09:46:00.000Z
|
2022-02-12T03:39:27.000Z
|
GCanvas/ios/Classes/GCVCommon.h
|
nicholasalx/GCanvas
|
b65dc955e62cad69b65cad35c1e7c920eaf9cdfd
|
[
"Apache-2.0"
] | 208
|
2018-03-01T13:12:02.000Z
|
2022-03-24T06:50:00.000Z
|
/**
* Created by G-Canvas Open Source Team.
* Copyright (c) 2017, Alibaba, Inc. All rights reserved.
*
* This source code is licensed under the Apache Licence 2.0.
* For the full copyright and license information, please view
* the LICENSE file in the root directory of this source tree.
*/
#ifndef GCVCommon_h
#define GCVCommon_h
#import <Foundation/Foundation.h>
#import <GLKit/GLKit.h>
#import <CoreText/CoreText.h>
#import "GCVLog.h"
#define GCVWeakSelf __weak __typeof(self) weakSelf = self;
#define GCVStrongSelf __strong __typeof(weakSelf) strongSelf = weakSelf;
#define GCVStrongSelfSafe GCVStrongSelf;if (!strongSelf) return;
#define GCVSharedInstanceIMP \
static id sharedInstance = nil; \
static dispatch_once_t onceToken; \
dispatch_once(&onceToken, ^{ \
sharedInstance = [[self alloc] init]; \
}); \
return sharedInstance;
/**
* Called when image load finished.
* @param image The current image
* @param error error
* @param finished image load reuslt
* @param imageURL URL
*/
typedef void(^GCVLoadImageCompletion)(UIImage *image, NSError *error, BOOL finished, NSURL *imageURL);
/**
* GCVImageLoaderProtocol, definiton load image Protocol.
* plugin implement use SDWebImage
*/
@protocol GCVImageLoaderProtocol
@optional
/**
* Load image from url, call completion.
* @param url image url
* @param completion GCVLoadImageCompletion
*/
- (void)loadImage:(NSURL*)url completed:(GCVLoadImageCompletion)completion;
@end
/**
* GCVImageCache, image relevant class with property, UIImage、textureId and id form JS image
*/
@interface GCVImageCache : NSObject
@property(nonatomic, assign) CGFloat width;
@property(nonatomic, assign) CGFloat height;
@property(nonatomic, strong) UIImage* image;
/**
* UIImage bind to textureId, see GCVCommon:bindTexture.
*/
//@property(nonatomic, assign) GLuint textureId;
/**
* An auto increase id, match with imageStr, from JavaScript
*/
@property (nonatomic, assign) NSUInteger jsTextreId;
@end
@interface GCVCommon : NSObject
/**
* GCVImageLoaderProtocol delegate
*/
@property(nonatomic, weak) id<GCVImageLoaderProtocol> imageLoader;
+ (instancetype)sharedInstance;
/**
* Bind a UIImage to a textureId.
* @param image current image
*
* return textureId
*/
+ (GLuint)bindTexture:(UIImage *)image;
/**
* Create image with text 2d textureId.
* @param target [in] textureId
* @param image [out] UIImage
*/
//+ (void)textImage2D:(GLenum)target withImage:(UIImage *)image;
/**
* Preload image with imageStr and idFromJS, when image load finished call completion with GCVImageCache.
* @param imageStr current image source
* @param completion callback preload
*/
- (void)addPreLoadImage:(NSString *)imageStr completion:(void (^)(GCVImageCache*, BOOL))completion;
/**
* Fetch cached GCVImageCache with imageStr.
* @param imageStrkey current image source as key, format:instance_url
*
* return GCVImageCache object if exist, or nil
*/
- (GCVImageCache *)fetchLoadImage:(NSString *)imageStrkey;
/**
* remove cached GCVImageCache with imageStr.
* @param imageStrKey current image source as key, format:instance_url
*
*/
- (void)removeLoadImage:(NSString*)imageStrKey;
/**
* Clear all preload and hadload image cache.
*/
- (void)clearLoadImageDict;
@end
#endif /* GCVCommon_h */
| 24.179856
| 105
| 0.725974
|
[
"object"
] |
b53f26f56ed19a9f1c92a259442940f44bd6e7ff
| 647
|
h
|
C
|
JRPG/JRPG/BattleTick.h
|
JackMalone1/JRPG
|
f148b8f9ccef86d95dd8ccc165167ece83c3b0ea
|
[
"MIT"
] | 1
|
2020-05-18T18:51:16.000Z
|
2020-05-18T18:51:16.000Z
|
JRPG/JRPG/BattleTick.h
|
JackMalone1/JRPG
|
f148b8f9ccef86d95dd8ccc165167ece83c3b0ea
|
[
"MIT"
] | null | null | null |
JRPG/JRPG/BattleTick.h
|
JackMalone1/JRPG
|
f148b8f9ccef86d95dd8ccc165167ece83c3b0ea
|
[
"MIT"
] | null | null | null |
#pragma once
#include "IState.h"
#include "StateMachine.h"
#include "Action.h"
class BattleTick :
public IState
{
StateMachine& states;
std::vector<std::shared_ptr<Action>>& actions;
public:
BattleTick(StateMachine& stateMachine, std::vector<std::shared_ptr<Action>>& t_actions);
virtual void update(float dt) override;
virtual void render(sf::RenderWindow& window) override;
virtual void onEnter() override;
virtual void onEnter(std::string mapName) override;
virtual void onEnter(std::vector<std::shared_ptr<Entity>> t_entities) override;
virtual void onEnter(std::shared_ptr<Action>) override {};
virtual void onExit() override;
};
| 30.809524
| 89
| 0.763524
|
[
"render",
"vector"
] |
b53f98b80322a537fe2d275bb1b5eee9fb78eaf3
| 44,553
|
c
|
C
|
test/vstore/src/MountContentTypeRefs.c
|
uhayat/corto
|
159c741a7a8cd3b0ecfd57d40e91aaa5ad1a1a06
|
[
"MIT"
] | 95
|
2015-09-24T02:08:51.000Z
|
2021-01-27T05:15:12.000Z
|
test/vstore/src/MountContentTypeRefs.c
|
uhayat/corto
|
159c741a7a8cd3b0ecfd57d40e91aaa5ad1a1a06
|
[
"MIT"
] | 368
|
2015-08-27T03:58:07.000Z
|
2018-11-27T18:32:35.000Z
|
test/vstore/src/MountContentTypeRefs.c
|
Seldomberry/cortex
|
159c741a7a8cd3b0ecfd57d40e91aaa5ad1a1a06
|
[
"MIT"
] | 16
|
2015-11-05T06:00:15.000Z
|
2019-04-03T19:27:21.000Z
|
/* This is a managed file. Do not delete this comment. */
#include <include/test.h>
void test_MountContentTypeRefs_setup(
test_MountContentTypeRefs this)
{
corto_create(root_o, "config/helloworld", corto_void_o);
corto_create(root_o, "data/foo", corto_void_o);
corto_create(root_o, "data/foo/bar", corto_void_o);
corto_create(root_o, "data/foo/bar/hello", corto_void_o);
corto_create(root_o, "data/foo/bar/hello/world", corto_void_o);
corto_create(root_o, "data/foo/bar/hello/world/test", corto_void_o);
}
void test_MountContentTypeRefs_tc_selectDataFromMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("refs")
.from("/data")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo\","
"\"inside_from\":\"foo/bar\","
"\"inside_from_nested1\":\"foo/bar/hello\","
"\"inside_from_nested2\":\"foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, ".");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataFromMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("parent/refs")
.from("/data")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"parent/foo\","
"\"inside_from\":\"parent/foo/bar\","
"\"inside_from_nested1\":\"parent/foo/bar/hello\","
"\"inside_from_nested2\":\"parent/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\".\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "parent");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataFromMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/grandparent/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("grandparent/parent/refs")
.from("/data")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"grandparent/parent/foo\","
"\"inside_from\":\"grandparent/parent/foo/bar\","
"\"inside_from_nested1\":\"grandparent/parent/foo/bar/hello\","
"\"inside_from_nested2\":\"grandparent/parent/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\".\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "grandparent/parent");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataFromMountAtRoot(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("refs")
.from("/")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo\","
"\"inside_from\":\"foo/bar\","
"\"inside_from_nested1\":\"foo/bar/hello\","
"\"inside_from_nested2\":\"foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, ".");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested1FromMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("../refs")
.from("/data/foo")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"bar\","
"\"inside_from_nested1\":\"bar/hello\","
"\"inside_from_nested2\":\"bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "..");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested1FromMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("refs")
.from("/data/parent")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo\","
"\"inside_from\":\"foo/bar\","
"\"inside_from_nested1\":\"foo/bar/hello\","
"\"inside_from_nested2\":\"foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, ".");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested1FromMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/grandparent/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("parent/refs")
.from("/data/grandparent")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"parent/foo\","
"\"inside_from\":\"parent/foo/bar\","
"\"inside_from_nested1\":\"parent/foo/bar/hello\","
"\"inside_from_nested2\":\"parent/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "parent");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested1FromMountAtRoot(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("../../refs")
.from("/data/parent")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"/foo\","
"\"inside_from\":\"/foo/bar\","
"\"inside_from_nested1\":\"/foo/bar/hello\","
"\"inside_from_nested2\":\"/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "../..");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested2FromMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("../../refs")
.from("/data/foo/bar")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"/data/foo\","
"\"inside_from\":\".\","
"\"inside_from_nested1\":\"hello\","
"\"inside_from_nested2\":\"hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "../..");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested2FromMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("../refs")
.from("/data/parent/foo")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"bar\","
"\"inside_from_nested1\":\"bar/hello\","
"\"inside_from_nested2\":\"bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "..");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested2FromMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/grandparent/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("refs")
.from("/data/grandparent/parent")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo\","
"\"inside_from\":\"foo/bar\","
"\"inside_from_nested1\":\"foo/bar/hello\","
"\"inside_from_nested2\":\"foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, ".");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectDataNested2FromMountAtRoot(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("../../../refs")
.from("/data/foo/bar")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"/foo\","
"\"inside_from\":\"/foo/bar\","
"\"inside_from_nested1\":\"/foo/bar/hello\","
"\"inside_from_nested2\":\"/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "../../..");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectRootFromMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("data/refs")
.from("/")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"config\","
"\"nested_sibling\":\"config/helloworld\","
"\"at_from\":\"data/foo\","
"\"inside_from\":\"data/foo/bar\","
"\"inside_from_nested1\":\"data/foo/bar/hello\","
"\"inside_from_nested2\":\"data/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"data\","
"\"from\":\"data\","
"\"root\":\".\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "data");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectRootFromMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("data/parent/refs")
.from("/")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"config\","
"\"nested_sibling\":\"config/helloworld\","
"\"at_from\":\"data/parent/foo\","
"\"inside_from\":\"data/parent/foo/bar\","
"\"inside_from_nested1\":\"data/parent/foo/bar/hello\","
"\"inside_from_nested2\":\"data/parent/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"data\","
"\"from\":\"data\","
"\"root\":\".\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "data/parent");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectRootFromMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/data/grandparent/parent");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("data/grandparent/parent/refs")
.from("/")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
char *expect =
"{\"sibling\":\"config\","
"\"nested_sibling\":\"config/helloworld\","
"\"at_from\":\"data/grandparent/parent/foo\","
"\"inside_from\":\"data/grandparent/parent/foo/bar\","
"\"inside_from_nested1\":\"data/grandparent/parent/foo/bar/hello\","
"\"inside_from_nested2\":\"data/grandparent/parent/foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"data\","
"\"from\":\"data\","
"\"root\":\".\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, "data/grandparent/parent");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_selectRootFromMountAtRoot(
test_MountContentTypeRefs this)
{
test_RefMount m = test_RefMount__create(NULL, NULL, "/");
test_assert(m != NULL);
ut_iter it;
int16_t ret = corto_select("refs")
.from("/")
.format("text/json")
.iter(&it);
test_assert(ret == 0);
/* Because the mount point and 'from' are equal, no conversion is performed.
* This has as side-effect that the absolute references in the value are not
* turned into relative references. This may be counter-intuitive, but is
* still 'correct', as the identifiers still unambiguously refer to the
* right object. */
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo\","
"\"inside_from\":\"foo/bar\","
"\"inside_from_nested1\":\"foo/bar/hello\","
"\"inside_from_nested2\":\"foo/bar/hello/world\","
"\"parent_from\":null,"
"\"at_root\":\"/data\","
"\"from\":\"/data\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assert(ut_iter_hasNext(&it) != 0);
corto_record *r = ut_iter_next(&it);
test_assert(r != NULL);
test_assertstr(r->id, "refs");
test_assertstr(r->parent, ".");
test_assertstr(r->type, "test/Refs");
test_assertstr((char*)r->value, expect);
test_assert(ut_iter_hasNext(&it) == 0);
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested1ToMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo/bar\","
"\"inside_from\":\"foo/bar/hello\","
"\"inside_from_nested1\":\"foo/bar/hello/world\","
"\"inside_from_nested2\":\"foo/bar/hello/world/test\","
"\"parent_from\":\".\","
"\"at_root\":\".\","
"\"from\":\"foo\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, "foo");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested1ToMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data/foo")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, ".");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested1ToMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data/foo/bar")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"hello\","
"\"inside_from_nested1\":\"hello/world\","
"\"inside_from_nested2\":\"hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\"/data/foo\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo", "bar/obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, ".");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested1ToMountAtRoot(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"config\","
"\"nested_sibling\":\"config/helloworld\","
"\"at_from\":\"data/foo/bar\","
"\"inside_from\":\"data/foo/bar/hello\","
"\"inside_from_nested1\":\"data/foo/bar/hello/world\","
"\"inside_from_nested2\":\"data/foo/bar/hello/world/test\","
"\"parent_from\":\"data\","
"\"at_root\":\"data\","
"\"from\":\"data/foo\","
"\"root\":\".\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, "data/foo");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested2ToMountAtData(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"hello\","
"\"inside_from_nested1\":\"hello/world\","
"\"inside_from_nested2\":\"hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"foo/bar\","
"\"inside_from\":\"foo/bar/hello\","
"\"inside_from_nested1\":\"foo/bar/hello/world\","
"\"inside_from_nested2\":\"foo/bar/hello/world/test\","
"\"parent_from\":\".\","
"\"at_root\":\".\","
"\"from\":\"foo/bar\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo/bar", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, "foo/bar");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested2ToMountAtDataNested1(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data/foo")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"hello\","
"\"inside_from_nested1\":\"hello/world\","
"\"inside_from_nested2\":\"hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\"bar\","
"\"inside_from\":\"bar/hello\","
"\"inside_from_nested1\":\"bar/hello/world\","
"\"inside_from_nested2\":\"bar/hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\"bar\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo/bar", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, "bar");
test_assert(corto_delete(m) == 0);
}
void test_MountContentTypeRefs_tc_publishFromDataNested2ToMountAtDataNested2(
test_MountContentTypeRefs this)
{
test_RefMount m = (test_RefMount)corto_subscribe("*")
.from("/data/foo/bar")
.queue()
.mount(test_RefMount_o, NULL);
test_assert(m != NULL);
char *json =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"hello\","
"\"inside_from_nested1\":\"hello/world\","
"\"inside_from_nested2\":\"hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
char *expect =
"{\"sibling\":\"/config\","
"\"nested_sibling\":\"/config/helloworld\","
"\"at_from\":\".\","
"\"inside_from\":\"hello\","
"\"inside_from_nested1\":\"hello/world\","
"\"inside_from_nested2\":\"hello/world/test\","
"\"parent_from\":\"/data\","
"\"at_root\":\"/data\","
"\"from\":\".\","
"\"root\":\"/\","
"\"null_ref\":null}";
test_assertstr(m->last_json, NULL);
test_assert(
corto_publish(
CORTO_UPDATE, "/data/foo/bar", "obj", "test/Refs", "text/json", json) == 0);
test_assertstr(m->last_json, expect);
test_assertstr(m->last_id, "obj");
test_assertstr(m->last_parent, ".");
test_assert(corto_delete(m) == 0);
}
/* Publish from "/data/foo/bar" to a mount whose from-scope is the root "/":
 * references relative to the publisher scope must be rebased to root-relative
 * paths (e.g. "." becomes "data/foo/bar", "/config" becomes "config"). */
void test_MountContentTypeRefs_tc_publishFromDataNested2ToMountAtRoot(
    test_MountContentTypeRefs this)
{
    /* Queueing mount at the root scope */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data/foo/bar")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\".\","
        "\"inside_from\":\"hello\","
        "\"inside_from_nested1\":\"hello/world\","
        "\"inside_from_nested2\":\"hello/world/test\","
        "\"parent_from\":\"/data\","
        "\"at_root\":\"/data\","
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    /* Expected output after rebasing every reference to the root scope */
    char *expect =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data/foo/bar\","
        "\"inside_from\":\"data/foo/bar/hello\","
        "\"inside_from_nested1\":\"data/foo/bar/hello/world\","
        "\"inside_from_nested2\":\"data/foo/bar/hello/world/test\","
        "\"parent_from\":\"data\","
        "\"at_root\":\"data\","
        "\"from\":\"data/foo/bar\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/data/foo/bar", "obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, "data/foo/bar");
    test_assert(corto_delete(m) == 0);
}
/* Publish from "/data" to a mount whose from-scope is the same "/data":
 * publisher and mount scopes match, so the JSON must pass through unchanged
 * (including the deliberately absolute "/data" in at_root — see below). */
void test_MountContentTypeRefs_tc_publishFromDataToMountAtData(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"foo\","
        "\"inside_from\":\"foo/bar\","
        "\"inside_from_nested1\":\"foo/bar/hello\","
        "\"inside_from_nested2\":\"foo/bar/hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\"/data\"," /* Deliberately use full path to validate that
                                  * no serialization takes place (should
                                  * otherwise be '.') */
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    /* Same scope on both ends: expect equals the published json verbatim */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"foo\","
        "\"inside_from\":\"foo/bar\","
        "\"inside_from_nested1\":\"foo/bar/hello\","
        "\"inside_from_nested2\":\"foo/bar/hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\"/data\","
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/data", "obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish object "foo/obj" from "/data" to a mount one level deeper
 * ("/data/foo"): references are rebased one level down ("foo" becomes ".",
 * "foo/bar" becomes "bar", and "." becomes the absolute "/data"). */
void test_MountContentTypeRefs_tc_publishFromDataToMountAtDataNested1(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data/foo */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data/foo")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"foo\","
        "\"inside_from\":\"foo/bar\","
        "\"inside_from_nested1\":\"foo/bar/hello\","
        "\"inside_from_nested2\":\"foo/bar/hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\".\","
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the /data/foo scope */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\".\","
        "\"inside_from\":\"bar\","
        "\"inside_from_nested1\":\"bar/hello\","
        "\"inside_from_nested2\":\"bar/hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\"/data\","
        "\"from\":\"/data\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/data", "foo/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish object "foo/bar/obj" from "/data" to a mount two levels deeper
 * ("/data/foo/bar"): references are rebased two levels down ("foo/bar"
 * becomes ".") while references above the mount scope become absolute. */
void test_MountContentTypeRefs_tc_publishFromDataToMountAtDataNested2(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data/foo/bar */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data/foo/bar")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"foo\","
        "\"inside_from\":\"foo/bar\","
        "\"inside_from_nested1\":\"foo/bar/hello\","
        "\"inside_from_nested2\":\"foo/bar/hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\".\","
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the /data/foo/bar scope */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"/data/foo\","
        "\"inside_from\":\".\","
        "\"inside_from_nested1\":\"hello\","
        "\"inside_from_nested2\":\"hello/world\","
        "\"parent_from\":\"/\","
        "\"at_root\":\"/data\","
        "\"from\":\"/data\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/data", "foo/bar/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish from "/data" to a mount at the root ("/") subscribed to "data/*":
 * references relative to /data are rebased to root-relative paths (prefixed
 * with "data"); a null reference stays null. */
void test_MountContentTypeRefs_tc_publishFromDataToMountAtRoot(
    test_MountContentTypeRefs this)
{
    /* Queueing mount at the root, matching only objects under data */
    test_RefMount m = (test_RefMount)corto_subscribe("data/*")
        .from("/")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"foo\","
        "\"inside_from\":\"foo/bar\","
        "\"inside_from_nested1\":\"foo/bar/hello\","
        "\"inside_from_nested2\":\"foo/bar/hello/world\","
        "\"parent_from\":null,"
        "\"at_root\":\".\","
        "\"from\":\".\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the root scope */
    char *expect =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data/foo\","
        "\"inside_from\":\"data/foo/bar\","
        "\"inside_from_nested1\":\"data/foo/bar/hello\","
        "\"inside_from_nested2\":\"data/foo/bar/hello/world\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\"data\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/data", "obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, "data");
    test_assert(corto_delete(m) == 0);
}
/* Publish object "/data/obj" from the root ("/") to a mount at "/data":
 * root-relative references are rebased into the /data scope ("data" becomes
 * ".", "data/foo" becomes "foo"), and references outside the mount scope
 * become absolute ("config" becomes "/config"). */
void test_MountContentTypeRefs_tc_publishFromRootToMountAtData(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data\","
        "\"inside_from\":\"data/foo\","
        "\"inside_from_nested1\":\"data/foo/bar\","
        "\"inside_from_nested2\":\"data/foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\".\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the /data scope */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\".\","
        "\"inside_from\":\"foo\","
        "\"inside_from_nested1\":\"foo/bar\","
        "\"inside_from_nested2\":\"foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\".\","
        "\"from\":\"/\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/", "/data/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish object "data/foo/obj" from the root ("/") to a mount at
 * "/data/foo": root-relative references are rebased two levels down
 * ("data/foo" becomes "."), and references above the mount scope become
 * absolute ("data" becomes "/data", "config" becomes "/config"). */
void test_MountContentTypeRefs_tc_publishFromRootToMountAtDataNested1(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data/foo */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data/foo")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data\","
        "\"inside_from\":\"data/foo\","
        "\"inside_from_nested1\":\"data/foo/bar\","
        "\"inside_from_nested2\":\"data/foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\".\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the /data/foo scope */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"/data\","
        "\"inside_from\":\".\","
        "\"inside_from_nested1\":\"bar\","
        "\"inside_from_nested2\":\"bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"/data\","
        "\"from\":\"/\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/", "data/foo/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish object "data/foo/bar/obj" from the root ("/") to a mount at
 * "/data/foo/bar": root-relative references are rebased three levels down
 * ("data/foo/bar" becomes "."), and references above the mount scope become
 * absolute ("data" becomes "/data", "data/foo" becomes "/data/foo"). */
void test_MountContentTypeRefs_tc_publishFromRootToMountAtDataNested2(
    test_MountContentTypeRefs this)
{
    /* Queueing mount for objects under /data/foo/bar */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/data/foo/bar")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data\","
        "\"inside_from\":\"data/foo\","
        "\"inside_from_nested1\":\"data/foo/bar\","
        "\"inside_from_nested2\":\"data/foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\".\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    /* Expected output after rebasing references to the /data/foo/bar scope */
    char *expect =
        "{\"sibling\":\"/config\","
        "\"nested_sibling\":\"/config/helloworld\","
        "\"at_from\":\"/data\","
        "\"inside_from\":\"/data/foo\","
        "\"inside_from_nested1\":\".\","
        "\"inside_from_nested2\":\"hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"/data\","
        "\"from\":\"/\","
        "\"root\":\"/\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/", "data/foo/bar/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, ".");
    test_assert(corto_delete(m) == 0);
}
/* Publish from the root ("/") to a mount also at the root: publisher and
 * mount scopes match, so the JSON must pass through untranslated. */
void test_MountContentTypeRefs_tc_publishFromRootToMountAtRoot(
    test_MountContentTypeRefs this)
{
    /* Queueing mount at the root scope */
    test_RefMount m = (test_RefMount)corto_subscribe("*")
        .from("/")
        .queue()
        .mount(test_RefMount_o, NULL);
    test_assert(m != NULL);
    char *json =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data\","
        "\"inside_from\":\"data/foo\","
        "\"inside_from_nested1\":\"data/foo/bar\","
        "\"inside_from_nested2\":\"data/foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\".\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    /* Same scope on both ends: expect equals the published json verbatim */
    char *expect =
        "{\"sibling\":\"config\","
        "\"nested_sibling\":\"config/helloworld\","
        "\"at_from\":\"data\","
        "\"inside_from\":\"data/foo\","
        "\"inside_from_nested1\":\"data/foo/bar\","
        "\"inside_from_nested2\":\"data/foo/bar/hello\","
        "\"parent_from\":null,"
        "\"at_root\":\"data\","
        "\"from\":\".\","
        "\"root\":\".\","
        "\"null_ref\":null}";
    test_assertstr(m->last_json, NULL); /* nothing delivered yet */
    test_assert(
        corto_publish(
            CORTO_UPDATE, "/", "data/obj", "test/Refs", "text/json", json) == 0);
    test_assertstr(m->last_json, expect);
    test_assertstr(m->last_id, "obj");
    test_assertstr(m->last_parent, "data");
    test_assert(corto_delete(m) == 0);
}
| 31.134172
| 89
| 0.540884
|
[
"object"
] |
b54b98ca9d5e9e9d964c44d5aff30a38b1606f37
| 1,237
|
h
|
C
|
GraphicsTechniquesInSiv3D/ShadowMapping.h
|
tana/GraphicsTechniquesInSiv3D
|
a129a673eefdf9a7691338593b914f0432e6ff11
|
[
"MIT"
] | 1
|
2021-12-12T09:22:33.000Z
|
2021-12-12T09:22:33.000Z
|
GraphicsTechniquesInSiv3D/ShadowMapping.h
|
tana/GraphicsTechniquesInSiv3D
|
a129a673eefdf9a7691338593b914f0432e6ff11
|
[
"MIT"
] | null | null | null |
GraphicsTechniquesInSiv3D/ShadowMapping.h
|
tana/GraphicsTechniquesInSiv3D
|
a129a673eefdf9a7691338593b914f0432e6ff11
|
[
"MIT"
] | null | null | null |
#pragma once
# include <Siv3D.hpp>
# include "utils.h"
// Render shadows using shadow mapping.
// Based on tutorial 37.1 "Drawing a 3D model" (URL below):
// https://zenn.dev/reputeless/books/siv3d-documentation/viewer/tutorial-3d-2#37.1-3d-%E3%83%A2%E3%83%87%E3%83%AB%E3%82%92%E6%8F%8F%E3%81%8F

// Data passed to the shader when drawing.
struct VSShadowMapping
{
    Mat4x4 sunCameraMatrix; // camera matrix for the sun (light source) viewpoint
};
// Scene demonstrating shadow rendering with shadow mapping.
class ShadowMapping : public SceneManager<String>::Scene
{
public:
    ShadowMapping(const InitData& initData);

    void update() override;

    void draw() const override;

private:
    const ColorF backgroundColor;
    const Mesh groundPlane;
    const Texture groundTexture;
    const Model blacksmithModel, millModel, treeModel, pineModel, siv3dkunModel;
    const MSRenderTexture renderTexture;
    const RenderTexture shadowMapTexture;     // shadow map
    const VertexShader shadowMapGenerationVS; // vertex shader for generating the shadow map
    const PixelShader shadowMapGenerationPS;  // pixel shader for generating the shadow map
    const VertexShader shadowMappingVS;       // vertex shader for drawing with shadows applied
    const PixelShader shadowMappingPS;        // pixel shader for drawing with shadows applied
    ConstantBuffer<VSShadowMapping> cbVSShadowMapping; // constant buffer passed at draw time
    DebugCamera3D camera;
    Vec3 sunPosition;
    Mat4x4 worldToSunMatrix;
    Mat4x4 sunProjMatrix;
    Mat4x4 sunCameraMatrix;

    // Draw each 3D model.
    void drawModels() const;
};
| 25.244898
| 142
| 0.790622
|
[
"mesh",
"model",
"3d"
] |
b54eec1f2846f829c9ccf32767900c7a77fc0023
| 425,518
|
h
|
C
|
proto/test_models.h
|
backwardn/FastBinaryEncoding
|
ba66e57951f19047491d3befea9b580f247a694d
|
[
"MIT"
] | null | null | null |
proto/test_models.h
|
backwardn/FastBinaryEncoding
|
ba66e57951f19047491d3befea9b580f247a694d
|
[
"MIT"
] | null | null | null |
proto/test_models.h
|
backwardn/FastBinaryEncoding
|
ba66e57951f19047491d3befea9b580f247a694d
|
[
"MIT"
] | null | null | null |
// Automatically generated by the Fast Binary Encoding compiler, do not modify!
// https://github.com/chronoxor/FastBinaryEncoding
// Source: test.fbe
// Version: 1.3.0.0
#pragma once
#if defined(__clang__)
#pragma clang system_header
#elif defined(__GNUC__)
#pragma GCC system_header
#elif defined(_MSC_VER)
#pragma system_header
#endif
#include "test.h"
#include "proto_models.h"
namespace FBE {

// Fast Binary Encoding ::test::EnumSimple field model
// EnumSimple is transported via its int32_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::EnumSimple> : public FieldModelBase<TBuffer, ::test::EnumSimple, int32_t>
{
public:
    using FieldModelBase<TBuffer, ::test::EnumSimple, int32_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::EnumSimple final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::EnumSimple> : public FinalModelBase<TBuffer, ::test::EnumSimple, int32_t>
{
public:
    using FinalModelBase<TBuffer, ::test::EnumSimple, int32_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::EnumTyped field model
// EnumTyped is transported via its uint8_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::EnumTyped> : public FieldModelBase<TBuffer, ::test::EnumTyped, uint8_t>
{
public:
    using FieldModelBase<TBuffer, ::test::EnumTyped, uint8_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::EnumTyped final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::EnumTyped> : public FinalModelBase<TBuffer, ::test::EnumTyped, uint8_t>
{
public:
    using FinalModelBase<TBuffer, ::test::EnumTyped, uint8_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::EnumEmpty field model
// EnumEmpty is transported via its int32_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::EnumEmpty> : public FieldModelBase<TBuffer, ::test::EnumEmpty, int32_t>
{
public:
    using FieldModelBase<TBuffer, ::test::EnumEmpty, int32_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::EnumEmpty final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::EnumEmpty> : public FinalModelBase<TBuffer, ::test::EnumEmpty, int32_t>
{
public:
    using FinalModelBase<TBuffer, ::test::EnumEmpty, int32_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::FlagsSimple field model
// FlagsSimple is transported via its int32_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::FlagsSimple> : public FieldModelBase<TBuffer, ::test::FlagsSimple, int32_t>
{
public:
    using FieldModelBase<TBuffer, ::test::FlagsSimple, int32_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::FlagsSimple final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::FlagsSimple> : public FinalModelBase<TBuffer, ::test::FlagsSimple, int32_t>
{
public:
    using FinalModelBase<TBuffer, ::test::FlagsSimple, int32_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::FlagsTyped field model
// FlagsTyped is transported via its uint64_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::FlagsTyped> : public FieldModelBase<TBuffer, ::test::FlagsTyped, uint64_t>
{
public:
    using FieldModelBase<TBuffer, ::test::FlagsTyped, uint64_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::FlagsTyped final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::FlagsTyped> : public FinalModelBase<TBuffer, ::test::FlagsTyped, uint64_t>
{
public:
    using FinalModelBase<TBuffer, ::test::FlagsTyped, uint64_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::FlagsEmpty field model
// FlagsEmpty is transported via its int32_t underlying type; all behavior is
// inherited from FieldModelBase.
template <class TBuffer>
class FieldModel<TBuffer, ::test::FlagsEmpty> : public FieldModelBase<TBuffer, ::test::FlagsEmpty, int32_t>
{
public:
    using FieldModelBase<TBuffer, ::test::FlagsEmpty, int32_t>::FieldModelBase;
};

} // namespace FBE

namespace FBE {

// Fast Binary Encoding ::test::FlagsEmpty final model
// Final-format counterpart of the field model above; behavior is inherited
// from FinalModelBase.
template <class TBuffer>
class FinalModel<TBuffer, ::test::FlagsEmpty> : public FinalModelBase<TBuffer, ::test::FlagsEmpty, int32_t>
{
public:
    using FinalModelBase<TBuffer, ::test::FlagsEmpty, int32_t>::FinalModelBase;
};

} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructSimple field model
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructSimple>
{
public:
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, id(buffer, 4 + 4)
, f1(buffer, id.fbe_offset() + id.fbe_size())
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
, f5(buffer, f4.fbe_offset() + f4.fbe_size())
, f6(buffer, f5.fbe_offset() + f5.fbe_size())
, f7(buffer, f6.fbe_offset() + f6.fbe_size())
, f8(buffer, f7.fbe_offset() + f7.fbe_size())
, f9(buffer, f8.fbe_offset() + f8.fbe_size())
, f10(buffer, f9.fbe_offset() + f9.fbe_size())
, f11(buffer, f10.fbe_offset() + f10.fbe_size())
, f12(buffer, f11.fbe_offset() + f11.fbe_size())
, f13(buffer, f12.fbe_offset() + f12.fbe_size())
, f14(buffer, f13.fbe_offset() + f13.fbe_size())
, f15(buffer, f14.fbe_offset() + f14.fbe_size())
, f16(buffer, f15.fbe_offset() + f15.fbe_size())
, f17(buffer, f16.fbe_offset() + f16.fbe_size())
, f18(buffer, f17.fbe_offset() + f17.fbe_size())
, f19(buffer, f18.fbe_offset() + f18.fbe_size())
, f20(buffer, f19.fbe_offset() + f19.fbe_size())
, f21(buffer, f20.fbe_offset() + f20.fbe_size())
, f22(buffer, f21.fbe_offset() + f21.fbe_size())
, f23(buffer, f22.fbe_offset() + f22.fbe_size())
, f24(buffer, f23.fbe_offset() + f23.fbe_size())
, f25(buffer, f24.fbe_offset() + f24.fbe_size())
, f26(buffer, f25.fbe_offset() + f25.fbe_size())
, f27(buffer, f26.fbe_offset() + f26.fbe_size())
, f28(buffer, f27.fbe_offset() + f27.fbe_size())
, f29(buffer, f28.fbe_offset() + f28.fbe_size())
, f30(buffer, f29.fbe_offset() + f29.fbe_size())
, f31(buffer, f30.fbe_offset() + f30.fbe_size())
, f32(buffer, f31.fbe_offset() + f31.fbe_size())
, f33(buffer, f32.fbe_offset() + f32.fbe_size())
, f34(buffer, f33.fbe_offset() + f33.fbe_size())
, f35(buffer, f34.fbe_offset() + f34.fbe_size())
, f36(buffer, f35.fbe_offset() + f35.fbe_size())
, f37(buffer, f36.fbe_offset() + f36.fbe_size())
, f38(buffer, f37.fbe_offset() + f37.fbe_size())
, f39(buffer, f38.fbe_offset() + f38.fbe_size())
, f40(buffer, f39.fbe_offset() + f39.fbe_size())
, f41(buffer, f40.fbe_offset() + f40.fbe_size())
, f42(buffer, f41.fbe_offset() + f41.fbe_size())
, f43(buffer, f42.fbe_offset() + f42.fbe_size())
, f44(buffer, f43.fbe_offset() + f43.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size
size_t fbe_size() const noexcept { return 4; }
// Get the field body size
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ id.fbe_size()
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
+ f5.fbe_size()
+ f6.fbe_size()
+ f7.fbe_size()
+ f8.fbe_size()
+ f9.fbe_size()
+ f10.fbe_size()
+ f11.fbe_size()
+ f12.fbe_size()
+ f13.fbe_size()
+ f14.fbe_size()
+ f15.fbe_size()
+ f16.fbe_size()
+ f17.fbe_size()
+ f18.fbe_size()
+ f19.fbe_size()
+ f20.fbe_size()
+ f21.fbe_size()
+ f22.fbe_size()
+ f23.fbe_size()
+ f24.fbe_size()
+ f25.fbe_size()
+ f26.fbe_size()
+ f27.fbe_size()
+ f28.fbe_size()
+ f29.fbe_size()
+ f30.fbe_size()
+ f31.fbe_size()
+ f32.fbe_size()
+ f33.fbe_size()
+ f34.fbe_size()
+ f35.fbe_size()
+ f36.fbe_size()
+ f37.fbe_size()
+ f38.fbe_size()
+ f39.fbe_size()
+ f40.fbe_size()
+ f41.fbe_size()
+ f42.fbe_size()
+ f43.fbe_size()
+ f44.fbe_size()
;
return fbe_result;
}
// Get the field extra size
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ id.fbe_extra()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
+ f5.fbe_extra()
+ f6.fbe_extra()
+ f7.fbe_extra()
+ f8.fbe_extra()
+ f9.fbe_extra()
+ f10.fbe_extra()
+ f11.fbe_extra()
+ f12.fbe_extra()
+ f13.fbe_extra()
+ f14.fbe_extra()
+ f15.fbe_extra()
+ f16.fbe_extra()
+ f17.fbe_extra()
+ f18.fbe_extra()
+ f19.fbe_extra()
+ f20.fbe_extra()
+ f21.fbe_extra()
+ f22.fbe_extra()
+ f23.fbe_extra()
+ f24.fbe_extra()
+ f25.fbe_extra()
+ f26.fbe_extra()
+ f27.fbe_extra()
+ f28.fbe_extra()
+ f29.fbe_extra()
+ f30.fbe_extra()
+ f31.fbe_extra()
+ f32.fbe_extra()
+ f33.fbe_extra()
+ f34.fbe_extra()
+ f35.fbe_extra()
+ f36.fbe_extra()
+ f37.fbe_extra()
+ f38.fbe_extra()
+ f39.fbe_extra()
+ f40.fbe_extra()
+ f41.fbe_extra()
+ f42.fbe_extra()
+ f43.fbe_extra()
+ f44.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type
static constexpr size_t fbe_type() noexcept { return 110; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
bool verify(bool fbe_verify_type = true) const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + id.fbe_size()) > fbe_struct_size)
return true;
if (!id.verify())
return false;
fbe_current_size += id.fbe_size();
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
return true;
if (!f5.verify())
return false;
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
return true;
if (!f6.verify())
return false;
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
return true;
if (!f7.verify())
return false;
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
return true;
if (!f8.verify())
return false;
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
return true;
if (!f9.verify())
return false;
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
return true;
if (!f10.verify())
return false;
fbe_current_size += f10.fbe_size();
if ((fbe_current_size + f11.fbe_size()) > fbe_struct_size)
return true;
if (!f11.verify())
return false;
fbe_current_size += f11.fbe_size();
if ((fbe_current_size + f12.fbe_size()) > fbe_struct_size)
return true;
if (!f12.verify())
return false;
fbe_current_size += f12.fbe_size();
if ((fbe_current_size + f13.fbe_size()) > fbe_struct_size)
return true;
if (!f13.verify())
return false;
fbe_current_size += f13.fbe_size();
if ((fbe_current_size + f14.fbe_size()) > fbe_struct_size)
return true;
if (!f14.verify())
return false;
fbe_current_size += f14.fbe_size();
if ((fbe_current_size + f15.fbe_size()) > fbe_struct_size)
return true;
if (!f15.verify())
return false;
fbe_current_size += f15.fbe_size();
if ((fbe_current_size + f16.fbe_size()) > fbe_struct_size)
return true;
if (!f16.verify())
return false;
fbe_current_size += f16.fbe_size();
if ((fbe_current_size + f17.fbe_size()) > fbe_struct_size)
return true;
if (!f17.verify())
return false;
fbe_current_size += f17.fbe_size();
if ((fbe_current_size + f18.fbe_size()) > fbe_struct_size)
return true;
if (!f18.verify())
return false;
fbe_current_size += f18.fbe_size();
if ((fbe_current_size + f19.fbe_size()) > fbe_struct_size)
return true;
if (!f19.verify())
return false;
fbe_current_size += f19.fbe_size();
if ((fbe_current_size + f20.fbe_size()) > fbe_struct_size)
return true;
if (!f20.verify())
return false;
fbe_current_size += f20.fbe_size();
if ((fbe_current_size + f21.fbe_size()) > fbe_struct_size)
return true;
if (!f21.verify())
return false;
fbe_current_size += f21.fbe_size();
if ((fbe_current_size + f22.fbe_size()) > fbe_struct_size)
return true;
if (!f22.verify())
return false;
fbe_current_size += f22.fbe_size();
if ((fbe_current_size + f23.fbe_size()) > fbe_struct_size)
return true;
if (!f23.verify())
return false;
fbe_current_size += f23.fbe_size();
if ((fbe_current_size + f24.fbe_size()) > fbe_struct_size)
return true;
if (!f24.verify())
return false;
fbe_current_size += f24.fbe_size();
if ((fbe_current_size + f25.fbe_size()) > fbe_struct_size)
return true;
if (!f25.verify())
return false;
fbe_current_size += f25.fbe_size();
if ((fbe_current_size + f26.fbe_size()) > fbe_struct_size)
return true;
if (!f26.verify())
return false;
fbe_current_size += f26.fbe_size();
if ((fbe_current_size + f27.fbe_size()) > fbe_struct_size)
return true;
if (!f27.verify())
return false;
fbe_current_size += f27.fbe_size();
if ((fbe_current_size + f28.fbe_size()) > fbe_struct_size)
return true;
if (!f28.verify())
return false;
fbe_current_size += f28.fbe_size();
if ((fbe_current_size + f29.fbe_size()) > fbe_struct_size)
return true;
if (!f29.verify())
return false;
fbe_current_size += f29.fbe_size();
if ((fbe_current_size + f30.fbe_size()) > fbe_struct_size)
return true;
if (!f30.verify())
return false;
fbe_current_size += f30.fbe_size();
if ((fbe_current_size + f31.fbe_size()) > fbe_struct_size)
return true;
if (!f31.verify())
return false;
fbe_current_size += f31.fbe_size();
if ((fbe_current_size + f32.fbe_size()) > fbe_struct_size)
return true;
if (!f32.verify())
return false;
fbe_current_size += f32.fbe_size();
if ((fbe_current_size + f33.fbe_size()) > fbe_struct_size)
return true;
if (!f33.verify())
return false;
fbe_current_size += f33.fbe_size();
if ((fbe_current_size + f34.fbe_size()) > fbe_struct_size)
return true;
if (!f34.verify())
return false;
fbe_current_size += f34.fbe_size();
if ((fbe_current_size + f35.fbe_size()) > fbe_struct_size)
return true;
if (!f35.verify())
return false;
fbe_current_size += f35.fbe_size();
if ((fbe_current_size + f36.fbe_size()) > fbe_struct_size)
return true;
if (!f36.verify())
return false;
fbe_current_size += f36.fbe_size();
if ((fbe_current_size + f37.fbe_size()) > fbe_struct_size)
return true;
if (!f37.verify())
return false;
fbe_current_size += f37.fbe_size();
if ((fbe_current_size + f38.fbe_size()) > fbe_struct_size)
return true;
if (!f38.verify())
return false;
fbe_current_size += f38.fbe_size();
if ((fbe_current_size + f39.fbe_size()) > fbe_struct_size)
return true;
if (!f39.verify())
return false;
fbe_current_size += f39.fbe_size();
if ((fbe_current_size + f40.fbe_size()) > fbe_struct_size)
return true;
if (!f40.verify())
return false;
fbe_current_size += f40.fbe_size();
if ((fbe_current_size + f41.fbe_size()) > fbe_struct_size)
return true;
if (!f41.verify())
return false;
fbe_current_size += f41.fbe_size();
if ((fbe_current_size + f42.fbe_size()) > fbe_struct_size)
return true;
if (!f42.verify())
return false;
fbe_current_size += f42.fbe_size();
if ((fbe_current_size + f43.fbe_size()) > fbe_struct_size)
return true;
if (!f43.verify())
return false;
fbe_current_size += f43.fbe_size();
if ((fbe_current_size + f44.fbe_size()) > fbe_struct_size)
return true;
if (!f44.verify())
return false;
fbe_current_size += f44.fbe_size();
return true;
}
// Get the struct value (begin phase)
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase)
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructSimple& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Read one field guarded by the serialized struct size: when the field still
// lies inside the struct it is deserialized via field.get(value), otherwise
// the provided fallback is assigned (schema evolution: an older writer may
// not have emitted trailing fields). The running size is always advanced.
template <typename TField, typename TValue, typename TFallback>
static void get_field(const TField& field, TValue& value, size_t& fbe_current_size, size_t fbe_struct_size, const TFallback& fallback) noexcept
{
    if ((fbe_current_size + field.fbe_size()) <= fbe_struct_size)
        field.get(value);
    else
        value = fallback;
    fbe_current_size += field.fbe_size();
}
// Same as get_field(), but the default value is also forwarded to the field
// model, i.e. field.get(value, defaults) is used for the in-range case.
template <typename TField, typename TValue, typename TDefault>
static void get_field_with(const TField& field, TValue& value, size_t& fbe_current_size, size_t fbe_struct_size, const TDefault& defaults) noexcept
{
    if ((fbe_current_size + field.fbe_size()) <= fbe_struct_size)
        field.get(value, defaults);
    else
        value = defaults;
    fbe_current_size += field.fbe_size();
}
// Get the struct fields values.
// fbe_struct_size is the total serialized size read from the struct header;
// fields beyond it fall back to their schema defaults.
void get_fields(::test::StructSimple& fbe_value, size_t fbe_struct_size) const noexcept
{
    // Skip the 4-byte struct size and the 4-byte struct type header.
    size_t fbe_current_size = 4 + 4;
    get_field(id, fbe_value.id, fbe_current_size, fbe_struct_size, (int32_t)0ll);
    get_field(f1, fbe_value.f1, fbe_current_size, fbe_struct_size, false);
    get_field_with(f2, fbe_value.f2, fbe_current_size, fbe_struct_size, true);
    get_field(f3, fbe_value.f3, fbe_current_size, fbe_struct_size, (uint8_t)0u);
    get_field_with(f4, fbe_value.f4, fbe_current_size, fbe_struct_size, (uint8_t)255u);
    get_field(f5, fbe_value.f5, fbe_current_size, fbe_struct_size, '\0');
    get_field_with(f6, fbe_value.f6, fbe_current_size, fbe_struct_size, (char)'!');
    get_field(f7, fbe_value.f7, fbe_current_size, fbe_struct_size, L'\0');
    get_field_with(f8, fbe_value.f8, fbe_current_size, fbe_struct_size, (wchar_t)0x0444);
    get_field(f9, fbe_value.f9, fbe_current_size, fbe_struct_size, (int8_t)0);
    get_field_with(f10, fbe_value.f10, fbe_current_size, fbe_struct_size, (int8_t)127);
    get_field_with(f11, fbe_value.f11, fbe_current_size, fbe_struct_size, (uint8_t)0u);
    get_field_with(f12, fbe_value.f12, fbe_current_size, fbe_struct_size, (uint8_t)255u);
    get_field(f13, fbe_value.f13, fbe_current_size, fbe_struct_size, (int16_t)0);
    get_field_with(f14, fbe_value.f14, fbe_current_size, fbe_struct_size, (int16_t)32767);
    get_field_with(f15, fbe_value.f15, fbe_current_size, fbe_struct_size, (uint16_t)0u);
    get_field_with(f16, fbe_value.f16, fbe_current_size, fbe_struct_size, (uint16_t)65535u);
    get_field(f17, fbe_value.f17, fbe_current_size, fbe_struct_size, (int32_t)0ll);
    get_field_with(f18, fbe_value.f18, fbe_current_size, fbe_struct_size, (int32_t)2147483647ll);
    get_field_with(f19, fbe_value.f19, fbe_current_size, fbe_struct_size, (uint32_t)0ull);
    get_field_with(f20, fbe_value.f20, fbe_current_size, fbe_struct_size, (uint32_t)4294967295ull);
    get_field(f21, fbe_value.f21, fbe_current_size, fbe_struct_size, (int64_t)0ll);
    get_field_with(f22, fbe_value.f22, fbe_current_size, fbe_struct_size, (int64_t)9223372036854775807ll);
    get_field_with(f23, fbe_value.f23, fbe_current_size, fbe_struct_size, (uint64_t)0ull);
    get_field_with(f24, fbe_value.f24, fbe_current_size, fbe_struct_size, (uint64_t)18446744073709551615ull);
    get_field(f25, fbe_value.f25, fbe_current_size, fbe_struct_size, 0.0f);
    get_field_with(f26, fbe_value.f26, fbe_current_size, fbe_struct_size, (float)123.456f);
    get_field(f27, fbe_value.f27, fbe_current_size, fbe_struct_size, 0.0);
    get_field_with(f28, fbe_value.f28, fbe_current_size, fbe_struct_size, (double)-123.456e+123);
    get_field(f29, fbe_value.f29, fbe_current_size, fbe_struct_size, FBE::decimal_t());
    get_field_with(f30, fbe_value.f30, fbe_current_size, fbe_struct_size, FBE::decimal_t(123456.123456));
    get_field(f31, fbe_value.f31, fbe_current_size, fbe_struct_size, "");
    get_field_with(f32, fbe_value.f32, fbe_current_size, fbe_struct_size, "Initial string!");
    get_field(f33, fbe_value.f33, fbe_current_size, fbe_struct_size, (uint64_t)0ull);
    get_field_with(f34, fbe_value.f34, fbe_current_size, fbe_struct_size, FBE::epoch());
    get_field_with(f35, fbe_value.f35, fbe_current_size, fbe_struct_size, FBE::utc());
    get_field(f36, fbe_value.f36, fbe_current_size, fbe_struct_size, FBE::uuid_t::nil());
    get_field_with(f37, fbe_value.f37, fbe_current_size, fbe_struct_size, FBE::uuid_t::sequential());
    get_field_with(f38, fbe_value.f38, fbe_current_size, fbe_struct_size, FBE::uuid_t("123e4567-e89b-12d3-a456-426655440000"));
    get_field(f39, fbe_value.f39, fbe_current_size, fbe_struct_size, ::proto::OrderSide());
    get_field(f40, fbe_value.f40, fbe_current_size, fbe_struct_size, ::proto::OrderType());
    get_field(f41, fbe_value.f41, fbe_current_size, fbe_struct_size, ::proto::Order());
    get_field(f42, fbe_value.f42, fbe_current_size, fbe_struct_size, ::proto::Balance());
    get_field(f43, fbe_value.f43, fbe_current_size, fbe_struct_size, ::proto::State());
    get_field(f44, fbe_value.f44, fbe_current_size, fbe_struct_size, ::proto::Account());
}
// Set the struct value (begin phase).
// Allocates the struct body (size + type header plus the fixed field slots),
// stores the body offset into this field's 4-byte slot, writes the header and
// shifts the buffer base so nested fields are addressed from the struct start.
// Returns the struct offset, or 0 on allocation failure.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
// Link the field slot to the freshly allocated struct body.
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
// Write the struct header: total body size followed by the struct type id.
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase): restore the buffer base offset that was
// shifted by the matching set_begin() call.
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Serialize the whole struct value.
// Silently does nothing when set_begin() fails to allocate the struct body.
void set(const ::test::StructSimple& fbe_value) noexcept
{
    const size_t struct_begin = set_begin();
    if (struct_begin != 0)
    {
        set_fields(fbe_value);
        set_end(struct_begin);
    }
}
// Set the struct fields values.
// Each field sub-model writes at its own pre-computed offset inside the
// struct body allocated by set_begin(), so the write order is independent.
void set_fields(const ::test::StructSimple& fbe_value) noexcept
{
id.set(fbe_value.id);
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
f5.set(fbe_value.f5);
f6.set(fbe_value.f6);
f7.set(fbe_value.f7);
f8.set(fbe_value.f8);
f9.set(fbe_value.f9);
f10.set(fbe_value.f10);
f11.set(fbe_value.f11);
f12.set(fbe_value.f12);
f13.set(fbe_value.f13);
f14.set(fbe_value.f14);
f15.set(fbe_value.f15);
f16.set(fbe_value.f16);
f17.set(fbe_value.f17);
f18.set(fbe_value.f18);
f19.set(fbe_value.f19);
f20.set(fbe_value.f20);
f21.set(fbe_value.f21);
f22.set(fbe_value.f22);
f23.set(fbe_value.f23);
f24.set(fbe_value.f24);
f25.set(fbe_value.f25);
f26.set(fbe_value.f26);
f27.set(fbe_value.f27);
f28.set(fbe_value.f28);
f29.set(fbe_value.f29);
f30.set(fbe_value.f30);
f31.set(fbe_value.f31);
f32.set(fbe_value.f32);
f33.set(fbe_value.f33);
f34.set(fbe_value.f34);
f35.set(fbe_value.f35);
f36.set(fbe_value.f36);
f37.set(fbe_value.f37);
f38.set(fbe_value.f38);
f39.set(fbe_value.f39);
f40.set(fbe_value.f40);
f41.set(fbe_value.f41);
f42.set(fbe_value.f42);
f43.set(fbe_value.f43);
f44.set(fbe_value.f44);
}
private:
TBuffer& _buffer;
size_t _offset;
public:
FieldModel<TBuffer, int32_t> id;
FieldModel<TBuffer, bool> f1;
FieldModel<TBuffer, bool> f2;
FieldModel<TBuffer, uint8_t> f3;
FieldModel<TBuffer, uint8_t> f4;
FieldModel<TBuffer, char> f5;
FieldModel<TBuffer, char> f6;
FieldModel<TBuffer, wchar_t> f7;
FieldModel<TBuffer, wchar_t> f8;
FieldModel<TBuffer, int8_t> f9;
FieldModel<TBuffer, int8_t> f10;
FieldModel<TBuffer, uint8_t> f11;
FieldModel<TBuffer, uint8_t> f12;
FieldModel<TBuffer, int16_t> f13;
FieldModel<TBuffer, int16_t> f14;
FieldModel<TBuffer, uint16_t> f15;
FieldModel<TBuffer, uint16_t> f16;
FieldModel<TBuffer, int32_t> f17;
FieldModel<TBuffer, int32_t> f18;
FieldModel<TBuffer, uint32_t> f19;
FieldModel<TBuffer, uint32_t> f20;
FieldModel<TBuffer, int64_t> f21;
FieldModel<TBuffer, int64_t> f22;
FieldModel<TBuffer, uint64_t> f23;
FieldModel<TBuffer, uint64_t> f24;
FieldModel<TBuffer, float> f25;
FieldModel<TBuffer, float> f26;
FieldModel<TBuffer, double> f27;
FieldModel<TBuffer, double> f28;
FieldModel<TBuffer, FBE::decimal_t> f29;
FieldModel<TBuffer, FBE::decimal_t> f30;
FieldModel<TBuffer, std::string> f31;
FieldModel<TBuffer, std::string> f32;
FieldModel<TBuffer, uint64_t> f33;
FieldModel<TBuffer, uint64_t> f34;
FieldModel<TBuffer, uint64_t> f35;
FieldModel<TBuffer, FBE::uuid_t> f36;
FieldModel<TBuffer, FBE::uuid_t> f37;
FieldModel<TBuffer, FBE::uuid_t> f38;
FieldModel<TBuffer, ::proto::OrderSide> f39;
FieldModel<TBuffer, ::proto::OrderType> f40;
FieldModel<TBuffer, ::proto::Order> f41;
FieldModel<TBuffer, ::proto::Balance> f42;
FieldModel<TBuffer, ::proto::State> f43;
FieldModel<TBuffer, ::proto::Account> f44;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructSimple model.
// Wraps a FieldModel for ::test::StructSimple behind a 4-byte full-size
// prefix and provides serialize/deserialize/verify over a TBuffer.
template <class TBuffer>
class StructSimpleModel : public FBE::Model<TBuffer>
{
public:
    StructSimpleModel() : model(this->buffer(), 4) {}
    StructSimpleModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size (fixed part plus extra/dynamic part)
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructSimple>::fbe_type(); }

    // Check if the serialized struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;
        // The 4-byte prefix right before the model stores the full serialized size.
        const uint32_t full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        return (full_size >= model.fbe_size()) && model.verify();
    }

    // Create a new model (begin phase): allocate the size prefix plus the fixed part
    size_t create_begin()
    {
        return this->buffer().allocate(4 + model.fbe_size());
    }
    // Create a new model (end phase): store the full serialized size and return it
    size_t create_end(size_t fbe_begin)
    {
        const uint32_t full_size = (uint32_t)(this->buffer().size() - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = full_size;
        return full_size;
    }

    // Serialize the struct value; returns the full serialized size
    size_t serialize(const ::test::StructSimple& value)
    {
        const size_t begin = create_begin();
        model.set(value);
        return create_end(begin);
    }
    // Deserialize the struct value; returns the consumed size or 0 on failure
    size_t deserialize(::test::StructSimple& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;
        const uint32_t full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        assert((full_size >= model.fbe_size()) && "Model is broken!");
        if (full_size < model.fbe_size())
            return 0;
        model.get(value);
        return full_size;
    }

    // Move to the next struct value in the buffer
    void next(size_t prev) noexcept { model.fbe_shift(prev); }

public:
    FieldModel<TBuffer, ::test::StructSimple> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructSimple final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructSimple>
{
public:
// Construct the final model over the given buffer at the given offset.
// Every field sub-model is created at offset 0: in the final (compact)
// format field offsets are not fixed; they are assigned on the fly via
// fbe_offset() while the struct is read, written or verified.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, id(buffer, 0)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
, f5(buffer, 0)
, f6(buffer, 0)
, f7(buffer, 0)
, f8(buffer, 0)
, f9(buffer, 0)
, f10(buffer, 0)
, f11(buffer, 0)
, f12(buffer, 0)
, f13(buffer, 0)
, f14(buffer, 0)
, f15(buffer, 0)
, f16(buffer, 0)
, f17(buffer, 0)
, f18(buffer, 0)
, f19(buffer, 0)
, f20(buffer, 0)
, f21(buffer, 0)
, f22(buffer, 0)
, f23(buffer, 0)
, f24(buffer, 0)
, f25(buffer, 0)
, f26(buffer, 0)
, f27(buffer, 0)
, f28(buffer, 0)
, f29(buffer, 0)
, f30(buffer, 0)
, f31(buffer, 0)
, f32(buffer, 0)
, f33(buffer, 0)
, f34(buffer, 0)
, f35(buffer, 0)
, f36(buffer, 0)
, f37(buffer, 0)
, f38(buffer, 0)
, f39(buffer, 0)
, f40(buffer, 0)
, f41(buffer, 0)
, f42(buffer, 0)
, f43(buffer, 0)
, f44(buffer, 0)
{}
// Get the allocation size: the number of bytes required to serialize the
// given struct value in the final format (sum of all field allocation sizes).
size_t fbe_allocation_size(const ::test::StructSimple& fbe_value) const noexcept
{
    size_t total = 0;
    total += id.fbe_allocation_size(fbe_value.id);
    total += f1.fbe_allocation_size(fbe_value.f1);
    total += f2.fbe_allocation_size(fbe_value.f2);
    total += f3.fbe_allocation_size(fbe_value.f3);
    total += f4.fbe_allocation_size(fbe_value.f4);
    total += f5.fbe_allocation_size(fbe_value.f5);
    total += f6.fbe_allocation_size(fbe_value.f6);
    total += f7.fbe_allocation_size(fbe_value.f7);
    total += f8.fbe_allocation_size(fbe_value.f8);
    total += f9.fbe_allocation_size(fbe_value.f9);
    total += f10.fbe_allocation_size(fbe_value.f10);
    total += f11.fbe_allocation_size(fbe_value.f11);
    total += f12.fbe_allocation_size(fbe_value.f12);
    total += f13.fbe_allocation_size(fbe_value.f13);
    total += f14.fbe_allocation_size(fbe_value.f14);
    total += f15.fbe_allocation_size(fbe_value.f15);
    total += f16.fbe_allocation_size(fbe_value.f16);
    total += f17.fbe_allocation_size(fbe_value.f17);
    total += f18.fbe_allocation_size(fbe_value.f18);
    total += f19.fbe_allocation_size(fbe_value.f19);
    total += f20.fbe_allocation_size(fbe_value.f20);
    total += f21.fbe_allocation_size(fbe_value.f21);
    total += f22.fbe_allocation_size(fbe_value.f22);
    total += f23.fbe_allocation_size(fbe_value.f23);
    total += f24.fbe_allocation_size(fbe_value.f24);
    total += f25.fbe_allocation_size(fbe_value.f25);
    total += f26.fbe_allocation_size(fbe_value.f26);
    total += f27.fbe_allocation_size(fbe_value.f27);
    total += f28.fbe_allocation_size(fbe_value.f28);
    total += f29.fbe_allocation_size(fbe_value.f29);
    total += f30.fbe_allocation_size(fbe_value.f30);
    total += f31.fbe_allocation_size(fbe_value.f31);
    total += f32.fbe_allocation_size(fbe_value.f32);
    total += f33.fbe_allocation_size(fbe_value.f33);
    total += f34.fbe_allocation_size(fbe_value.f34);
    total += f35.fbe_allocation_size(fbe_value.f35);
    total += f36.fbe_allocation_size(fbe_value.f36);
    total += f37.fbe_allocation_size(fbe_value.f37);
    total += f38.fbe_allocation_size(fbe_value.f38);
    total += f39.fbe_allocation_size(fbe_value.f39);
    total += f40.fbe_allocation_size(fbe_value.f40);
    total += f41.fbe_allocation_size(fbe_value.f41);
    total += f42.fbe_allocation_size(fbe_value.f42);
    total += f43.fbe_allocation_size(fbe_value.f43);
    total += f44.fbe_allocation_size(fbe_value.f44);
    return total;
}
// Get the current final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the current final offset; returns the new offset.
// NOTE(review): setter is const — presumably _offset is declared mutable so
// sub-model offsets can be updated during const reads; confirm at the
// member declaration.
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type identifier of ::test::StructSimple
static constexpr size_t fbe_type() noexcept { return 110; }
// Shift the current final offset forward by the given size
void fbe_shift(size_t size) noexcept { _offset += size; }
// Shift the current final offset backward by the given size
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid.
// Temporarily rebases the buffer at this model's offset, verifies all
// fields, then restores the base. Returns the verified size, or
// std::numeric_limits<std::size_t>::max() on failure.
size_t verify() const noexcept
{
    _buffer.shift(fbe_offset());
    const size_t verified_size = verify_fields();
    _buffer.unshift(fbe_offset());
    return verified_size;
}
// Verify a single final-format field at the given running offset.
// On success the offset is advanced by the field size and true is returned;
// on failure (field reports max size_t) the offset is left as-is.
template <typename TField>
static bool verify_field_at(const TField& field, size_t& fbe_current_offset) noexcept
{
    field.fbe_offset(fbe_current_offset);
    size_t fbe_field_size = field.verify();
    if (fbe_field_size == std::numeric_limits<std::size_t>::max())
        return false;
    fbe_current_offset += fbe_field_size;
    return true;
}
// Check if the struct fields are valid.
// Fields are laid out back-to-back in schema order; each one is verified at
// the running offset. Returns the total verified size, or
// std::numeric_limits<std::size_t>::max() as soon as any field is invalid.
size_t verify_fields() const noexcept
{
    size_t fbe_current_offset = 0;
    if (!verify_field_at(id, fbe_current_offset) ||
        !verify_field_at(f1, fbe_current_offset) ||
        !verify_field_at(f2, fbe_current_offset) ||
        !verify_field_at(f3, fbe_current_offset) ||
        !verify_field_at(f4, fbe_current_offset) ||
        !verify_field_at(f5, fbe_current_offset) ||
        !verify_field_at(f6, fbe_current_offset) ||
        !verify_field_at(f7, fbe_current_offset) ||
        !verify_field_at(f8, fbe_current_offset) ||
        !verify_field_at(f9, fbe_current_offset) ||
        !verify_field_at(f10, fbe_current_offset) ||
        !verify_field_at(f11, fbe_current_offset) ||
        !verify_field_at(f12, fbe_current_offset) ||
        !verify_field_at(f13, fbe_current_offset) ||
        !verify_field_at(f14, fbe_current_offset) ||
        !verify_field_at(f15, fbe_current_offset) ||
        !verify_field_at(f16, fbe_current_offset) ||
        !verify_field_at(f17, fbe_current_offset) ||
        !verify_field_at(f18, fbe_current_offset) ||
        !verify_field_at(f19, fbe_current_offset) ||
        !verify_field_at(f20, fbe_current_offset) ||
        !verify_field_at(f21, fbe_current_offset) ||
        !verify_field_at(f22, fbe_current_offset) ||
        !verify_field_at(f23, fbe_current_offset) ||
        !verify_field_at(f24, fbe_current_offset) ||
        !verify_field_at(f25, fbe_current_offset) ||
        !verify_field_at(f26, fbe_current_offset) ||
        !verify_field_at(f27, fbe_current_offset) ||
        !verify_field_at(f28, fbe_current_offset) ||
        !verify_field_at(f29, fbe_current_offset) ||
        !verify_field_at(f30, fbe_current_offset) ||
        !verify_field_at(f31, fbe_current_offset) ||
        !verify_field_at(f32, fbe_current_offset) ||
        !verify_field_at(f33, fbe_current_offset) ||
        !verify_field_at(f34, fbe_current_offset) ||
        !verify_field_at(f35, fbe_current_offset) ||
        !verify_field_at(f36, fbe_current_offset) ||
        !verify_field_at(f37, fbe_current_offset) ||
        !verify_field_at(f38, fbe_current_offset) ||
        !verify_field_at(f39, fbe_current_offset) ||
        !verify_field_at(f40, fbe_current_offset) ||
        !verify_field_at(f41, fbe_current_offset) ||
        !verify_field_at(f42, fbe_current_offset) ||
        !verify_field_at(f43, fbe_current_offset) ||
        !verify_field_at(f44, fbe_current_offset))
        return std::numeric_limits<std::size_t>::max();
    return fbe_current_offset;
}
// Get the struct value.
// Temporarily rebases the buffer at this model's offset, reads all fields,
// then restores the base. Returns the total number of bytes read.
size_t get(::test::StructSimple& fbe_value) const noexcept
{
    _buffer.shift(fbe_offset());
    const size_t read_size = get_fields(fbe_value);
    _buffer.unshift(fbe_offset());
    return read_size;
}
// Deserialize a single final-format field at the given running offset.
// Advances the offset and accumulates the total read size by the number of
// bytes the field consumed.
template <typename TField, typename TValue>
static void get_field_at(const TField& field, TValue& value, size_t& fbe_current_offset, size_t& fbe_current_size) noexcept
{
    field.fbe_offset(fbe_current_offset);
    size_t fbe_field_size = field.get(value);
    fbe_current_offset += fbe_field_size;
    fbe_current_size += fbe_field_size;
}
// Get the struct fields values.
// Fields are laid out back-to-back in schema order; each one is read at the
// running offset. Returns the total number of bytes consumed.
size_t get_fields(::test::StructSimple& fbe_value) const noexcept
{
    size_t fbe_current_offset = 0;
    size_t fbe_current_size = 0;
    get_field_at(id, fbe_value.id, fbe_current_offset, fbe_current_size);
    get_field_at(f1, fbe_value.f1, fbe_current_offset, fbe_current_size);
    get_field_at(f2, fbe_value.f2, fbe_current_offset, fbe_current_size);
    get_field_at(f3, fbe_value.f3, fbe_current_offset, fbe_current_size);
    get_field_at(f4, fbe_value.f4, fbe_current_offset, fbe_current_size);
    get_field_at(f5, fbe_value.f5, fbe_current_offset, fbe_current_size);
    get_field_at(f6, fbe_value.f6, fbe_current_offset, fbe_current_size);
    get_field_at(f7, fbe_value.f7, fbe_current_offset, fbe_current_size);
    get_field_at(f8, fbe_value.f8, fbe_current_offset, fbe_current_size);
    get_field_at(f9, fbe_value.f9, fbe_current_offset, fbe_current_size);
    get_field_at(f10, fbe_value.f10, fbe_current_offset, fbe_current_size);
    get_field_at(f11, fbe_value.f11, fbe_current_offset, fbe_current_size);
    get_field_at(f12, fbe_value.f12, fbe_current_offset, fbe_current_size);
    get_field_at(f13, fbe_value.f13, fbe_current_offset, fbe_current_size);
    get_field_at(f14, fbe_value.f14, fbe_current_offset, fbe_current_size);
    get_field_at(f15, fbe_value.f15, fbe_current_offset, fbe_current_size);
    get_field_at(f16, fbe_value.f16, fbe_current_offset, fbe_current_size);
    get_field_at(f17, fbe_value.f17, fbe_current_offset, fbe_current_size);
    get_field_at(f18, fbe_value.f18, fbe_current_offset, fbe_current_size);
    get_field_at(f19, fbe_value.f19, fbe_current_offset, fbe_current_size);
    get_field_at(f20, fbe_value.f20, fbe_current_offset, fbe_current_size);
    get_field_at(f21, fbe_value.f21, fbe_current_offset, fbe_current_size);
    get_field_at(f22, fbe_value.f22, fbe_current_offset, fbe_current_size);
    get_field_at(f23, fbe_value.f23, fbe_current_offset, fbe_current_size);
    get_field_at(f24, fbe_value.f24, fbe_current_offset, fbe_current_size);
    get_field_at(f25, fbe_value.f25, fbe_current_offset, fbe_current_size);
    get_field_at(f26, fbe_value.f26, fbe_current_offset, fbe_current_size);
    get_field_at(f27, fbe_value.f27, fbe_current_offset, fbe_current_size);
    get_field_at(f28, fbe_value.f28, fbe_current_offset, fbe_current_size);
    get_field_at(f29, fbe_value.f29, fbe_current_offset, fbe_current_size);
    get_field_at(f30, fbe_value.f30, fbe_current_offset, fbe_current_size);
    get_field_at(f31, fbe_value.f31, fbe_current_offset, fbe_current_size);
    get_field_at(f32, fbe_value.f32, fbe_current_offset, fbe_current_size);
    get_field_at(f33, fbe_value.f33, fbe_current_offset, fbe_current_size);
    get_field_at(f34, fbe_value.f34, fbe_current_offset, fbe_current_size);
    get_field_at(f35, fbe_value.f35, fbe_current_offset, fbe_current_size);
    get_field_at(f36, fbe_value.f36, fbe_current_offset, fbe_current_size);
    get_field_at(f37, fbe_value.f37, fbe_current_offset, fbe_current_size);
    get_field_at(f38, fbe_value.f38, fbe_current_offset, fbe_current_size);
    get_field_at(f39, fbe_value.f39, fbe_current_offset, fbe_current_size);
    get_field_at(f40, fbe_value.f40, fbe_current_offset, fbe_current_size);
    get_field_at(f41, fbe_value.f41, fbe_current_offset, fbe_current_size);
    get_field_at(f42, fbe_value.f42, fbe_current_offset, fbe_current_size);
    get_field_at(f43, fbe_value.f43, fbe_current_offset, fbe_current_size);
    get_field_at(f44, fbe_value.f44, fbe_current_offset, fbe_current_size);
    return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructSimple& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructSimple& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
id.fbe_offset(fbe_current_offset);
fbe_field_size = id.set(fbe_value.id);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.set(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.set(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.set(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.set(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.set(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.set(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f11.fbe_offset(fbe_current_offset);
fbe_field_size = f11.set(fbe_value.f11);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f12.fbe_offset(fbe_current_offset);
fbe_field_size = f12.set(fbe_value.f12);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f13.fbe_offset(fbe_current_offset);
fbe_field_size = f13.set(fbe_value.f13);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f14.fbe_offset(fbe_current_offset);
fbe_field_size = f14.set(fbe_value.f14);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f15.fbe_offset(fbe_current_offset);
fbe_field_size = f15.set(fbe_value.f15);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f16.fbe_offset(fbe_current_offset);
fbe_field_size = f16.set(fbe_value.f16);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f17.fbe_offset(fbe_current_offset);
fbe_field_size = f17.set(fbe_value.f17);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f18.fbe_offset(fbe_current_offset);
fbe_field_size = f18.set(fbe_value.f18);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f19.fbe_offset(fbe_current_offset);
fbe_field_size = f19.set(fbe_value.f19);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f20.fbe_offset(fbe_current_offset);
fbe_field_size = f20.set(fbe_value.f20);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f21.fbe_offset(fbe_current_offset);
fbe_field_size = f21.set(fbe_value.f21);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f22.fbe_offset(fbe_current_offset);
fbe_field_size = f22.set(fbe_value.f22);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f23.fbe_offset(fbe_current_offset);
fbe_field_size = f23.set(fbe_value.f23);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f24.fbe_offset(fbe_current_offset);
fbe_field_size = f24.set(fbe_value.f24);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f25.fbe_offset(fbe_current_offset);
fbe_field_size = f25.set(fbe_value.f25);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f26.fbe_offset(fbe_current_offset);
fbe_field_size = f26.set(fbe_value.f26);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f27.fbe_offset(fbe_current_offset);
fbe_field_size = f27.set(fbe_value.f27);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f28.fbe_offset(fbe_current_offset);
fbe_field_size = f28.set(fbe_value.f28);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f29.fbe_offset(fbe_current_offset);
fbe_field_size = f29.set(fbe_value.f29);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f30.fbe_offset(fbe_current_offset);
fbe_field_size = f30.set(fbe_value.f30);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f31.fbe_offset(fbe_current_offset);
fbe_field_size = f31.set(fbe_value.f31);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f32.fbe_offset(fbe_current_offset);
fbe_field_size = f32.set(fbe_value.f32);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f33.fbe_offset(fbe_current_offset);
fbe_field_size = f33.set(fbe_value.f33);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f34.fbe_offset(fbe_current_offset);
fbe_field_size = f34.set(fbe_value.f34);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f35.fbe_offset(fbe_current_offset);
fbe_field_size = f35.set(fbe_value.f35);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f36.fbe_offset(fbe_current_offset);
fbe_field_size = f36.set(fbe_value.f36);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f37.fbe_offset(fbe_current_offset);
fbe_field_size = f37.set(fbe_value.f37);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f38.fbe_offset(fbe_current_offset);
fbe_field_size = f38.set(fbe_value.f38);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f39.fbe_offset(fbe_current_offset);
fbe_field_size = f39.set(fbe_value.f39);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f40.fbe_offset(fbe_current_offset);
fbe_field_size = f40.set(fbe_value.f40);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f41.fbe_offset(fbe_current_offset);
fbe_field_size = f41.set(fbe_value.f41);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f42.fbe_offset(fbe_current_offset);
fbe_field_size = f42.set(fbe_value.f42);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f43.fbe_offset(fbe_current_offset);
fbe_field_size = f43.set(fbe_value.f43);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f44.fbe_offset(fbe_current_offset);
fbe_field_size = f44.set(fbe_value.f44);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
    // Underlying buffer (not owned) and the model's current base offset.
    TBuffer& _buffer;
    // mutable so const accessors can shift/unshift during traversal.
    mutable size_t _offset;
public:
    // Per-field final models; laid out sequentially in declaration order
    // (see set_fields/get_fields which advance a running offset over them).
    FinalModel<TBuffer, int32_t> id;
    FinalModel<TBuffer, bool> f1;
    FinalModel<TBuffer, bool> f2;
    FinalModel<TBuffer, uint8_t> f3;
    FinalModel<TBuffer, uint8_t> f4;
    FinalModel<TBuffer, char> f5;
    FinalModel<TBuffer, char> f6;
    FinalModel<TBuffer, wchar_t> f7;
    FinalModel<TBuffer, wchar_t> f8;
    FinalModel<TBuffer, int8_t> f9;
    FinalModel<TBuffer, int8_t> f10;
    FinalModel<TBuffer, uint8_t> f11;
    FinalModel<TBuffer, uint8_t> f12;
    FinalModel<TBuffer, int16_t> f13;
    FinalModel<TBuffer, int16_t> f14;
    FinalModel<TBuffer, uint16_t> f15;
    FinalModel<TBuffer, uint16_t> f16;
    FinalModel<TBuffer, int32_t> f17;
    FinalModel<TBuffer, int32_t> f18;
    FinalModel<TBuffer, uint32_t> f19;
    FinalModel<TBuffer, uint32_t> f20;
    FinalModel<TBuffer, int64_t> f21;
    FinalModel<TBuffer, int64_t> f22;
    FinalModel<TBuffer, uint64_t> f23;
    FinalModel<TBuffer, uint64_t> f24;
    FinalModel<TBuffer, float> f25;
    FinalModel<TBuffer, float> f26;
    FinalModel<TBuffer, double> f27;
    FinalModel<TBuffer, double> f28;
    FinalModel<TBuffer, FBE::decimal_t> f29;
    FinalModel<TBuffer, FBE::decimal_t> f30;
    FinalModel<TBuffer, std::string> f31;
    FinalModel<TBuffer, std::string> f32;
    // f33..f35 presumably map timestamp-like FBE types onto uint64_t -- the
    // schema is not visible here; confirm against the .fbe definition.
    FinalModel<TBuffer, uint64_t> f33;
    FinalModel<TBuffer, uint64_t> f34;
    FinalModel<TBuffer, uint64_t> f35;
    FinalModel<TBuffer, FBE::uuid_t> f36;
    FinalModel<TBuffer, FBE::uuid_t> f37;
    FinalModel<TBuffer, FBE::uuid_t> f38;
    FinalModel<TBuffer, ::proto::OrderSide> f39;
    FinalModel<TBuffer, ::proto::OrderType> f40;
    FinalModel<TBuffer, ::proto::Order> f41;
    FinalModel<TBuffer, ::proto::Balance> f42;
    FinalModel<TBuffer, ::proto::State> f43;
    FinalModel<TBuffer, ::proto::Account> f44;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructSimple final model
// Wraps the FinalModel with an 8-byte record header: a 4-byte struct size
// (at _model.fbe_offset() - 8) followed by a 4-byte struct type marker
// (at _model.fbe_offset() - 4), which is why the inner model starts at 8.
template <class TBuffer>
class StructSimpleFinalModel : public FBE::Model<TBuffer>
{
public:
    StructSimpleFinalModel() : _model(this->buffer(), 8) {}
    StructSimpleFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructSimple>::fbe_type(); }
    // Check if the struct value is valid: the header must carry a non-zero
    // size and the matching type, and the recursive field verification must
    // account for exactly the recorded size.
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;
        return ((8 + _model.verify()) == fbe_struct_size);
    }
    // Serialize the struct value; returns the number of bytes written
    // (header included) or 0 when the buffer allocation failed.
    size_t serialize(const ::test::StructSimple& value)
    {
        size_t fbe_initial_size = this->buffer().size();
        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        // Reserve space up-front using the allocation estimate...
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;
        // ...then shrink to the exact serialized size and stamp the header.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
        return fbe_struct_size;
    }
    // Deserialize the struct value; returns the number of bytes consumed
    // (8 for a bad/foreign header, 0 when the buffer is too small).
    size_t deserialize(::test::StructSimple& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;
        return 8 + _model.get(value);
    }
    // Move to the next struct value (advance by the previous record's size)
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }
private:
    FinalModel<TBuffer, ::test::StructSimple> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructOptional field model
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructOptional>
{
public:
    // Attach the field model to the given buffer at the given offset.
    // Member models are chained sequentially: the inherited 'parent' starts
    // right after the 4-byte size + 4-byte type header, f100 starts where the
    // parent body ends (minus the parent's own duplicated header), and every
    // subsequent field starts at the previous field's offset + size.
    // NOTE: the member declaration order must match this init-list order.
    FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        , parent(buffer, 4 + 4)
        , f100(buffer, parent.fbe_offset() + parent.fbe_body() - 4 - 4)
        , f101(buffer, f100.fbe_offset() + f100.fbe_size())
        , f102(buffer, f101.fbe_offset() + f101.fbe_size())
        , f103(buffer, f102.fbe_offset() + f102.fbe_size())
        , f104(buffer, f103.fbe_offset() + f103.fbe_size())
        , f105(buffer, f104.fbe_offset() + f104.fbe_size())
        , f106(buffer, f105.fbe_offset() + f105.fbe_size())
        , f107(buffer, f106.fbe_offset() + f106.fbe_size())
        , f108(buffer, f107.fbe_offset() + f107.fbe_size())
        , f109(buffer, f108.fbe_offset() + f108.fbe_size())
        , f110(buffer, f109.fbe_offset() + f109.fbe_size())
        , f111(buffer, f110.fbe_offset() + f110.fbe_size())
        , f112(buffer, f111.fbe_offset() + f111.fbe_size())
        , f113(buffer, f112.fbe_offset() + f112.fbe_size())
        , f114(buffer, f113.fbe_offset() + f113.fbe_size())
        , f115(buffer, f114.fbe_offset() + f114.fbe_size())
        , f116(buffer, f115.fbe_offset() + f115.fbe_size())
        , f117(buffer, f116.fbe_offset() + f116.fbe_size())
        , f118(buffer, f117.fbe_offset() + f117.fbe_size())
        , f119(buffer, f118.fbe_offset() + f118.fbe_size())
        , f120(buffer, f119.fbe_offset() + f119.fbe_size())
        , f121(buffer, f120.fbe_offset() + f120.fbe_size())
        , f122(buffer, f121.fbe_offset() + f121.fbe_size())
        , f123(buffer, f122.fbe_offset() + f122.fbe_size())
        , f124(buffer, f123.fbe_offset() + f123.fbe_size())
        , f125(buffer, f124.fbe_offset() + f124.fbe_size())
        , f126(buffer, f125.fbe_offset() + f125.fbe_size())
        , f127(buffer, f126.fbe_offset() + f126.fbe_size())
        , f128(buffer, f127.fbe_offset() + f127.fbe_size())
        , f129(buffer, f128.fbe_offset() + f128.fbe_size())
        , f130(buffer, f129.fbe_offset() + f129.fbe_size())
        , f131(buffer, f130.fbe_offset() + f130.fbe_size())
        , f132(buffer, f131.fbe_offset() + f131.fbe_size())
        , f133(buffer, f132.fbe_offset() + f132.fbe_size())
        , f134(buffer, f133.fbe_offset() + f133.fbe_size())
        , f135(buffer, f134.fbe_offset() + f134.fbe_size())
        , f136(buffer, f135.fbe_offset() + f135.fbe_size())
        , f137(buffer, f136.fbe_offset() + f136.fbe_size())
        , f138(buffer, f137.fbe_offset() + f137.fbe_size())
        , f139(buffer, f138.fbe_offset() + f138.fbe_size())
        , f140(buffer, f139.fbe_offset() + f139.fbe_size())
        , f141(buffer, f140.fbe_offset() + f140.fbe_size())
        , f142(buffer, f141.fbe_offset() + f141.fbe_size())
        , f143(buffer, f142.fbe_offset() + f142.fbe_size())
        , f144(buffer, f143.fbe_offset() + f143.fbe_size())
        , f145(buffer, f144.fbe_offset() + f144.fbe_size())
        , f146(buffer, f145.fbe_offset() + f145.fbe_size())
        , f147(buffer, f146.fbe_offset() + f146.fbe_size())
        , f148(buffer, f147.fbe_offset() + f147.fbe_size())
        , f149(buffer, f148.fbe_offset() + f148.fbe_size())
        , f150(buffer, f149.fbe_offset() + f149.fbe_size())
        , f151(buffer, f150.fbe_offset() + f150.fbe_size())
        , f152(buffer, f151.fbe_offset() + f151.fbe_size())
        , f153(buffer, f152.fbe_offset() + f152.fbe_size())
        , f154(buffer, f153.fbe_offset() + f153.fbe_size())
        , f155(buffer, f154.fbe_offset() + f154.fbe_size())
        , f156(buffer, f155.fbe_offset() + f155.fbe_size())
        , f157(buffer, f156.fbe_offset() + f156.fbe_size())
        , f158(buffer, f157.fbe_offset() + f157.fbe_size())
        , f159(buffer, f158.fbe_offset() + f158.fbe_size())
        , f160(buffer, f159.fbe_offset() + f159.fbe_size())
        , f161(buffer, f160.fbe_offset() + f160.fbe_size())
        , f162(buffer, f161.fbe_offset() + f161.fbe_size())
        , f163(buffer, f162.fbe_offset() + f162.fbe_size())
        , f164(buffer, f163.fbe_offset() + f163.fbe_size())
        , f165(buffer, f164.fbe_offset() + f164.fbe_size())
    {}
    // Get the field offset within the (shifted) buffer
    size_t fbe_offset() const noexcept { return _offset; }
    // Get the field size: 4 bytes, the uint32 offset pointer to the struct body
    size_t fbe_size() const noexcept { return 4; }
// Get the field body size
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ parent.fbe_body() - 4 - 4
+ f100.fbe_size()
+ f101.fbe_size()
+ f102.fbe_size()
+ f103.fbe_size()
+ f104.fbe_size()
+ f105.fbe_size()
+ f106.fbe_size()
+ f107.fbe_size()
+ f108.fbe_size()
+ f109.fbe_size()
+ f110.fbe_size()
+ f111.fbe_size()
+ f112.fbe_size()
+ f113.fbe_size()
+ f114.fbe_size()
+ f115.fbe_size()
+ f116.fbe_size()
+ f117.fbe_size()
+ f118.fbe_size()
+ f119.fbe_size()
+ f120.fbe_size()
+ f121.fbe_size()
+ f122.fbe_size()
+ f123.fbe_size()
+ f124.fbe_size()
+ f125.fbe_size()
+ f126.fbe_size()
+ f127.fbe_size()
+ f128.fbe_size()
+ f129.fbe_size()
+ f130.fbe_size()
+ f131.fbe_size()
+ f132.fbe_size()
+ f133.fbe_size()
+ f134.fbe_size()
+ f135.fbe_size()
+ f136.fbe_size()
+ f137.fbe_size()
+ f138.fbe_size()
+ f139.fbe_size()
+ f140.fbe_size()
+ f141.fbe_size()
+ f142.fbe_size()
+ f143.fbe_size()
+ f144.fbe_size()
+ f145.fbe_size()
+ f146.fbe_size()
+ f147.fbe_size()
+ f148.fbe_size()
+ f149.fbe_size()
+ f150.fbe_size()
+ f151.fbe_size()
+ f152.fbe_size()
+ f153.fbe_size()
+ f154.fbe_size()
+ f155.fbe_size()
+ f156.fbe_size()
+ f157.fbe_size()
+ f158.fbe_size()
+ f159.fbe_size()
+ f160.fbe_size()
+ f161.fbe_size()
+ f162.fbe_size()
+ f163.fbe_size()
+ f164.fbe_size()
+ f165.fbe_size()
;
return fbe_result;
}
// Get the field extra size
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ parent.fbe_extra()
+ f100.fbe_extra()
+ f101.fbe_extra()
+ f102.fbe_extra()
+ f103.fbe_extra()
+ f104.fbe_extra()
+ f105.fbe_extra()
+ f106.fbe_extra()
+ f107.fbe_extra()
+ f108.fbe_extra()
+ f109.fbe_extra()
+ f110.fbe_extra()
+ f111.fbe_extra()
+ f112.fbe_extra()
+ f113.fbe_extra()
+ f114.fbe_extra()
+ f115.fbe_extra()
+ f116.fbe_extra()
+ f117.fbe_extra()
+ f118.fbe_extra()
+ f119.fbe_extra()
+ f120.fbe_extra()
+ f121.fbe_extra()
+ f122.fbe_extra()
+ f123.fbe_extra()
+ f124.fbe_extra()
+ f125.fbe_extra()
+ f126.fbe_extra()
+ f127.fbe_extra()
+ f128.fbe_extra()
+ f129.fbe_extra()
+ f130.fbe_extra()
+ f131.fbe_extra()
+ f132.fbe_extra()
+ f133.fbe_extra()
+ f134.fbe_extra()
+ f135.fbe_extra()
+ f136.fbe_extra()
+ f137.fbe_extra()
+ f138.fbe_extra()
+ f139.fbe_extra()
+ f140.fbe_extra()
+ f141.fbe_extra()
+ f142.fbe_extra()
+ f143.fbe_extra()
+ f144.fbe_extra()
+ f145.fbe_extra()
+ f146.fbe_extra()
+ f147.fbe_extra()
+ f148.fbe_extra()
+ f149.fbe_extra()
+ f150.fbe_extra()
+ f151.fbe_extra()
+ f152.fbe_extra()
+ f153.fbe_extra()
+ f154.fbe_extra()
+ f155.fbe_extra()
+ f156.fbe_extra()
+ f157.fbe_extra()
+ f158.fbe_extra()
+ f159.fbe_extra()
+ f160.fbe_extra()
+ f161.fbe_extra()
+ f162.fbe_extra()
+ f163.fbe_extra()
+ f164.fbe_extra()
+ f165.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
    // Get the field type (unique id assigned to ::test::StructOptional by the generator)
    static constexpr size_t fbe_type() noexcept { return 111; }
    // Shift the current field offset forward by the given size
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current field offset back by the given size
    void fbe_unshift(size_t size) noexcept { _offset -= size; }
    // Check if the struct value is valid.
    // A field that lies beyond the buffer is reported as valid (true) --
    // presumably the absent-field convention of the FBE format; a stored
    // offset/header that is present but malformed is reported as invalid.
    bool verify(bool fbe_verify_type = true) const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return true;
        // The field itself is a uint32 offset pointing at the struct record.
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return false;
        // Record header: 4-byte struct size followed by 4-byte struct type.
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        if (fbe_struct_size < (4 + 4))
            return false;
        uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
        if (fbe_verify_type && (fbe_struct_type != fbe_type()))
            return false;
        // Recurse into the fields with the buffer shifted to the record.
        _buffer.shift(fbe_struct_offset);
        bool fbe_result = verify_fields(fbe_struct_size);
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
// Check if the struct fields are valid
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + parent.fbe_body() - 4 - 4) > fbe_struct_size)
return true;
if (!parent.verify_fields(fbe_struct_size))
return false;
fbe_current_size += parent.fbe_body() - 4 - 4;
if ((fbe_current_size + f100.fbe_size()) > fbe_struct_size)
return true;
if (!f100.verify())
return false;
fbe_current_size += f100.fbe_size();
if ((fbe_current_size + f101.fbe_size()) > fbe_struct_size)
return true;
if (!f101.verify())
return false;
fbe_current_size += f101.fbe_size();
if ((fbe_current_size + f102.fbe_size()) > fbe_struct_size)
return true;
if (!f102.verify())
return false;
fbe_current_size += f102.fbe_size();
if ((fbe_current_size + f103.fbe_size()) > fbe_struct_size)
return true;
if (!f103.verify())
return false;
fbe_current_size += f103.fbe_size();
if ((fbe_current_size + f104.fbe_size()) > fbe_struct_size)
return true;
if (!f104.verify())
return false;
fbe_current_size += f104.fbe_size();
if ((fbe_current_size + f105.fbe_size()) > fbe_struct_size)
return true;
if (!f105.verify())
return false;
fbe_current_size += f105.fbe_size();
if ((fbe_current_size + f106.fbe_size()) > fbe_struct_size)
return true;
if (!f106.verify())
return false;
fbe_current_size += f106.fbe_size();
if ((fbe_current_size + f107.fbe_size()) > fbe_struct_size)
return true;
if (!f107.verify())
return false;
fbe_current_size += f107.fbe_size();
if ((fbe_current_size + f108.fbe_size()) > fbe_struct_size)
return true;
if (!f108.verify())
return false;
fbe_current_size += f108.fbe_size();
if ((fbe_current_size + f109.fbe_size()) > fbe_struct_size)
return true;
if (!f109.verify())
return false;
fbe_current_size += f109.fbe_size();
if ((fbe_current_size + f110.fbe_size()) > fbe_struct_size)
return true;
if (!f110.verify())
return false;
fbe_current_size += f110.fbe_size();
if ((fbe_current_size + f111.fbe_size()) > fbe_struct_size)
return true;
if (!f111.verify())
return false;
fbe_current_size += f111.fbe_size();
if ((fbe_current_size + f112.fbe_size()) > fbe_struct_size)
return true;
if (!f112.verify())
return false;
fbe_current_size += f112.fbe_size();
if ((fbe_current_size + f113.fbe_size()) > fbe_struct_size)
return true;
if (!f113.verify())
return false;
fbe_current_size += f113.fbe_size();
if ((fbe_current_size + f114.fbe_size()) > fbe_struct_size)
return true;
if (!f114.verify())
return false;
fbe_current_size += f114.fbe_size();
if ((fbe_current_size + f115.fbe_size()) > fbe_struct_size)
return true;
if (!f115.verify())
return false;
fbe_current_size += f115.fbe_size();
if ((fbe_current_size + f116.fbe_size()) > fbe_struct_size)
return true;
if (!f116.verify())
return false;
fbe_current_size += f116.fbe_size();
if ((fbe_current_size + f117.fbe_size()) > fbe_struct_size)
return true;
if (!f117.verify())
return false;
fbe_current_size += f117.fbe_size();
if ((fbe_current_size + f118.fbe_size()) > fbe_struct_size)
return true;
if (!f118.verify())
return false;
fbe_current_size += f118.fbe_size();
if ((fbe_current_size + f119.fbe_size()) > fbe_struct_size)
return true;
if (!f119.verify())
return false;
fbe_current_size += f119.fbe_size();
if ((fbe_current_size + f120.fbe_size()) > fbe_struct_size)
return true;
if (!f120.verify())
return false;
fbe_current_size += f120.fbe_size();
if ((fbe_current_size + f121.fbe_size()) > fbe_struct_size)
return true;
if (!f121.verify())
return false;
fbe_current_size += f121.fbe_size();
if ((fbe_current_size + f122.fbe_size()) > fbe_struct_size)
return true;
if (!f122.verify())
return false;
fbe_current_size += f122.fbe_size();
if ((fbe_current_size + f123.fbe_size()) > fbe_struct_size)
return true;
if (!f123.verify())
return false;
fbe_current_size += f123.fbe_size();
if ((fbe_current_size + f124.fbe_size()) > fbe_struct_size)
return true;
if (!f124.verify())
return false;
fbe_current_size += f124.fbe_size();
if ((fbe_current_size + f125.fbe_size()) > fbe_struct_size)
return true;
if (!f125.verify())
return false;
fbe_current_size += f125.fbe_size();
if ((fbe_current_size + f126.fbe_size()) > fbe_struct_size)
return true;
if (!f126.verify())
return false;
fbe_current_size += f126.fbe_size();
if ((fbe_current_size + f127.fbe_size()) > fbe_struct_size)
return true;
if (!f127.verify())
return false;
fbe_current_size += f127.fbe_size();
if ((fbe_current_size + f128.fbe_size()) > fbe_struct_size)
return true;
if (!f128.verify())
return false;
fbe_current_size += f128.fbe_size();
if ((fbe_current_size + f129.fbe_size()) > fbe_struct_size)
return true;
if (!f129.verify())
return false;
fbe_current_size += f129.fbe_size();
if ((fbe_current_size + f130.fbe_size()) > fbe_struct_size)
return true;
if (!f130.verify())
return false;
fbe_current_size += f130.fbe_size();
if ((fbe_current_size + f131.fbe_size()) > fbe_struct_size)
return true;
if (!f131.verify())
return false;
fbe_current_size += f131.fbe_size();
if ((fbe_current_size + f132.fbe_size()) > fbe_struct_size)
return true;
if (!f132.verify())
return false;
fbe_current_size += f132.fbe_size();
if ((fbe_current_size + f133.fbe_size()) > fbe_struct_size)
return true;
if (!f133.verify())
return false;
fbe_current_size += f133.fbe_size();
if ((fbe_current_size + f134.fbe_size()) > fbe_struct_size)
return true;
if (!f134.verify())
return false;
fbe_current_size += f134.fbe_size();
if ((fbe_current_size + f135.fbe_size()) > fbe_struct_size)
return true;
if (!f135.verify())
return false;
fbe_current_size += f135.fbe_size();
if ((fbe_current_size + f136.fbe_size()) > fbe_struct_size)
return true;
if (!f136.verify())
return false;
fbe_current_size += f136.fbe_size();
if ((fbe_current_size + f137.fbe_size()) > fbe_struct_size)
return true;
if (!f137.verify())
return false;
fbe_current_size += f137.fbe_size();
if ((fbe_current_size + f138.fbe_size()) > fbe_struct_size)
return true;
if (!f138.verify())
return false;
fbe_current_size += f138.fbe_size();
if ((fbe_current_size + f139.fbe_size()) > fbe_struct_size)
return true;
if (!f139.verify())
return false;
fbe_current_size += f139.fbe_size();
if ((fbe_current_size + f140.fbe_size()) > fbe_struct_size)
return true;
if (!f140.verify())
return false;
fbe_current_size += f140.fbe_size();
if ((fbe_current_size + f141.fbe_size()) > fbe_struct_size)
return true;
if (!f141.verify())
return false;
fbe_current_size += f141.fbe_size();
if ((fbe_current_size + f142.fbe_size()) > fbe_struct_size)
return true;
if (!f142.verify())
return false;
fbe_current_size += f142.fbe_size();
if ((fbe_current_size + f143.fbe_size()) > fbe_struct_size)
return true;
if (!f143.verify())
return false;
fbe_current_size += f143.fbe_size();
if ((fbe_current_size + f144.fbe_size()) > fbe_struct_size)
return true;
if (!f144.verify())
return false;
fbe_current_size += f144.fbe_size();
if ((fbe_current_size + f145.fbe_size()) > fbe_struct_size)
return true;
if (!f145.verify())
return false;
fbe_current_size += f145.fbe_size();
if ((fbe_current_size + f146.fbe_size()) > fbe_struct_size)
return true;
if (!f146.verify())
return false;
fbe_current_size += f146.fbe_size();
if ((fbe_current_size + f147.fbe_size()) > fbe_struct_size)
return true;
if (!f147.verify())
return false;
fbe_current_size += f147.fbe_size();
if ((fbe_current_size + f148.fbe_size()) > fbe_struct_size)
return true;
if (!f148.verify())
return false;
fbe_current_size += f148.fbe_size();
if ((fbe_current_size + f149.fbe_size()) > fbe_struct_size)
return true;
if (!f149.verify())
return false;
fbe_current_size += f149.fbe_size();
if ((fbe_current_size + f150.fbe_size()) > fbe_struct_size)
return true;
if (!f150.verify())
return false;
fbe_current_size += f150.fbe_size();
if ((fbe_current_size + f151.fbe_size()) > fbe_struct_size)
return true;
if (!f151.verify())
return false;
fbe_current_size += f151.fbe_size();
if ((fbe_current_size + f152.fbe_size()) > fbe_struct_size)
return true;
if (!f152.verify())
return false;
fbe_current_size += f152.fbe_size();
if ((fbe_current_size + f153.fbe_size()) > fbe_struct_size)
return true;
if (!f153.verify())
return false;
fbe_current_size += f153.fbe_size();
if ((fbe_current_size + f154.fbe_size()) > fbe_struct_size)
return true;
if (!f154.verify())
return false;
fbe_current_size += f154.fbe_size();
if ((fbe_current_size + f155.fbe_size()) > fbe_struct_size)
return true;
if (!f155.verify())
return false;
fbe_current_size += f155.fbe_size();
if ((fbe_current_size + f156.fbe_size()) > fbe_struct_size)
return true;
if (!f156.verify())
return false;
fbe_current_size += f156.fbe_size();
if ((fbe_current_size + f157.fbe_size()) > fbe_struct_size)
return true;
if (!f157.verify())
return false;
fbe_current_size += f157.fbe_size();
if ((fbe_current_size + f158.fbe_size()) > fbe_struct_size)
return true;
if (!f158.verify())
return false;
fbe_current_size += f158.fbe_size();
if ((fbe_current_size + f159.fbe_size()) > fbe_struct_size)
return true;
if (!f159.verify())
return false;
fbe_current_size += f159.fbe_size();
if ((fbe_current_size + f160.fbe_size()) > fbe_struct_size)
return true;
if (!f160.verify())
return false;
fbe_current_size += f160.fbe_size();
if ((fbe_current_size + f161.fbe_size()) > fbe_struct_size)
return true;
if (!f161.verify())
return false;
fbe_current_size += f161.fbe_size();
if ((fbe_current_size + f162.fbe_size()) > fbe_struct_size)
return true;
if (!f162.verify())
return false;
fbe_current_size += f162.fbe_size();
if ((fbe_current_size + f163.fbe_size()) > fbe_struct_size)
return true;
if (!f163.verify())
return false;
fbe_current_size += f163.fbe_size();
if ((fbe_current_size + f164.fbe_size()) > fbe_struct_size)
return true;
if (!f164.verify())
return false;
fbe_current_size += f164.fbe_size();
if ((fbe_current_size + f165.fbe_size()) > fbe_struct_size)
return true;
if (!f165.verify())
return false;
fbe_current_size += f165.fbe_size();
return true;
}
    // Get the struct value (begin phase).
    // Resolves the 4-byte offset pointer stored at this field's position,
    // sanity-checks the recorded struct size and shifts the buffer to the
    // record. Returns the shift amount (0 when the field is absent or the
    // data is broken); the caller must hand it back to get_end() to restore
    // the buffer.
    size_t get_begin() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return 0;
        // The record must at least hold its 4-byte size + 4-byte type header.
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
        if (fbe_struct_size < (4 + 4))
            return 0;
        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }
    // Get the struct value (end phase).
    // Restores the buffer shift performed by the matching get_begin() call.
    void get_end(size_t fbe_begin) const noexcept
    {
        _buffer.unshift(fbe_begin);
    }
// Get the struct value
void get(::test::StructOptional& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values
// Sequentially reads every field from the struct body. The running cursor
// fbe_current_size counts the bytes consumed so far; a field is read only
// when it fits entirely inside fbe_struct_size, which keeps deserialization
// forward-compatible with older (shorter) versions of the struct. A field
// that is missing from the serialized data is reset to its schema default
// (std::nullopt for plain optionals, or the default literal from the schema).
void get_fields(::test::StructOptional& fbe_value, size_t fbe_struct_size) const noexcept
{
    // Skip the 8-byte struct header (4-byte size + 4-byte type)
    size_t fbe_current_size = 4 + 4;
    // Base struct (::test::StructSimple) fields; its own header bytes are
    // excluded from the size accounting (hence the "- 4 - 4")
    if ((fbe_current_size + parent.fbe_body() - 4 - 4) <= fbe_struct_size)
        parent.get_fields(fbe_value, fbe_struct_size);
    fbe_current_size += parent.fbe_body() - 4 - 4;
    // Optional fields f100..f165: each follows the same
    // check-read-or-default / advance-cursor pattern
    if ((fbe_current_size + f100.fbe_size()) <= fbe_struct_size)
        f100.get(fbe_value.f100);
    else
        fbe_value.f100 = std::nullopt;
    fbe_current_size += f100.fbe_size();
    if ((fbe_current_size + f101.fbe_size()) <= fbe_struct_size)
        f101.get(fbe_value.f101, true);
    else
        fbe_value.f101 = true;
    fbe_current_size += f101.fbe_size();
    if ((fbe_current_size + f102.fbe_size()) <= fbe_struct_size)
        f102.get(fbe_value.f102, std::nullopt);
    else
        fbe_value.f102 = std::nullopt;
    fbe_current_size += f102.fbe_size();
    if ((fbe_current_size + f103.fbe_size()) <= fbe_struct_size)
        f103.get(fbe_value.f103);
    else
        fbe_value.f103 = std::nullopt;
    fbe_current_size += f103.fbe_size();
    if ((fbe_current_size + f104.fbe_size()) <= fbe_struct_size)
        f104.get(fbe_value.f104, (uint8_t)255u);
    else
        fbe_value.f104 = (uint8_t)255u;
    fbe_current_size += f104.fbe_size();
    if ((fbe_current_size + f105.fbe_size()) <= fbe_struct_size)
        f105.get(fbe_value.f105, std::nullopt);
    else
        fbe_value.f105 = std::nullopt;
    fbe_current_size += f105.fbe_size();
    if ((fbe_current_size + f106.fbe_size()) <= fbe_struct_size)
        f106.get(fbe_value.f106);
    else
        fbe_value.f106 = std::nullopt;
    fbe_current_size += f106.fbe_size();
    if ((fbe_current_size + f107.fbe_size()) <= fbe_struct_size)
        f107.get(fbe_value.f107, (char)'!');
    else
        fbe_value.f107 = (char)'!';
    fbe_current_size += f107.fbe_size();
    if ((fbe_current_size + f108.fbe_size()) <= fbe_struct_size)
        f108.get(fbe_value.f108, std::nullopt);
    else
        fbe_value.f108 = std::nullopt;
    fbe_current_size += f108.fbe_size();
    if ((fbe_current_size + f109.fbe_size()) <= fbe_struct_size)
        f109.get(fbe_value.f109);
    else
        fbe_value.f109 = std::nullopt;
    fbe_current_size += f109.fbe_size();
    if ((fbe_current_size + f110.fbe_size()) <= fbe_struct_size)
        f110.get(fbe_value.f110, (wchar_t)0x0444);
    else
        fbe_value.f110 = (wchar_t)0x0444;
    fbe_current_size += f110.fbe_size();
    if ((fbe_current_size + f111.fbe_size()) <= fbe_struct_size)
        f111.get(fbe_value.f111, std::nullopt);
    else
        fbe_value.f111 = std::nullopt;
    fbe_current_size += f111.fbe_size();
    if ((fbe_current_size + f112.fbe_size()) <= fbe_struct_size)
        f112.get(fbe_value.f112);
    else
        fbe_value.f112 = std::nullopt;
    fbe_current_size += f112.fbe_size();
    if ((fbe_current_size + f113.fbe_size()) <= fbe_struct_size)
        f113.get(fbe_value.f113, (int8_t)127);
    else
        fbe_value.f113 = (int8_t)127;
    fbe_current_size += f113.fbe_size();
    if ((fbe_current_size + f114.fbe_size()) <= fbe_struct_size)
        f114.get(fbe_value.f114, std::nullopt);
    else
        fbe_value.f114 = std::nullopt;
    fbe_current_size += f114.fbe_size();
    if ((fbe_current_size + f115.fbe_size()) <= fbe_struct_size)
        f115.get(fbe_value.f115);
    else
        fbe_value.f115 = std::nullopt;
    fbe_current_size += f115.fbe_size();
    if ((fbe_current_size + f116.fbe_size()) <= fbe_struct_size)
        f116.get(fbe_value.f116, (uint8_t)255u);
    else
        fbe_value.f116 = (uint8_t)255u;
    fbe_current_size += f116.fbe_size();
    if ((fbe_current_size + f117.fbe_size()) <= fbe_struct_size)
        f117.get(fbe_value.f117, std::nullopt);
    else
        fbe_value.f117 = std::nullopt;
    fbe_current_size += f117.fbe_size();
    if ((fbe_current_size + f118.fbe_size()) <= fbe_struct_size)
        f118.get(fbe_value.f118);
    else
        fbe_value.f118 = std::nullopt;
    fbe_current_size += f118.fbe_size();
    if ((fbe_current_size + f119.fbe_size()) <= fbe_struct_size)
        f119.get(fbe_value.f119, (int16_t)32767);
    else
        fbe_value.f119 = (int16_t)32767;
    fbe_current_size += f119.fbe_size();
    if ((fbe_current_size + f120.fbe_size()) <= fbe_struct_size)
        f120.get(fbe_value.f120, std::nullopt);
    else
        fbe_value.f120 = std::nullopt;
    fbe_current_size += f120.fbe_size();
    if ((fbe_current_size + f121.fbe_size()) <= fbe_struct_size)
        f121.get(fbe_value.f121);
    else
        fbe_value.f121 = std::nullopt;
    fbe_current_size += f121.fbe_size();
    if ((fbe_current_size + f122.fbe_size()) <= fbe_struct_size)
        f122.get(fbe_value.f122, (uint16_t)65535u);
    else
        fbe_value.f122 = (uint16_t)65535u;
    fbe_current_size += f122.fbe_size();
    if ((fbe_current_size + f123.fbe_size()) <= fbe_struct_size)
        f123.get(fbe_value.f123, std::nullopt);
    else
        fbe_value.f123 = std::nullopt;
    fbe_current_size += f123.fbe_size();
    if ((fbe_current_size + f124.fbe_size()) <= fbe_struct_size)
        f124.get(fbe_value.f124);
    else
        fbe_value.f124 = std::nullopt;
    fbe_current_size += f124.fbe_size();
    if ((fbe_current_size + f125.fbe_size()) <= fbe_struct_size)
        f125.get(fbe_value.f125, (int32_t)2147483647ll);
    else
        fbe_value.f125 = (int32_t)2147483647ll;
    fbe_current_size += f125.fbe_size();
    if ((fbe_current_size + f126.fbe_size()) <= fbe_struct_size)
        f126.get(fbe_value.f126, std::nullopt);
    else
        fbe_value.f126 = std::nullopt;
    fbe_current_size += f126.fbe_size();
    if ((fbe_current_size + f127.fbe_size()) <= fbe_struct_size)
        f127.get(fbe_value.f127);
    else
        fbe_value.f127 = std::nullopt;
    fbe_current_size += f127.fbe_size();
    if ((fbe_current_size + f128.fbe_size()) <= fbe_struct_size)
        f128.get(fbe_value.f128, (uint32_t)4294967295ull);
    else
        fbe_value.f128 = (uint32_t)4294967295ull;
    fbe_current_size += f128.fbe_size();
    if ((fbe_current_size + f129.fbe_size()) <= fbe_struct_size)
        f129.get(fbe_value.f129, std::nullopt);
    else
        fbe_value.f129 = std::nullopt;
    fbe_current_size += f129.fbe_size();
    if ((fbe_current_size + f130.fbe_size()) <= fbe_struct_size)
        f130.get(fbe_value.f130);
    else
        fbe_value.f130 = std::nullopt;
    fbe_current_size += f130.fbe_size();
    if ((fbe_current_size + f131.fbe_size()) <= fbe_struct_size)
        f131.get(fbe_value.f131, (int64_t)9223372036854775807ll);
    else
        fbe_value.f131 = (int64_t)9223372036854775807ll;
    fbe_current_size += f131.fbe_size();
    if ((fbe_current_size + f132.fbe_size()) <= fbe_struct_size)
        f132.get(fbe_value.f132, std::nullopt);
    else
        fbe_value.f132 = std::nullopt;
    fbe_current_size += f132.fbe_size();
    if ((fbe_current_size + f133.fbe_size()) <= fbe_struct_size)
        f133.get(fbe_value.f133);
    else
        fbe_value.f133 = std::nullopt;
    fbe_current_size += f133.fbe_size();
    if ((fbe_current_size + f134.fbe_size()) <= fbe_struct_size)
        f134.get(fbe_value.f134, (uint64_t)18446744073709551615ull);
    else
        fbe_value.f134 = (uint64_t)18446744073709551615ull;
    fbe_current_size += f134.fbe_size();
    if ((fbe_current_size + f135.fbe_size()) <= fbe_struct_size)
        f135.get(fbe_value.f135, std::nullopt);
    else
        fbe_value.f135 = std::nullopt;
    fbe_current_size += f135.fbe_size();
    if ((fbe_current_size + f136.fbe_size()) <= fbe_struct_size)
        f136.get(fbe_value.f136);
    else
        fbe_value.f136 = std::nullopt;
    fbe_current_size += f136.fbe_size();
    if ((fbe_current_size + f137.fbe_size()) <= fbe_struct_size)
        f137.get(fbe_value.f137, (float)123.456f);
    else
        fbe_value.f137 = (float)123.456f;
    fbe_current_size += f137.fbe_size();
    if ((fbe_current_size + f138.fbe_size()) <= fbe_struct_size)
        f138.get(fbe_value.f138, std::nullopt);
    else
        fbe_value.f138 = std::nullopt;
    fbe_current_size += f138.fbe_size();
    if ((fbe_current_size + f139.fbe_size()) <= fbe_struct_size)
        f139.get(fbe_value.f139);
    else
        fbe_value.f139 = std::nullopt;
    fbe_current_size += f139.fbe_size();
    if ((fbe_current_size + f140.fbe_size()) <= fbe_struct_size)
        f140.get(fbe_value.f140, (double)-123.456e+123);
    else
        fbe_value.f140 = (double)-123.456e+123;
    fbe_current_size += f140.fbe_size();
    if ((fbe_current_size + f141.fbe_size()) <= fbe_struct_size)
        f141.get(fbe_value.f141, std::nullopt);
    else
        fbe_value.f141 = std::nullopt;
    fbe_current_size += f141.fbe_size();
    if ((fbe_current_size + f142.fbe_size()) <= fbe_struct_size)
        f142.get(fbe_value.f142);
    else
        fbe_value.f142 = std::nullopt;
    fbe_current_size += f142.fbe_size();
    if ((fbe_current_size + f143.fbe_size()) <= fbe_struct_size)
        f143.get(fbe_value.f143, FBE::decimal_t(123456.123456));
    else
        fbe_value.f143 = FBE::decimal_t(123456.123456);
    fbe_current_size += f143.fbe_size();
    if ((fbe_current_size + f144.fbe_size()) <= fbe_struct_size)
        f144.get(fbe_value.f144, std::nullopt);
    else
        fbe_value.f144 = std::nullopt;
    fbe_current_size += f144.fbe_size();
    if ((fbe_current_size + f145.fbe_size()) <= fbe_struct_size)
        f145.get(fbe_value.f145);
    else
        fbe_value.f145 = std::nullopt;
    fbe_current_size += f145.fbe_size();
    if ((fbe_current_size + f146.fbe_size()) <= fbe_struct_size)
        f146.get(fbe_value.f146, "Initial string!");
    else
        fbe_value.f146 = "Initial string!";
    fbe_current_size += f146.fbe_size();
    if ((fbe_current_size + f147.fbe_size()) <= fbe_struct_size)
        f147.get(fbe_value.f147, std::nullopt);
    else
        fbe_value.f147 = std::nullopt;
    fbe_current_size += f147.fbe_size();
    if ((fbe_current_size + f148.fbe_size()) <= fbe_struct_size)
        f148.get(fbe_value.f148);
    else
        fbe_value.f148 = std::nullopt;
    fbe_current_size += f148.fbe_size();
    // f149 defaults to the current UTC timestamp (FBE::utc())
    if ((fbe_current_size + f149.fbe_size()) <= fbe_struct_size)
        f149.get(fbe_value.f149, FBE::utc());
    else
        fbe_value.f149 = FBE::utc();
    fbe_current_size += f149.fbe_size();
    if ((fbe_current_size + f150.fbe_size()) <= fbe_struct_size)
        f150.get(fbe_value.f150, std::nullopt);
    else
        fbe_value.f150 = std::nullopt;
    fbe_current_size += f150.fbe_size();
    if ((fbe_current_size + f151.fbe_size()) <= fbe_struct_size)
        f151.get(fbe_value.f151);
    else
        fbe_value.f151 = std::nullopt;
    fbe_current_size += f151.fbe_size();
    if ((fbe_current_size + f152.fbe_size()) <= fbe_struct_size)
        f152.get(fbe_value.f152, FBE::uuid_t("123e4567-e89b-12d3-a456-426655440000"));
    else
        fbe_value.f152 = FBE::uuid_t("123e4567-e89b-12d3-a456-426655440000");
    fbe_current_size += f152.fbe_size();
    if ((fbe_current_size + f153.fbe_size()) <= fbe_struct_size)
        f153.get(fbe_value.f153, std::nullopt);
    else
        fbe_value.f153 = std::nullopt;
    fbe_current_size += f153.fbe_size();
    // Enum / nested-struct optionals (f154..f165) all default to std::nullopt
    if ((fbe_current_size + f154.fbe_size()) <= fbe_struct_size)
        f154.get(fbe_value.f154);
    else
        fbe_value.f154 = std::nullopt;
    fbe_current_size += f154.fbe_size();
    if ((fbe_current_size + f155.fbe_size()) <= fbe_struct_size)
        f155.get(fbe_value.f155, std::nullopt);
    else
        fbe_value.f155 = std::nullopt;
    fbe_current_size += f155.fbe_size();
    if ((fbe_current_size + f156.fbe_size()) <= fbe_struct_size)
        f156.get(fbe_value.f156);
    else
        fbe_value.f156 = std::nullopt;
    fbe_current_size += f156.fbe_size();
    if ((fbe_current_size + f157.fbe_size()) <= fbe_struct_size)
        f157.get(fbe_value.f157, std::nullopt);
    else
        fbe_value.f157 = std::nullopt;
    fbe_current_size += f157.fbe_size();
    if ((fbe_current_size + f158.fbe_size()) <= fbe_struct_size)
        f158.get(fbe_value.f158);
    else
        fbe_value.f158 = std::nullopt;
    fbe_current_size += f158.fbe_size();
    if ((fbe_current_size + f159.fbe_size()) <= fbe_struct_size)
        f159.get(fbe_value.f159, std::nullopt);
    else
        fbe_value.f159 = std::nullopt;
    fbe_current_size += f159.fbe_size();
    if ((fbe_current_size + f160.fbe_size()) <= fbe_struct_size)
        f160.get(fbe_value.f160);
    else
        fbe_value.f160 = std::nullopt;
    fbe_current_size += f160.fbe_size();
    if ((fbe_current_size + f161.fbe_size()) <= fbe_struct_size)
        f161.get(fbe_value.f161, std::nullopt);
    else
        fbe_value.f161 = std::nullopt;
    fbe_current_size += f161.fbe_size();
    if ((fbe_current_size + f162.fbe_size()) <= fbe_struct_size)
        f162.get(fbe_value.f162);
    else
        fbe_value.f162 = std::nullopt;
    fbe_current_size += f162.fbe_size();
    if ((fbe_current_size + f163.fbe_size()) <= fbe_struct_size)
        f163.get(fbe_value.f163, std::nullopt);
    else
        fbe_value.f163 = std::nullopt;
    fbe_current_size += f163.fbe_size();
    if ((fbe_current_size + f164.fbe_size()) <= fbe_struct_size)
        f164.get(fbe_value.f164);
    else
        fbe_value.f164 = std::nullopt;
    fbe_current_size += f164.fbe_size();
    if ((fbe_current_size + f165.fbe_size()) <= fbe_struct_size)
        f165.get(fbe_value.f165, std::nullopt);
    else
        fbe_value.f165 = std::nullopt;
    fbe_current_size += f165.fbe_size();
}
// Set the struct value (begin phase)
// Allocates the struct body, stores the struct pointer at this field's
// offset, writes the 8-byte header (4-byte body size + 4-byte type id) and
// shifts the buffer so subsequent field writes are relative to the struct.
// Returns the struct offset, or 0 when the buffer is in a broken state.
// NOTE: the header words must be written after allocate() since allocation
// may reallocate/move the underlying buffer storage.
size_t set_begin()
{
    assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
    if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
        return 0;
    uint32_t fbe_struct_size = (uint32_t)fbe_body();
    uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
    assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
    if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
        return 0;
    *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
    *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
    *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
    _buffer.shift(fbe_struct_offset);
    return fbe_struct_offset;
}
// Set the struct value (end phase)
// Restores the buffer shift applied by set_begin().
void set_end(size_t fbe_begin)
{
    _buffer.unshift(fbe_begin);
}
// Set the struct value
// Serializes fbe_value into the buffer. Silently does nothing when
// set_begin() cannot allocate/locate the struct (returns 0).
void set(const ::test::StructOptional& fbe_value) noexcept
{
    size_t struct_begin = set_begin();
    if (struct_begin != 0)
    {
        set_fields(fbe_value);
        set_end(struct_begin);
    }
}
// Set the struct fields values
// Writes the base struct fields followed by every optional field f100..f165
// in schema order. Each field model writes at its own pre-computed offset,
// so the calls below must match the layout used by get_fields/verify.
void set_fields(const ::test::StructOptional& fbe_value) noexcept
{
    // Base struct (::test::StructSimple) part
    parent.set_fields(fbe_value);
    // Optional fields
    f100.set(fbe_value.f100);
    f101.set(fbe_value.f101);
    f102.set(fbe_value.f102);
    f103.set(fbe_value.f103);
    f104.set(fbe_value.f104);
    f105.set(fbe_value.f105);
    f106.set(fbe_value.f106);
    f107.set(fbe_value.f107);
    f108.set(fbe_value.f108);
    f109.set(fbe_value.f109);
    f110.set(fbe_value.f110);
    f111.set(fbe_value.f111);
    f112.set(fbe_value.f112);
    f113.set(fbe_value.f113);
    f114.set(fbe_value.f114);
    f115.set(fbe_value.f115);
    f116.set(fbe_value.f116);
    f117.set(fbe_value.f117);
    f118.set(fbe_value.f118);
    f119.set(fbe_value.f119);
    f120.set(fbe_value.f120);
    f121.set(fbe_value.f121);
    f122.set(fbe_value.f122);
    f123.set(fbe_value.f123);
    f124.set(fbe_value.f124);
    f125.set(fbe_value.f125);
    f126.set(fbe_value.f126);
    f127.set(fbe_value.f127);
    f128.set(fbe_value.f128);
    f129.set(fbe_value.f129);
    f130.set(fbe_value.f130);
    f131.set(fbe_value.f131);
    f132.set(fbe_value.f132);
    f133.set(fbe_value.f133);
    f134.set(fbe_value.f134);
    f135.set(fbe_value.f135);
    f136.set(fbe_value.f136);
    f137.set(fbe_value.f137);
    f138.set(fbe_value.f138);
    f139.set(fbe_value.f139);
    f140.set(fbe_value.f140);
    f141.set(fbe_value.f141);
    f142.set(fbe_value.f142);
    f143.set(fbe_value.f143);
    f144.set(fbe_value.f144);
    f145.set(fbe_value.f145);
    f146.set(fbe_value.f146);
    f147.set(fbe_value.f147);
    f148.set(fbe_value.f148);
    f149.set(fbe_value.f149);
    f150.set(fbe_value.f150);
    f151.set(fbe_value.f151);
    f152.set(fbe_value.f152);
    f153.set(fbe_value.f153);
    f154.set(fbe_value.f154);
    f155.set(fbe_value.f155);
    f156.set(fbe_value.f156);
    f157.set(fbe_value.f157);
    f158.set(fbe_value.f158);
    f159.set(fbe_value.f159);
    f160.set(fbe_value.f160);
    f161.set(fbe_value.f161);
    f162.set(fbe_value.f162);
    f163.set(fbe_value.f163);
    f164.set(fbe_value.f164);
    f165.set(fbe_value.f165);
}
private:
    TBuffer& _buffer;   // underlying serialization buffer (not owned)
    size_t _offset;     // this field model's offset within the buffer
public:
    // Field models in schema order: the base struct part followed by the
    // optional fields f100..f165 declared by ::test::StructOptional.
    FieldModel<TBuffer, ::test::StructSimple> parent;
    // Optional booleans
    FieldModel<TBuffer, std::optional<bool>> f100;
    FieldModel<TBuffer, std::optional<bool>> f101;
    FieldModel<TBuffer, std::optional<bool>> f102;
    // Optional bytes
    FieldModel<TBuffer, std::optional<uint8_t>> f103;
    FieldModel<TBuffer, std::optional<uint8_t>> f104;
    FieldModel<TBuffer, std::optional<uint8_t>> f105;
    // Optional chars / wide chars
    FieldModel<TBuffer, std::optional<char>> f106;
    FieldModel<TBuffer, std::optional<char>> f107;
    FieldModel<TBuffer, std::optional<char>> f108;
    FieldModel<TBuffer, std::optional<wchar_t>> f109;
    FieldModel<TBuffer, std::optional<wchar_t>> f110;
    FieldModel<TBuffer, std::optional<wchar_t>> f111;
    // Optional fixed-width integers
    FieldModel<TBuffer, std::optional<int8_t>> f112;
    FieldModel<TBuffer, std::optional<int8_t>> f113;
    FieldModel<TBuffer, std::optional<int8_t>> f114;
    FieldModel<TBuffer, std::optional<uint8_t>> f115;
    FieldModel<TBuffer, std::optional<uint8_t>> f116;
    FieldModel<TBuffer, std::optional<uint8_t>> f117;
    FieldModel<TBuffer, std::optional<int16_t>> f118;
    FieldModel<TBuffer, std::optional<int16_t>> f119;
    FieldModel<TBuffer, std::optional<int16_t>> f120;
    FieldModel<TBuffer, std::optional<uint16_t>> f121;
    FieldModel<TBuffer, std::optional<uint16_t>> f122;
    FieldModel<TBuffer, std::optional<uint16_t>> f123;
    FieldModel<TBuffer, std::optional<int32_t>> f124;
    FieldModel<TBuffer, std::optional<int32_t>> f125;
    FieldModel<TBuffer, std::optional<int32_t>> f126;
    FieldModel<TBuffer, std::optional<uint32_t>> f127;
    FieldModel<TBuffer, std::optional<uint32_t>> f128;
    FieldModel<TBuffer, std::optional<uint32_t>> f129;
    FieldModel<TBuffer, std::optional<int64_t>> f130;
    FieldModel<TBuffer, std::optional<int64_t>> f131;
    FieldModel<TBuffer, std::optional<int64_t>> f132;
    FieldModel<TBuffer, std::optional<uint64_t>> f133;
    FieldModel<TBuffer, std::optional<uint64_t>> f134;
    FieldModel<TBuffer, std::optional<uint64_t>> f135;
    // Optional floating point and decimal
    FieldModel<TBuffer, std::optional<float>> f136;
    FieldModel<TBuffer, std::optional<float>> f137;
    FieldModel<TBuffer, std::optional<float>> f138;
    FieldModel<TBuffer, std::optional<double>> f139;
    FieldModel<TBuffer, std::optional<double>> f140;
    FieldModel<TBuffer, std::optional<double>> f141;
    FieldModel<TBuffer, std::optional<FBE::decimal_t>> f142;
    FieldModel<TBuffer, std::optional<FBE::decimal_t>> f143;
    FieldModel<TBuffer, std::optional<FBE::decimal_t>> f144;
    // Optional strings, timestamps (uint64_t ticks) and UUIDs
    FieldModel<TBuffer, std::optional<std::string>> f145;
    FieldModel<TBuffer, std::optional<std::string>> f146;
    FieldModel<TBuffer, std::optional<std::string>> f147;
    FieldModel<TBuffer, std::optional<uint64_t>> f148;
    FieldModel<TBuffer, std::optional<uint64_t>> f149;
    FieldModel<TBuffer, std::optional<uint64_t>> f150;
    FieldModel<TBuffer, std::optional<FBE::uuid_t>> f151;
    FieldModel<TBuffer, std::optional<FBE::uuid_t>> f152;
    FieldModel<TBuffer, std::optional<FBE::uuid_t>> f153;
    // Optional enums and nested structs from the ::proto package
    FieldModel<TBuffer, std::optional<::proto::OrderSide>> f154;
    FieldModel<TBuffer, std::optional<::proto::OrderSide>> f155;
    FieldModel<TBuffer, std::optional<::proto::OrderType>> f156;
    FieldModel<TBuffer, std::optional<::proto::OrderType>> f157;
    FieldModel<TBuffer, std::optional<::proto::Order>> f158;
    FieldModel<TBuffer, std::optional<::proto::Order>> f159;
    FieldModel<TBuffer, std::optional<::proto::Balance>> f160;
    FieldModel<TBuffer, std::optional<::proto::Balance>> f161;
    FieldModel<TBuffer, std::optional<::proto::State>> f162;
    FieldModel<TBuffer, std::optional<::proto::State>> f163;
    FieldModel<TBuffer, std::optional<::proto::Account>> f164;
    FieldModel<TBuffer, std::optional<::proto::Account>> f165;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructOptional model
// Wraps a FieldModel for ::test::StructOptional behind a 4-byte full-size
// prefix, providing whole-message serialize/deserialize/verify operations.
template <class TBuffer>
class StructOptionalModel : public FBE::Model<TBuffer>
{
public:
    StructOptionalModel() : model(this->buffer(), 4) {}
    StructOptionalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size (struct body plus referenced extra data)
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructOptional>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;

        // The 4 bytes in front of the field model hold the full model size
        uint32_t full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        if (full_size < model.fbe_size())
            return false;

        return model.verify();
    }

    // Create a new model (begin phase): reserve the size prefix plus the body
    size_t create_begin()
    {
        return this->buffer().allocate(4 + model.fbe_size());
    }

    // Create a new model (end phase): patch the total serialized size into the prefix
    size_t create_end(size_t fbe_begin)
    {
        uint32_t full_size = (uint32_t)(this->buffer().size() - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = full_size;
        return full_size;
    }

    // Serialize the struct value; returns the full serialized size
    size_t serialize(const ::test::StructOptional& value)
    {
        size_t begin = create_begin();
        model.set(value);
        return create_end(begin);
    }

    // Deserialize the struct value; returns the full serialized size or 0 on failure
    size_t deserialize(::test::StructOptional& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;

        uint32_t full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        assert((full_size >= model.fbe_size()) && "Model is broken!");
        if (full_size < model.fbe_size())
            return 0;

        model.get(value);
        return full_size;
    }

    // Move to the next struct value
    void next(size_t prev) noexcept { model.fbe_shift(prev); }

public:
    FieldModel<TBuffer, ::test::StructOptional> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructOptional final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructOptional>
{
public:
// Construct the final model over the given buffer at the given offset.
// All field models start at relative offset 0; verify_fields repositions
// them (via fbe_offset) as the fields are processed in schema order.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
    , parent(buffer, 0)
    , f100(buffer, 0)
    , f101(buffer, 0)
    , f102(buffer, 0)
    , f103(buffer, 0)
    , f104(buffer, 0)
    , f105(buffer, 0)
    , f106(buffer, 0)
    , f107(buffer, 0)
    , f108(buffer, 0)
    , f109(buffer, 0)
    , f110(buffer, 0)
    , f111(buffer, 0)
    , f112(buffer, 0)
    , f113(buffer, 0)
    , f114(buffer, 0)
    , f115(buffer, 0)
    , f116(buffer, 0)
    , f117(buffer, 0)
    , f118(buffer, 0)
    , f119(buffer, 0)
    , f120(buffer, 0)
    , f121(buffer, 0)
    , f122(buffer, 0)
    , f123(buffer, 0)
    , f124(buffer, 0)
    , f125(buffer, 0)
    , f126(buffer, 0)
    , f127(buffer, 0)
    , f128(buffer, 0)
    , f129(buffer, 0)
    , f130(buffer, 0)
    , f131(buffer, 0)
    , f132(buffer, 0)
    , f133(buffer, 0)
    , f134(buffer, 0)
    , f135(buffer, 0)
    , f136(buffer, 0)
    , f137(buffer, 0)
    , f138(buffer, 0)
    , f139(buffer, 0)
    , f140(buffer, 0)
    , f141(buffer, 0)
    , f142(buffer, 0)
    , f143(buffer, 0)
    , f144(buffer, 0)
    , f145(buffer, 0)
    , f146(buffer, 0)
    , f147(buffer, 0)
    , f148(buffer, 0)
    , f149(buffer, 0)
    , f150(buffer, 0)
    , f151(buffer, 0)
    , f152(buffer, 0)
    , f153(buffer, 0)
    , f154(buffer, 0)
    , f155(buffer, 0)
    , f156(buffer, 0)
    , f157(buffer, 0)
    , f158(buffer, 0)
    , f159(buffer, 0)
    , f160(buffer, 0)
    , f161(buffer, 0)
    , f162(buffer, 0)
    , f163(buffer, 0)
    , f164(buffer, 0)
    , f165(buffer, 0)
{}
// Get the allocation size
// Computes the exact number of bytes needed to serialize the given value in
// the final (compact) format: the base struct part plus each optional field
// f100..f165, accumulated in schema order.
size_t fbe_allocation_size(const ::test::StructOptional& fbe_value) const noexcept
{
    size_t total = parent.fbe_allocation_size(fbe_value);
    total += f100.fbe_allocation_size(fbe_value.f100);
    total += f101.fbe_allocation_size(fbe_value.f101);
    total += f102.fbe_allocation_size(fbe_value.f102);
    total += f103.fbe_allocation_size(fbe_value.f103);
    total += f104.fbe_allocation_size(fbe_value.f104);
    total += f105.fbe_allocation_size(fbe_value.f105);
    total += f106.fbe_allocation_size(fbe_value.f106);
    total += f107.fbe_allocation_size(fbe_value.f107);
    total += f108.fbe_allocation_size(fbe_value.f108);
    total += f109.fbe_allocation_size(fbe_value.f109);
    total += f110.fbe_allocation_size(fbe_value.f110);
    total += f111.fbe_allocation_size(fbe_value.f111);
    total += f112.fbe_allocation_size(fbe_value.f112);
    total += f113.fbe_allocation_size(fbe_value.f113);
    total += f114.fbe_allocation_size(fbe_value.f114);
    total += f115.fbe_allocation_size(fbe_value.f115);
    total += f116.fbe_allocation_size(fbe_value.f116);
    total += f117.fbe_allocation_size(fbe_value.f117);
    total += f118.fbe_allocation_size(fbe_value.f118);
    total += f119.fbe_allocation_size(fbe_value.f119);
    total += f120.fbe_allocation_size(fbe_value.f120);
    total += f121.fbe_allocation_size(fbe_value.f121);
    total += f122.fbe_allocation_size(fbe_value.f122);
    total += f123.fbe_allocation_size(fbe_value.f123);
    total += f124.fbe_allocation_size(fbe_value.f124);
    total += f125.fbe_allocation_size(fbe_value.f125);
    total += f126.fbe_allocation_size(fbe_value.f126);
    total += f127.fbe_allocation_size(fbe_value.f127);
    total += f128.fbe_allocation_size(fbe_value.f128);
    total += f129.fbe_allocation_size(fbe_value.f129);
    total += f130.fbe_allocation_size(fbe_value.f130);
    total += f131.fbe_allocation_size(fbe_value.f131);
    total += f132.fbe_allocation_size(fbe_value.f132);
    total += f133.fbe_allocation_size(fbe_value.f133);
    total += f134.fbe_allocation_size(fbe_value.f134);
    total += f135.fbe_allocation_size(fbe_value.f135);
    total += f136.fbe_allocation_size(fbe_value.f136);
    total += f137.fbe_allocation_size(fbe_value.f137);
    total += f138.fbe_allocation_size(fbe_value.f138);
    total += f139.fbe_allocation_size(fbe_value.f139);
    total += f140.fbe_allocation_size(fbe_value.f140);
    total += f141.fbe_allocation_size(fbe_value.f141);
    total += f142.fbe_allocation_size(fbe_value.f142);
    total += f143.fbe_allocation_size(fbe_value.f143);
    total += f144.fbe_allocation_size(fbe_value.f144);
    total += f145.fbe_allocation_size(fbe_value.f145);
    total += f146.fbe_allocation_size(fbe_value.f146);
    total += f147.fbe_allocation_size(fbe_value.f147);
    total += f148.fbe_allocation_size(fbe_value.f148);
    total += f149.fbe_allocation_size(fbe_value.f149);
    total += f150.fbe_allocation_size(fbe_value.f150);
    total += f151.fbe_allocation_size(fbe_value.f151);
    total += f152.fbe_allocation_size(fbe_value.f152);
    total += f153.fbe_allocation_size(fbe_value.f153);
    total += f154.fbe_allocation_size(fbe_value.f154);
    total += f155.fbe_allocation_size(fbe_value.f155);
    total += f156.fbe_allocation_size(fbe_value.f156);
    total += f157.fbe_allocation_size(fbe_value.f157);
    total += f158.fbe_allocation_size(fbe_value.f158);
    total += f159.fbe_allocation_size(fbe_value.f159);
    total += f160.fbe_allocation_size(fbe_value.f160);
    total += f161.fbe_allocation_size(fbe_value.f161);
    total += f162.fbe_allocation_size(fbe_value.f162);
    total += f163.fbe_allocation_size(fbe_value.f163);
    total += f164.fbe_allocation_size(fbe_value.f164);
    total += f165.fbe_allocation_size(fbe_value.f165);
    return total;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset
// NOTE(review): this const member function assigns _offset, which compiles
// only if _offset is declared mutable — the member declaration is outside
// this chunk; confirm against it. The const-setter exists so the const
// verify()/verify_fields() path can reposition field models while scanning.
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 111; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
// Temporarily shifts the buffer to this model's offset so that field
// verification uses offsets relative to the struct start, then restores it.
// Returns the total verified size in bytes, or
// std::numeric_limits<std::size_t>::max() on failure (propagated from
// verify_fields).
size_t verify() const noexcept
{
    _buffer.shift(fbe_offset());
    size_t fbe_result = verify_fields();
    _buffer.unshift(fbe_offset());
    return fbe_result;
}
// Check if the struct fields are valid
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
parent.fbe_offset(fbe_current_offset);
fbe_field_size = parent.verify_fields();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f100.fbe_offset(fbe_current_offset);
fbe_field_size = f100.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f101.fbe_offset(fbe_current_offset);
fbe_field_size = f101.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f102.fbe_offset(fbe_current_offset);
fbe_field_size = f102.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f103.fbe_offset(fbe_current_offset);
fbe_field_size = f103.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f104.fbe_offset(fbe_current_offset);
fbe_field_size = f104.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f105.fbe_offset(fbe_current_offset);
fbe_field_size = f105.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f106.fbe_offset(fbe_current_offset);
fbe_field_size = f106.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f107.fbe_offset(fbe_current_offset);
fbe_field_size = f107.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f108.fbe_offset(fbe_current_offset);
fbe_field_size = f108.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f109.fbe_offset(fbe_current_offset);
fbe_field_size = f109.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f110.fbe_offset(fbe_current_offset);
fbe_field_size = f110.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f111.fbe_offset(fbe_current_offset);
fbe_field_size = f111.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f112.fbe_offset(fbe_current_offset);
fbe_field_size = f112.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f113.fbe_offset(fbe_current_offset);
fbe_field_size = f113.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f114.fbe_offset(fbe_current_offset);
fbe_field_size = f114.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f115.fbe_offset(fbe_current_offset);
fbe_field_size = f115.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f116.fbe_offset(fbe_current_offset);
fbe_field_size = f116.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f117.fbe_offset(fbe_current_offset);
fbe_field_size = f117.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f118.fbe_offset(fbe_current_offset);
fbe_field_size = f118.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f119.fbe_offset(fbe_current_offset);
fbe_field_size = f119.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f120.fbe_offset(fbe_current_offset);
fbe_field_size = f120.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f121.fbe_offset(fbe_current_offset);
fbe_field_size = f121.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f122.fbe_offset(fbe_current_offset);
fbe_field_size = f122.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f123.fbe_offset(fbe_current_offset);
fbe_field_size = f123.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f124.fbe_offset(fbe_current_offset);
fbe_field_size = f124.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f125.fbe_offset(fbe_current_offset);
fbe_field_size = f125.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f126.fbe_offset(fbe_current_offset);
fbe_field_size = f126.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f127.fbe_offset(fbe_current_offset);
fbe_field_size = f127.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f128.fbe_offset(fbe_current_offset);
fbe_field_size = f128.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f129.fbe_offset(fbe_current_offset);
fbe_field_size = f129.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f130.fbe_offset(fbe_current_offset);
fbe_field_size = f130.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f131.fbe_offset(fbe_current_offset);
fbe_field_size = f131.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f132.fbe_offset(fbe_current_offset);
fbe_field_size = f132.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f133.fbe_offset(fbe_current_offset);
fbe_field_size = f133.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f134.fbe_offset(fbe_current_offset);
fbe_field_size = f134.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f135.fbe_offset(fbe_current_offset);
fbe_field_size = f135.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f136.fbe_offset(fbe_current_offset);
fbe_field_size = f136.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f137.fbe_offset(fbe_current_offset);
fbe_field_size = f137.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f138.fbe_offset(fbe_current_offset);
fbe_field_size = f138.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f139.fbe_offset(fbe_current_offset);
fbe_field_size = f139.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f140.fbe_offset(fbe_current_offset);
fbe_field_size = f140.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f141.fbe_offset(fbe_current_offset);
fbe_field_size = f141.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f142.fbe_offset(fbe_current_offset);
fbe_field_size = f142.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f143.fbe_offset(fbe_current_offset);
fbe_field_size = f143.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f144.fbe_offset(fbe_current_offset);
fbe_field_size = f144.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f145.fbe_offset(fbe_current_offset);
fbe_field_size = f145.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f146.fbe_offset(fbe_current_offset);
fbe_field_size = f146.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f147.fbe_offset(fbe_current_offset);
fbe_field_size = f147.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f148.fbe_offset(fbe_current_offset);
fbe_field_size = f148.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f149.fbe_offset(fbe_current_offset);
fbe_field_size = f149.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f150.fbe_offset(fbe_current_offset);
fbe_field_size = f150.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f151.fbe_offset(fbe_current_offset);
fbe_field_size = f151.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f152.fbe_offset(fbe_current_offset);
fbe_field_size = f152.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f153.fbe_offset(fbe_current_offset);
fbe_field_size = f153.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f154.fbe_offset(fbe_current_offset);
fbe_field_size = f154.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f155.fbe_offset(fbe_current_offset);
fbe_field_size = f155.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f156.fbe_offset(fbe_current_offset);
fbe_field_size = f156.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f157.fbe_offset(fbe_current_offset);
fbe_field_size = f157.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f158.fbe_offset(fbe_current_offset);
fbe_field_size = f158.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f159.fbe_offset(fbe_current_offset);
fbe_field_size = f159.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f160.fbe_offset(fbe_current_offset);
fbe_field_size = f160.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f161.fbe_offset(fbe_current_offset);
fbe_field_size = f161.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f162.fbe_offset(fbe_current_offset);
fbe_field_size = f162.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f163.fbe_offset(fbe_current_offset);
fbe_field_size = f163.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f164.fbe_offset(fbe_current_offset);
fbe_field_size = f164.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f165.fbe_offset(fbe_current_offset);
fbe_field_size = f165.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
size_t get(::test::StructOptional& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructOptional& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
parent.fbe_offset(fbe_current_offset);
fbe_field_size = parent.get_fields(fbe_value);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f100.fbe_offset(fbe_current_offset);
fbe_field_size = f100.get(fbe_value.f100);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f101.fbe_offset(fbe_current_offset);
fbe_field_size = f101.get(fbe_value.f101);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f102.fbe_offset(fbe_current_offset);
fbe_field_size = f102.get(fbe_value.f102);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f103.fbe_offset(fbe_current_offset);
fbe_field_size = f103.get(fbe_value.f103);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f104.fbe_offset(fbe_current_offset);
fbe_field_size = f104.get(fbe_value.f104);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f105.fbe_offset(fbe_current_offset);
fbe_field_size = f105.get(fbe_value.f105);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f106.fbe_offset(fbe_current_offset);
fbe_field_size = f106.get(fbe_value.f106);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f107.fbe_offset(fbe_current_offset);
fbe_field_size = f107.get(fbe_value.f107);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f108.fbe_offset(fbe_current_offset);
fbe_field_size = f108.get(fbe_value.f108);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f109.fbe_offset(fbe_current_offset);
fbe_field_size = f109.get(fbe_value.f109);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f110.fbe_offset(fbe_current_offset);
fbe_field_size = f110.get(fbe_value.f110);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f111.fbe_offset(fbe_current_offset);
fbe_field_size = f111.get(fbe_value.f111);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f112.fbe_offset(fbe_current_offset);
fbe_field_size = f112.get(fbe_value.f112);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f113.fbe_offset(fbe_current_offset);
fbe_field_size = f113.get(fbe_value.f113);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f114.fbe_offset(fbe_current_offset);
fbe_field_size = f114.get(fbe_value.f114);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f115.fbe_offset(fbe_current_offset);
fbe_field_size = f115.get(fbe_value.f115);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f116.fbe_offset(fbe_current_offset);
fbe_field_size = f116.get(fbe_value.f116);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f117.fbe_offset(fbe_current_offset);
fbe_field_size = f117.get(fbe_value.f117);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f118.fbe_offset(fbe_current_offset);
fbe_field_size = f118.get(fbe_value.f118);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f119.fbe_offset(fbe_current_offset);
fbe_field_size = f119.get(fbe_value.f119);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f120.fbe_offset(fbe_current_offset);
fbe_field_size = f120.get(fbe_value.f120);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f121.fbe_offset(fbe_current_offset);
fbe_field_size = f121.get(fbe_value.f121);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f122.fbe_offset(fbe_current_offset);
fbe_field_size = f122.get(fbe_value.f122);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f123.fbe_offset(fbe_current_offset);
fbe_field_size = f123.get(fbe_value.f123);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f124.fbe_offset(fbe_current_offset);
fbe_field_size = f124.get(fbe_value.f124);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f125.fbe_offset(fbe_current_offset);
fbe_field_size = f125.get(fbe_value.f125);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f126.fbe_offset(fbe_current_offset);
fbe_field_size = f126.get(fbe_value.f126);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f127.fbe_offset(fbe_current_offset);
fbe_field_size = f127.get(fbe_value.f127);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f128.fbe_offset(fbe_current_offset);
fbe_field_size = f128.get(fbe_value.f128);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f129.fbe_offset(fbe_current_offset);
fbe_field_size = f129.get(fbe_value.f129);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f130.fbe_offset(fbe_current_offset);
fbe_field_size = f130.get(fbe_value.f130);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f131.fbe_offset(fbe_current_offset);
fbe_field_size = f131.get(fbe_value.f131);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f132.fbe_offset(fbe_current_offset);
fbe_field_size = f132.get(fbe_value.f132);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f133.fbe_offset(fbe_current_offset);
fbe_field_size = f133.get(fbe_value.f133);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f134.fbe_offset(fbe_current_offset);
fbe_field_size = f134.get(fbe_value.f134);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f135.fbe_offset(fbe_current_offset);
fbe_field_size = f135.get(fbe_value.f135);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f136.fbe_offset(fbe_current_offset);
fbe_field_size = f136.get(fbe_value.f136);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f137.fbe_offset(fbe_current_offset);
fbe_field_size = f137.get(fbe_value.f137);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f138.fbe_offset(fbe_current_offset);
fbe_field_size = f138.get(fbe_value.f138);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f139.fbe_offset(fbe_current_offset);
fbe_field_size = f139.get(fbe_value.f139);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f140.fbe_offset(fbe_current_offset);
fbe_field_size = f140.get(fbe_value.f140);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f141.fbe_offset(fbe_current_offset);
fbe_field_size = f141.get(fbe_value.f141);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f142.fbe_offset(fbe_current_offset);
fbe_field_size = f142.get(fbe_value.f142);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f143.fbe_offset(fbe_current_offset);
fbe_field_size = f143.get(fbe_value.f143);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f144.fbe_offset(fbe_current_offset);
fbe_field_size = f144.get(fbe_value.f144);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f145.fbe_offset(fbe_current_offset);
fbe_field_size = f145.get(fbe_value.f145);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f146.fbe_offset(fbe_current_offset);
fbe_field_size = f146.get(fbe_value.f146);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f147.fbe_offset(fbe_current_offset);
fbe_field_size = f147.get(fbe_value.f147);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f148.fbe_offset(fbe_current_offset);
fbe_field_size = f148.get(fbe_value.f148);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f149.fbe_offset(fbe_current_offset);
fbe_field_size = f149.get(fbe_value.f149);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f150.fbe_offset(fbe_current_offset);
fbe_field_size = f150.get(fbe_value.f150);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f151.fbe_offset(fbe_current_offset);
fbe_field_size = f151.get(fbe_value.f151);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f152.fbe_offset(fbe_current_offset);
fbe_field_size = f152.get(fbe_value.f152);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f153.fbe_offset(fbe_current_offset);
fbe_field_size = f153.get(fbe_value.f153);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f154.fbe_offset(fbe_current_offset);
fbe_field_size = f154.get(fbe_value.f154);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f155.fbe_offset(fbe_current_offset);
fbe_field_size = f155.get(fbe_value.f155);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f156.fbe_offset(fbe_current_offset);
fbe_field_size = f156.get(fbe_value.f156);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f157.fbe_offset(fbe_current_offset);
fbe_field_size = f157.get(fbe_value.f157);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f158.fbe_offset(fbe_current_offset);
fbe_field_size = f158.get(fbe_value.f158);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f159.fbe_offset(fbe_current_offset);
fbe_field_size = f159.get(fbe_value.f159);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f160.fbe_offset(fbe_current_offset);
fbe_field_size = f160.get(fbe_value.f160);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f161.fbe_offset(fbe_current_offset);
fbe_field_size = f161.get(fbe_value.f161);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f162.fbe_offset(fbe_current_offset);
fbe_field_size = f162.get(fbe_value.f162);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f163.fbe_offset(fbe_current_offset);
fbe_field_size = f163.get(fbe_value.f163);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f164.fbe_offset(fbe_current_offset);
fbe_field_size = f164.get(fbe_value.f164);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f165.fbe_offset(fbe_current_offset);
fbe_field_size = f165.get(fbe_value.f165);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructOptional& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructOptional& fbe_value) noexcept
{
    size_t fbe_current_offset = 0;
    size_t fbe_current_size = 0;
    // Position a field model at the running offset, serialize the given
    // value through it, and advance both the running offset and the
    // accumulated size by the number of bytes written.
    auto fbe_write = [&fbe_current_offset, &fbe_current_size](auto& fbe_model, const auto& fbe_field) {
        fbe_model.fbe_offset(fbe_current_offset);
        size_t fbe_field_size = fbe_model.set(fbe_field);
        fbe_current_offset += fbe_field_size;
        fbe_current_size += fbe_field_size;
    };
    // Base struct fields are serialized first (flattened inheritance)
    parent.fbe_offset(fbe_current_offset);
    size_t fbe_parent_size = parent.set_fields(fbe_value);
    fbe_current_offset += fbe_parent_size;
    fbe_current_size += fbe_parent_size;
    // Own fields follow in declaration order
    fbe_write(f100, fbe_value.f100);
    fbe_write(f101, fbe_value.f101);
    fbe_write(f102, fbe_value.f102);
    fbe_write(f103, fbe_value.f103);
    fbe_write(f104, fbe_value.f104);
    fbe_write(f105, fbe_value.f105);
    fbe_write(f106, fbe_value.f106);
    fbe_write(f107, fbe_value.f107);
    fbe_write(f108, fbe_value.f108);
    fbe_write(f109, fbe_value.f109);
    fbe_write(f110, fbe_value.f110);
    fbe_write(f111, fbe_value.f111);
    fbe_write(f112, fbe_value.f112);
    fbe_write(f113, fbe_value.f113);
    fbe_write(f114, fbe_value.f114);
    fbe_write(f115, fbe_value.f115);
    fbe_write(f116, fbe_value.f116);
    fbe_write(f117, fbe_value.f117);
    fbe_write(f118, fbe_value.f118);
    fbe_write(f119, fbe_value.f119);
    fbe_write(f120, fbe_value.f120);
    fbe_write(f121, fbe_value.f121);
    fbe_write(f122, fbe_value.f122);
    fbe_write(f123, fbe_value.f123);
    fbe_write(f124, fbe_value.f124);
    fbe_write(f125, fbe_value.f125);
    fbe_write(f126, fbe_value.f126);
    fbe_write(f127, fbe_value.f127);
    fbe_write(f128, fbe_value.f128);
    fbe_write(f129, fbe_value.f129);
    fbe_write(f130, fbe_value.f130);
    fbe_write(f131, fbe_value.f131);
    fbe_write(f132, fbe_value.f132);
    fbe_write(f133, fbe_value.f133);
    fbe_write(f134, fbe_value.f134);
    fbe_write(f135, fbe_value.f135);
    fbe_write(f136, fbe_value.f136);
    fbe_write(f137, fbe_value.f137);
    fbe_write(f138, fbe_value.f138);
    fbe_write(f139, fbe_value.f139);
    fbe_write(f140, fbe_value.f140);
    fbe_write(f141, fbe_value.f141);
    fbe_write(f142, fbe_value.f142);
    fbe_write(f143, fbe_value.f143);
    fbe_write(f144, fbe_value.f144);
    fbe_write(f145, fbe_value.f145);
    fbe_write(f146, fbe_value.f146);
    fbe_write(f147, fbe_value.f147);
    fbe_write(f148, fbe_value.f148);
    fbe_write(f149, fbe_value.f149);
    fbe_write(f150, fbe_value.f150);
    fbe_write(f151, fbe_value.f151);
    fbe_write(f152, fbe_value.f152);
    fbe_write(f153, fbe_value.f153);
    fbe_write(f154, fbe_value.f154);
    fbe_write(f155, fbe_value.f155);
    fbe_write(f156, fbe_value.f156);
    fbe_write(f157, fbe_value.f157);
    fbe_write(f158, fbe_value.f158);
    fbe_write(f159, fbe_value.f159);
    fbe_write(f160, fbe_value.f160);
    fbe_write(f161, fbe_value.f161);
    fbe_write(f162, fbe_value.f162);
    fbe_write(f163, fbe_value.f163);
    fbe_write(f164, fbe_value.f164);
    fbe_write(f165, fbe_value.f165);
    return fbe_current_size;
}
private:
// Underlying serialization buffer (not owned)
TBuffer& _buffer;
// Current offset within the buffer; mutable so const read/verify paths can reposition it
mutable size_t _offset;
public:
// Base struct final model — its fields are flattened into this struct's layout
FinalModel<TBuffer, ::test::StructSimple> parent;
// One final model per optional struct member, in serialization order (f100..f165)
FinalModel<TBuffer, std::optional<bool>> f100;
FinalModel<TBuffer, std::optional<bool>> f101;
FinalModel<TBuffer, std::optional<bool>> f102;
FinalModel<TBuffer, std::optional<uint8_t>> f103;
FinalModel<TBuffer, std::optional<uint8_t>> f104;
FinalModel<TBuffer, std::optional<uint8_t>> f105;
FinalModel<TBuffer, std::optional<char>> f106;
FinalModel<TBuffer, std::optional<char>> f107;
FinalModel<TBuffer, std::optional<char>> f108;
FinalModel<TBuffer, std::optional<wchar_t>> f109;
FinalModel<TBuffer, std::optional<wchar_t>> f110;
FinalModel<TBuffer, std::optional<wchar_t>> f111;
FinalModel<TBuffer, std::optional<int8_t>> f112;
FinalModel<TBuffer, std::optional<int8_t>> f113;
FinalModel<TBuffer, std::optional<int8_t>> f114;
FinalModel<TBuffer, std::optional<uint8_t>> f115;
FinalModel<TBuffer, std::optional<uint8_t>> f116;
FinalModel<TBuffer, std::optional<uint8_t>> f117;
FinalModel<TBuffer, std::optional<int16_t>> f118;
FinalModel<TBuffer, std::optional<int16_t>> f119;
FinalModel<TBuffer, std::optional<int16_t>> f120;
FinalModel<TBuffer, std::optional<uint16_t>> f121;
FinalModel<TBuffer, std::optional<uint16_t>> f122;
FinalModel<TBuffer, std::optional<uint16_t>> f123;
FinalModel<TBuffer, std::optional<int32_t>> f124;
FinalModel<TBuffer, std::optional<int32_t>> f125;
FinalModel<TBuffer, std::optional<int32_t>> f126;
FinalModel<TBuffer, std::optional<uint32_t>> f127;
FinalModel<TBuffer, std::optional<uint32_t>> f128;
FinalModel<TBuffer, std::optional<uint32_t>> f129;
FinalModel<TBuffer, std::optional<int64_t>> f130;
FinalModel<TBuffer, std::optional<int64_t>> f131;
FinalModel<TBuffer, std::optional<int64_t>> f132;
FinalModel<TBuffer, std::optional<uint64_t>> f133;
FinalModel<TBuffer, std::optional<uint64_t>> f134;
FinalModel<TBuffer, std::optional<uint64_t>> f135;
FinalModel<TBuffer, std::optional<float>> f136;
FinalModel<TBuffer, std::optional<float>> f137;
FinalModel<TBuffer, std::optional<float>> f138;
FinalModel<TBuffer, std::optional<double>> f139;
FinalModel<TBuffer, std::optional<double>> f140;
FinalModel<TBuffer, std::optional<double>> f141;
FinalModel<TBuffer, std::optional<FBE::decimal_t>> f142;
FinalModel<TBuffer, std::optional<FBE::decimal_t>> f143;
FinalModel<TBuffer, std::optional<FBE::decimal_t>> f144;
FinalModel<TBuffer, std::optional<std::string>> f145;
FinalModel<TBuffer, std::optional<std::string>> f146;
FinalModel<TBuffer, std::optional<std::string>> f147;
// f148..f150 are timestamps serialized as uint64_t — TODO confirm against the FBE schema
FinalModel<TBuffer, std::optional<uint64_t>> f148;
FinalModel<TBuffer, std::optional<uint64_t>> f149;
FinalModel<TBuffer, std::optional<uint64_t>> f150;
FinalModel<TBuffer, std::optional<FBE::uuid_t>> f151;
FinalModel<TBuffer, std::optional<FBE::uuid_t>> f152;
FinalModel<TBuffer, std::optional<FBE::uuid_t>> f153;
FinalModel<TBuffer, std::optional<::proto::OrderSide>> f154;
FinalModel<TBuffer, std::optional<::proto::OrderSide>> f155;
FinalModel<TBuffer, std::optional<::proto::OrderType>> f156;
FinalModel<TBuffer, std::optional<::proto::OrderType>> f157;
FinalModel<TBuffer, std::optional<::proto::Order>> f158;
FinalModel<TBuffer, std::optional<::proto::Order>> f159;
FinalModel<TBuffer, std::optional<::proto::Balance>> f160;
FinalModel<TBuffer, std::optional<::proto::Balance>> f161;
FinalModel<TBuffer, std::optional<::proto::State>> f162;
FinalModel<TBuffer, std::optional<::proto::State>> f163;
FinalModel<TBuffer, std::optional<::proto::Account>> f164;
FinalModel<TBuffer, std::optional<::proto::Account>> f165;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructOptional final model.
// Final-model layout: an 8-byte header ([0..3] = total struct size including
// the header, [4..7] = struct type id) immediately followed by the packed
// struct body, which starts at offset 8.
template <class TBuffer>
class StructOptionalFinalModel : public FBE::Model<TBuffer>
{
public:
StructOptionalFinalModel() : _model(this->buffer(), 8) {}
StructOptionalFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
// Get the model type
static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructOptional>::fbe_type(); }
// Check if the struct value is valid: header must fit in the buffer, the
// type id must match, and the recorded size must equal header + body size.
bool verify()
{
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return false;
// Read the 4-byte size and 4-byte type fields preceding the body
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return false;
return ((8 + _model.verify()) == fbe_struct_size);
}
// Serialize the struct value. Returns the total serialized size
// (header + body) or 0 if the buffer allocation failed.
size_t serialize(const ::test::StructOptional& value)
{
size_t fbe_initial_size = this->buffer().size();
uint32_t fbe_struct_type = (uint32_t)fbe_type();
// fbe_allocation_size is an upper bound; the buffer is shrunk below to
// the exact size produced by _model.set()
uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!")
;
if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
return 0;
fbe_struct_size = (uint32_t)(8 + _model.set(value));
this->buffer().resize(fbe_initial_size + fbe_struct_size);
// Write the header: size first, then type id
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
return fbe_struct_size;
}
// Deserialize the struct value. Returns the number of bytes consumed;
// on a bad header the value is left untouched and 8 is returned so the
// caller can still skip past the header.
size_t deserialize(::test::StructOptional& value) const noexcept
{
assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return 0;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return 8;
return 8 + _model.get(value);
}
// Move to the next struct value
void next(size_t prev) noexcept
{
_model.fbe_shift(prev);
}
private:
FinalModel<TBuffer, ::test::StructOptional> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructNested field model.
// Standard (non-final) layout: the field itself is a 4-byte offset pointing
// at the struct body elsewhere in the buffer. The body starts with a 4-byte
// size followed by a 4-byte type id (the "4 + 4" header seen throughout),
// then the parent's flattened fields, then this struct's own fields.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructNested>
{
public:
// Each nested field model is positioned immediately after the previous one;
// parent starts right after the 4+4 header and its own header is excluded.
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, parent(buffer, 4 + 4)
, f1000(buffer, parent.fbe_offset() + parent.fbe_body() - 4 - 4)
, f1001(buffer, f1000.fbe_offset() + f1000.fbe_size())
, f1002(buffer, f1001.fbe_offset() + f1001.fbe_size())
, f1003(buffer, f1002.fbe_offset() + f1002.fbe_size())
, f1004(buffer, f1003.fbe_offset() + f1003.fbe_size())
, f1005(buffer, f1004.fbe_offset() + f1004.fbe_size())
, f1006(buffer, f1005.fbe_offset() + f1005.fbe_size())
, f1007(buffer, f1006.fbe_offset() + f1006.fbe_size())
, f1008(buffer, f1007.fbe_offset() + f1007.fbe_size())
, f1009(buffer, f1008.fbe_offset() + f1008.fbe_size())
, f1010(buffer, f1009.fbe_offset() + f1009.fbe_size())
, f1011(buffer, f1010.fbe_offset() + f1010.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size (just the 4-byte offset pointer)
size_t fbe_size() const noexcept { return 4; }
// Get the field body size: 4+4 header, parent body (minus its own header),
// then each field's fixed size
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ parent.fbe_body() - 4 - 4
+ f1000.fbe_size()
+ f1001.fbe_size()
+ f1002.fbe_size()
+ f1003.fbe_size()
+ f1004.fbe_size()
+ f1005.fbe_size()
+ f1006.fbe_size()
+ f1007.fbe_size()
+ f1008.fbe_size()
+ f1009.fbe_size()
+ f1010.fbe_size()
+ f1011.fbe_size()
;
return fbe_result;
}
// Get the field extra size: the body plus any variable-length data the
// nested fields point at. Returns 0 if the struct offset is unset/invalid.
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
// Rebase at the struct body so nested fbe_extra() calls resolve correctly
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ parent.fbe_extra()
+ f1000.fbe_extra()
+ f1001.fbe_extra()
+ f1002.fbe_extra()
+ f1003.fbe_extra()
+ f1004.fbe_extra()
+ f1005.fbe_extra()
+ f1006.fbe_extra()
+ f1007.fbe_extra()
+ f1008.fbe_extra()
+ f1009.fbe_extra()
+ f1010.fbe_extra()
+ f1011.fbe_extra()
;
_buffer.unshift(fbe_struct_offset)
;
return fbe_result;
}
// Get the field type
static constexpr size_t fbe_type() noexcept { return 112; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid.
// Note: an out-of-bounds field offset returns true (field simply absent),
// while a corrupt header returns false.
bool verify(bool fbe_verify_type = true) const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid. Fields beyond the recorded struct
// size are treated as absent (returns true) — this is what lets a newer
// reader accept a message produced by an older schema.
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + parent.fbe_body() - 4 - 4) > fbe_struct_size)
return true;
if (!parent.verify_fields(fbe_struct_size))
return false;
fbe_current_size += parent.fbe_body() - 4 - 4;
if ((fbe_current_size + f1000.fbe_size()) > fbe_struct_size)
return true;
if (!f1000.verify())
return false;
fbe_current_size += f1000.fbe_size();
if ((fbe_current_size + f1001.fbe_size()) > fbe_struct_size)
return true;
if (!f1001.verify())
return false;
fbe_current_size += f1001.fbe_size();
if ((fbe_current_size + f1002.fbe_size()) > fbe_struct_size)
return true;
if (!f1002.verify())
return false;
fbe_current_size += f1002.fbe_size();
if ((fbe_current_size + f1003.fbe_size()) > fbe_struct_size)
return true;
if (!f1003.verify())
return false;
fbe_current_size += f1003.fbe_size();
if ((fbe_current_size + f1004.fbe_size()) > fbe_struct_size)
return true;
if (!f1004.verify())
return false;
fbe_current_size += f1004.fbe_size();
if ((fbe_current_size + f1005.fbe_size()) > fbe_struct_size)
return true;
if (!f1005.verify())
return false;
fbe_current_size += f1005.fbe_size();
if ((fbe_current_size + f1006.fbe_size()) > fbe_struct_size)
return true;
if (!f1006.verify())
return false;
fbe_current_size += f1006.fbe_size();
if ((fbe_current_size + f1007.fbe_size()) > fbe_struct_size)
return true;
if (!f1007.verify())
return false;
fbe_current_size += f1007.fbe_size();
if ((fbe_current_size + f1008.fbe_size()) > fbe_struct_size)
return true;
if (!f1008.verify())
return false;
fbe_current_size += f1008.fbe_size();
if ((fbe_current_size + f1009.fbe_size()) > fbe_struct_size)
return true;
if (!f1009.verify())
return false;
fbe_current_size += f1009.fbe_size();
if ((fbe_current_size + f1010.fbe_size()) > fbe_struct_size)
return true;
if (!f1010.verify())
return false;
fbe_current_size += f1010.fbe_size();
if ((fbe_current_size + f1011.fbe_size()) > fbe_struct_size)
return true;
if (!f1011.verify())
return false;
fbe_current_size += f1011.fbe_size();
return true;
}
// Get the struct value (begin phase): validate the header, rebase the
// buffer at the struct body and return the offset used for the rebase
// (0 on failure — nothing was shifted in that case).
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase): undo the rebase done by get_begin()
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructNested& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values. Fields beyond the recorded struct size are
// absent in the serialized data and fall back to their schema defaults.
void get_fields(::test::StructNested& fbe_value, size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + parent.fbe_body() - 4 - 4) <= fbe_struct_size)
parent.get_fields(fbe_value, fbe_struct_size);
fbe_current_size += parent.fbe_body() - 4 - 4;
if ((fbe_current_size + f1000.fbe_size()) <= fbe_struct_size)
f1000.get(fbe_value.f1000);
else
fbe_value.f1000 = ::test::EnumSimple();
fbe_current_size += f1000.fbe_size();
if ((fbe_current_size + f1001.fbe_size()) <= fbe_struct_size)
f1001.get(fbe_value.f1001);
else
fbe_value.f1001 = std::nullopt;
fbe_current_size += f1001.fbe_size();
if ((fbe_current_size + f1002.fbe_size()) <= fbe_struct_size)
f1002.get(fbe_value.f1002, EnumTyped::ENUM_VALUE_2);
else
fbe_value.f1002 = EnumTyped::ENUM_VALUE_2;
fbe_current_size += f1002.fbe_size();
if ((fbe_current_size + f1003.fbe_size()) <= fbe_struct_size)
f1003.get(fbe_value.f1003, std::nullopt);
else
fbe_value.f1003 = std::nullopt;
fbe_current_size += f1003.fbe_size();
if ((fbe_current_size + f1004.fbe_size()) <= fbe_struct_size)
f1004.get(fbe_value.f1004);
else
fbe_value.f1004 = ::test::FlagsSimple();
fbe_current_size += f1004.fbe_size();
if ((fbe_current_size + f1005.fbe_size()) <= fbe_struct_size)
f1005.get(fbe_value.f1005);
else
fbe_value.f1005 = std::nullopt;
fbe_current_size += f1005.fbe_size();
if ((fbe_current_size + f1006.fbe_size()) <= fbe_struct_size)
f1006.get(fbe_value.f1006, FlagsTyped::FLAG_VALUE_2 | FlagsTyped::FLAG_VALUE_4 | FlagsTyped::FLAG_VALUE_6);
else
fbe_value.f1006 = FlagsTyped::FLAG_VALUE_2 | FlagsTyped::FLAG_VALUE_4 | FlagsTyped::FLAG_VALUE_6;
fbe_current_size += f1006.fbe_size();
if ((fbe_current_size + f1007.fbe_size()) <= fbe_struct_size)
f1007.get(fbe_value.f1007, std::nullopt);
else
fbe_value.f1007 = std::nullopt;
fbe_current_size += f1007.fbe_size();
if ((fbe_current_size + f1008.fbe_size()) <= fbe_struct_size)
f1008.get(fbe_value.f1008);
else
fbe_value.f1008 = ::test::StructSimple();
fbe_current_size += f1008.fbe_size();
if ((fbe_current_size + f1009.fbe_size()) <= fbe_struct_size)
f1009.get(fbe_value.f1009);
else
fbe_value.f1009 = std::nullopt;
fbe_current_size += f1009.fbe_size();
if ((fbe_current_size + f1010.fbe_size()) <= fbe_struct_size)
f1010.get(fbe_value.f1010);
else
fbe_value.f1010 = ::test::StructOptional();
fbe_current_size += f1010.fbe_size();
if ((fbe_current_size + f1011.fbe_size()) <= fbe_struct_size)
f1011.get(fbe_value.f1011, std::nullopt);
else
fbe_value.f1011 = std::nullopt;
fbe_current_size += f1011.fbe_size();
}
// Set the struct value (begin phase): allocate the body, store its offset
// in the field, write the size/type header and rebase the buffer at the
// body. Returns 0 on allocation failure (nothing was shifted).
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase): undo the rebase done by set_begin()
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
void set(const ::test::StructNested& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values (parent first, then own fields in order)
void set_fields(const ::test::StructNested& fbe_value) noexcept
{
parent.set_fields(fbe_value);
f1000.set(fbe_value.f1000);
f1001.set(fbe_value.f1001);
f1002.set(fbe_value.f1002);
f1003.set(fbe_value.f1003);
f1004.set(fbe_value.f1004);
f1005.set(fbe_value.f1005);
f1006.set(fbe_value.f1006);
f1007.set(fbe_value.f1007);
f1008.set(fbe_value.f1008);
f1009.set(fbe_value.f1009);
f1010.set(fbe_value.f1010);
f1011.set(fbe_value.f1011);
}
private:
// Underlying serialization buffer (not owned)
TBuffer& _buffer;
// Offset of the 4-byte struct pointer within the buffer
size_t _offset;
public:
// Base struct field model (fields flattened into this struct's body)
FieldModel<TBuffer, ::test::StructOptional> parent;
// One field model per struct member, in serialization order
FieldModel<TBuffer, ::test::EnumSimple> f1000;
FieldModel<TBuffer, std::optional<::test::EnumSimple>> f1001;
FieldModel<TBuffer, ::test::EnumTyped> f1002;
FieldModel<TBuffer, std::optional<::test::EnumTyped>> f1003;
FieldModel<TBuffer, ::test::FlagsSimple> f1004;
FieldModel<TBuffer, std::optional<::test::FlagsSimple>> f1005;
FieldModel<TBuffer, ::test::FlagsTyped> f1006;
FieldModel<TBuffer, std::optional<::test::FlagsTyped>> f1007;
FieldModel<TBuffer, ::test::StructSimple> f1008;
FieldModel<TBuffer, std::optional<::test::StructSimple>> f1009;
FieldModel<TBuffer, ::test::StructOptional> f1010;
FieldModel<TBuffer, std::optional<::test::StructOptional>> f1011;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructNested model.
// Layout: a 4-byte full-size prefix at offset 0 followed by the root
// FieldModel at offset 4.
template <class TBuffer>
class StructNestedModel : public FBE::Model<TBuffer>
{
public:
    StructNestedModel() : model(this->buffer(), 4) {}
    StructNestedModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size (fixed part plus variable-length extra part)
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructNested>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        const size_t fbe_prefix = this->buffer().offset() + model.fbe_offset() - 4;
        if (fbe_prefix > this->buffer().size())
            return false;
        const uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + fbe_prefix));
        return (fbe_full_size >= model.fbe_size()) && model.verify();
    }

    // Create a new model (begin phase): reserve room for the size prefix
    // plus the fixed part of the model
    size_t create_begin()
    {
        return this->buffer().allocate(4 + model.fbe_size());
    }

    // Create a new model (end phase): record the total serialized size
    size_t create_end(size_t fbe_begin)
    {
        const uint32_t fbe_full_size = (uint32_t)(this->buffer().size() - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
        return fbe_full_size;
    }

    // Serialize the struct value and return the total serialized size
    size_t serialize(const ::test::StructNested& value)
    {
        const size_t fbe_begin = create_begin();
        model.set(value);
        return create_end(fbe_begin);
    }

    // Deserialize the struct value; returns the consumed size or 0 on failure
    size_t deserialize(::test::StructNested& value) const noexcept
    {
        const size_t fbe_prefix = this->buffer().offset() + model.fbe_offset() - 4;
        if (fbe_prefix > this->buffer().size())
            return 0;
        const uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + fbe_prefix));
        assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
        if (fbe_full_size < model.fbe_size())
            return 0;
        model.get(value);
        return fbe_full_size;
    }

    // Move to the next struct value
    void next(size_t prev) noexcept { model.fbe_shift(prev); }

public:
    FieldModel<TBuffer, ::test::StructNested> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructNested final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructNested>
{
public:
// All nested final models are created at offset 0: in the final (packed)
// format field positions are not known up front, so each field model is
// repositioned via fbe_offset() while fields are read/written in sequence.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, parent(buffer, 0)
, f1000(buffer, 0)
, f1001(buffer, 0)
, f1002(buffer, 0)
, f1003(buffer, 0)
, f1004(buffer, 0)
, f1005(buffer, 0)
, f1006(buffer, 0)
, f1007(buffer, 0)
, f1008(buffer, 0)
, f1009(buffer, 0)
, f1010(buffer, 0)
, f1011(buffer, 0)
{}
// Get the allocation size: upper bound on the packed size of the given
// value — parent's flattened fields plus every own field, in order.
size_t fbe_allocation_size(const ::test::StructNested& fbe_value) const noexcept
{
    size_t fbe_total = parent.fbe_allocation_size(fbe_value);
    fbe_total += f1000.fbe_allocation_size(fbe_value.f1000);
    fbe_total += f1001.fbe_allocation_size(fbe_value.f1001);
    fbe_total += f1002.fbe_allocation_size(fbe_value.f1002);
    fbe_total += f1003.fbe_allocation_size(fbe_value.f1003);
    fbe_total += f1004.fbe_allocation_size(fbe_value.f1004);
    fbe_total += f1005.fbe_allocation_size(fbe_value.f1005);
    fbe_total += f1006.fbe_allocation_size(fbe_value.f1006);
    fbe_total += f1007.fbe_allocation_size(fbe_value.f1007);
    fbe_total += f1008.fbe_allocation_size(fbe_value.f1008);
    fbe_total += f1009.fbe_allocation_size(fbe_value.f1009);
    fbe_total += f1010.fbe_allocation_size(fbe_value.f1010);
    fbe_total += f1011.fbe_allocation_size(fbe_value.f1011);
    return fbe_total;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 112; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
parent.fbe_offset(fbe_current_offset);
fbe_field_size = parent.verify_fields();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1000.fbe_offset(fbe_current_offset);
fbe_field_size = f1000.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1001.fbe_offset(fbe_current_offset);
fbe_field_size = f1001.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1002.fbe_offset(fbe_current_offset);
fbe_field_size = f1002.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1003.fbe_offset(fbe_current_offset);
fbe_field_size = f1003.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1004.fbe_offset(fbe_current_offset);
fbe_field_size = f1004.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1005.fbe_offset(fbe_current_offset);
fbe_field_size = f1005.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1006.fbe_offset(fbe_current_offset);
fbe_field_size = f1006.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1007.fbe_offset(fbe_current_offset);
fbe_field_size = f1007.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1008.fbe_offset(fbe_current_offset);
fbe_field_size = f1008.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1009.fbe_offset(fbe_current_offset);
fbe_field_size = f1009.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1010.fbe_offset(fbe_current_offset);
fbe_field_size = f1010.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f1011.fbe_offset(fbe_current_offset);
fbe_field_size = f1011.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
size_t get(::test::StructNested& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructNested& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
parent.fbe_offset(fbe_current_offset);
fbe_field_size = parent.get_fields(fbe_value);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1000.fbe_offset(fbe_current_offset);
fbe_field_size = f1000.get(fbe_value.f1000);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1001.fbe_offset(fbe_current_offset);
fbe_field_size = f1001.get(fbe_value.f1001);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1002.fbe_offset(fbe_current_offset);
fbe_field_size = f1002.get(fbe_value.f1002);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1003.fbe_offset(fbe_current_offset);
fbe_field_size = f1003.get(fbe_value.f1003);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1004.fbe_offset(fbe_current_offset);
fbe_field_size = f1004.get(fbe_value.f1004);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1005.fbe_offset(fbe_current_offset);
fbe_field_size = f1005.get(fbe_value.f1005);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1006.fbe_offset(fbe_current_offset);
fbe_field_size = f1006.get(fbe_value.f1006);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1007.fbe_offset(fbe_current_offset);
fbe_field_size = f1007.get(fbe_value.f1007);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1008.fbe_offset(fbe_current_offset);
fbe_field_size = f1008.get(fbe_value.f1008);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1009.fbe_offset(fbe_current_offset);
fbe_field_size = f1009.get(fbe_value.f1009);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1010.fbe_offset(fbe_current_offset);
fbe_field_size = f1010.get(fbe_value.f1010);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1011.fbe_offset(fbe_current_offset);
fbe_field_size = f1011.get(fbe_value.f1011);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructNested& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructNested& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
parent.fbe_offset(fbe_current_offset);
fbe_field_size = parent.set_fields(fbe_value);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1000.fbe_offset(fbe_current_offset);
fbe_field_size = f1000.set(fbe_value.f1000);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1001.fbe_offset(fbe_current_offset);
fbe_field_size = f1001.set(fbe_value.f1001);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1002.fbe_offset(fbe_current_offset);
fbe_field_size = f1002.set(fbe_value.f1002);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1003.fbe_offset(fbe_current_offset);
fbe_field_size = f1003.set(fbe_value.f1003);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1004.fbe_offset(fbe_current_offset);
fbe_field_size = f1004.set(fbe_value.f1004);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1005.fbe_offset(fbe_current_offset);
fbe_field_size = f1005.set(fbe_value.f1005);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1006.fbe_offset(fbe_current_offset);
fbe_field_size = f1006.set(fbe_value.f1006);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1007.fbe_offset(fbe_current_offset);
fbe_field_size = f1007.set(fbe_value.f1007);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1008.fbe_offset(fbe_current_offset);
fbe_field_size = f1008.set(fbe_value.f1008);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1009.fbe_offset(fbe_current_offset);
fbe_field_size = f1009.set(fbe_value.f1009);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1010.fbe_offset(fbe_current_offset);
fbe_field_size = f1010.set(fbe_value.f1010);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f1011.fbe_offset(fbe_current_offset);
fbe_field_size = f1011.set(fbe_value.f1011);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
mutable size_t _offset;
public:
FinalModel<TBuffer, ::test::StructOptional> parent;
FinalModel<TBuffer, ::test::EnumSimple> f1000;
FinalModel<TBuffer, std::optional<::test::EnumSimple>> f1001;
FinalModel<TBuffer, ::test::EnumTyped> f1002;
FinalModel<TBuffer, std::optional<::test::EnumTyped>> f1003;
FinalModel<TBuffer, ::test::FlagsSimple> f1004;
FinalModel<TBuffer, std::optional<::test::FlagsSimple>> f1005;
FinalModel<TBuffer, ::test::FlagsTyped> f1006;
FinalModel<TBuffer, std::optional<::test::FlagsTyped>> f1007;
FinalModel<TBuffer, ::test::StructSimple> f1008;
FinalModel<TBuffer, std::optional<::test::StructSimple>> f1009;
FinalModel<TBuffer, ::test::StructOptional> f1010;
FinalModel<TBuffer, std::optional<::test::StructOptional>> f1011;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructNested final model
template <class TBuffer>
class StructNestedFinalModel : public FBE::Model<TBuffer>
{
public:
    // The payload model starts at offset 8, right behind the 4-byte size and
    // 4-byte type values written in front of it by serialize().
    StructNestedFinalModel() : _model(this->buffer(), 8) {}
    StructNestedFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}

    // Get the model type (mirrors the underlying final model)
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructNested>::fbe_type(); }

    // Check if the struct value is valid: the header must be in range, carry
    // a non-zero size and the expected type, and the declared size must match
    // the verified payload size exactly.
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;

        size_t struct_size = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8);
        size_t struct_type = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4);
        if ((struct_size == 0) || (struct_type != fbe_type()))
            return false;

        return (8 + _model.verify()) == struct_size;
    }

    // Serialize the struct value; returns the total serialized size
    // (0 when the buffer could not provide enough room).
    size_t serialize(const ::test::StructNested& value)
    {
        size_t initial_size = this->buffer().size();

        uint32_t struct_type = (uint32_t)fbe_type();
        uint32_t struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t struct_offset = (uint32_t)(this->buffer().allocate(struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + struct_offset + struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + struct_offset + struct_size) > this->buffer().size())
            return 0;

        // Write the payload first, shrink the buffer to the exact size, then
        // record the (size, type) header in front of the payload.
        struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(initial_size + struct_size);

        *reinterpret_cast<uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8) = struct_size;
        *reinterpret_cast<uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4) = struct_type;
        return struct_size;
    }

    // Deserialize the struct value; returns the consumed size: 0 when out of
    // range, 8 (header only) when the header is inconsistent.
    size_t deserialize(::test::StructNested& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;

        size_t struct_size = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8);
        size_t struct_type = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4);
        assert(((struct_size > 0) && (struct_type == fbe_type())) && "Model is broken!");
        if ((struct_size == 0) || (struct_type != fbe_type()))
            return 8;

        return 8 + _model.get(value);
    }

    // Move to the next struct value
    void next(size_t prev) noexcept { _model.fbe_shift(prev); }

private:
    FinalModel<TBuffer, ::test::StructNested> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructBytes field model
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructBytes>
{
public:
    // Standard (offset-based) format: the field itself is a 4-byte offset to
    // the struct body; the body begins with a 4-byte size and a 4-byte type,
    // followed by the fixed part of every field laid out sequentially.
    FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        , f1(buffer, 4 + 4)
        , f2(buffer, f1.fbe_offset() + f1.fbe_size())
        , f3(buffer, f2.fbe_offset() + f2.fbe_size())
    {}
    // Get the field offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Get the field size (a 4-byte offset pointer into the buffer)
    size_t fbe_size() const noexcept { return 4; }
    // Get the field body size: header plus the fixed part of each field
    size_t fbe_body() const noexcept
    {
        size_t fbe_result = 4 + 4
            + f1.fbe_size()
            + f2.fbe_size()
            + f3.fbe_size()
            ;
        return fbe_result;
    }
    // Get the field extra size: the body plus any dynamic data the fields
    // reference (returns 0 when the struct offset is absent or out of range)
    size_t fbe_extra() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
            return 0;
        // Temporarily rebase the buffer at the struct body so sub-fields
        // resolve their own relative offsets.
        _buffer.shift(fbe_struct_offset);
        size_t fbe_result = fbe_body()
            + f1.fbe_extra()
            + f2.fbe_extra()
            + f3.fbe_extra()
            ;
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Get the field type
    static constexpr size_t fbe_type() noexcept { return 120; }
    // Shift the current field offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current field offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }
    // Check if the struct value is valid. A field that lies entirely outside
    // the buffer is treated as valid (absent); an in-range but inconsistent
    // header (bad size/offset, or wrong type when fbe_verify_type) fails.
    bool verify(bool fbe_verify_type = true) const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return true;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return false;
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        if (fbe_struct_size < (4 + 4))
            return false;
        uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
        if (fbe_verify_type && (fbe_struct_type != fbe_type()))
            return false;
        _buffer.shift(fbe_struct_offset);
        bool fbe_result = verify_fields(fbe_struct_size);
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Check if the struct fields are valid. A field that does not fit inside
    // fbe_struct_size ends validation successfully: shorter structs from an
    // older schema version simply lack trailing fields.
    bool verify_fields(size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;
        if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
            return true;
        if (!f1.verify())
            return false;
        fbe_current_size += f1.fbe_size();
        if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
            return true;
        if (!f2.verify())
            return false;
        fbe_current_size += f2.fbe_size();
        if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
            return true;
        if (!f3.verify())
            return false;
        fbe_current_size += f3.fbe_size();
        return true;
    }
    // Get the struct value (begin phase): validate the header, rebase the
    // buffer at the struct body and return the offset (0 on failure).
    size_t get_begin() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return 0;
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
        if (fbe_struct_size < (4 + 4))
            return 0;
        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }
    // Get the struct value (end phase): restore the buffer base
    void get_end(size_t fbe_begin) const noexcept
    {
        _buffer.unshift(fbe_begin);
    }
    // Get the struct value
    void get(::test::StructBytes& fbe_value) const noexcept
    {
        size_t fbe_begin = get_begin();
        if (fbe_begin == 0)
            return;
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
        get_fields(fbe_value, fbe_struct_size);
        get_end(fbe_begin);
    }
    // Get the struct fields values. Fields beyond fbe_struct_size (absent in
    // an older serialization) are reset to their defaults: f1 cleared,
    // f2/f3 set to std::nullopt.
    void get_fields(::test::StructBytes& fbe_value, size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;
        if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
            f1.get(fbe_value.f1);
        else
            fbe_value.f1.clear();
        fbe_current_size += f1.fbe_size();
        if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
            f2.get(fbe_value.f2);
        else
            fbe_value.f2 = std::nullopt;
        fbe_current_size += f2.fbe_size();
        if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
            f3.get(fbe_value.f3, std::nullopt);
        else
            fbe_value.f3 = std::nullopt;
        fbe_current_size += f3.fbe_size();
    }
    // Set the struct value (begin phase): allocate the body, record its
    // offset in the field slot, write the (size, type) header and rebase the
    // buffer at the body. Returns the body offset (0 on failure).
    size_t set_begin()
    {
        assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_size = (uint32_t)fbe_body();
        uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
            return 0;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }
    // Set the struct value (end phase): restore the buffer base
    void set_end(size_t fbe_begin)
    {
        _buffer.unshift(fbe_begin);
    }
    // Set the struct value
    void set(const ::test::StructBytes& fbe_value) noexcept
    {
        size_t fbe_begin = set_begin();
        if (fbe_begin == 0)
            return;
        set_fields(fbe_value);
        set_end(fbe_begin);
    }
    // Set the struct fields values
    void set_fields(const ::test::StructBytes& fbe_value) noexcept
    {
        f1.set(fbe_value.f1);
        f2.set(fbe_value.f2);
        f3.set(fbe_value.f3);
    }
private:
    TBuffer& _buffer;
    size_t _offset;
public:
    FieldModel<TBuffer, FBE::buffer_t> f1;
    FieldModel<TBuffer, std::optional<FBE::buffer_t>> f2;
    FieldModel<TBuffer, std::optional<FBE::buffer_t>> f3;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructBytes model
template <class TBuffer>
class StructBytesModel : public FBE::Model<TBuffer>
{
public:
    // The root field model starts at offset 4, right behind the 4-byte
    // full-size value written by create_end().
    StructBytesModel() : model(this->buffer(), 4) {}
    StructBytesModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size (fixed part plus dynamic extra)
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type (mirrors the underlying field model)
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructBytes>::fbe_type(); }

    // Check if the struct value is valid: the full-size header must be in
    // range and at least as large as the fixed model size.
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;
        uint32_t full_size = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4);
        return (full_size >= model.fbe_size()) && model.verify();
    }

    // Create a new model (begin phase): reserve the 4-byte full-size slot
    // plus the fixed part of the root field model.
    size_t create_begin() { return this->buffer().allocate(4 + model.fbe_size()); }

    // Create a new model (end phase): record the full serialized size.
    size_t create_end(size_t fbe_begin)
    {
        uint32_t full_size = (uint32_t)(this->buffer().size() - fbe_begin);
        *reinterpret_cast<uint32_t*>(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4) = full_size;
        return full_size;
    }

    // Serialize the struct value; returns the full serialized size.
    size_t serialize(const ::test::StructBytes& value)
    {
        size_t begin = create_begin();
        model.set(value);
        return create_end(begin);
    }

    // Deserialize the struct value; returns the consumed size (0 on failure).
    size_t deserialize(::test::StructBytes& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;

        uint32_t full_size = *reinterpret_cast<const uint32_t*>(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4);
        assert((full_size >= model.fbe_size()) && "Model is broken!");
        if (full_size < model.fbe_size())
            return 0;

        model.get(value);
        return full_size;
    }

    // Move to the next struct value
    void next(size_t prev) noexcept { model.fbe_shift(prev); }

public:
    FieldModel<TBuffer, ::test::StructBytes> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructBytes final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructBytes>
{
public:
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
{}
// Get the allocation size
size_t fbe_allocation_size(const ::test::StructBytes& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 120; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
size_t get(::test::StructBytes& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructBytes& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructBytes& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructBytes& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
mutable size_t _offset;
public:
FinalModel<TBuffer, FBE::buffer_t> f1;
FinalModel<TBuffer, std::optional<FBE::buffer_t>> f2;
FinalModel<TBuffer, std::optional<FBE::buffer_t>> f3;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructBytes final model
template <class TBuffer>
class StructBytesFinalModel : public FBE::Model<TBuffer>
{
public:
    // The payload model starts at offset 8, right behind the 4-byte size and
    // 4-byte type values written in front of it by serialize().
    StructBytesFinalModel() : _model(this->buffer(), 8) {}
    StructBytesFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructBytes>::fbe_type(); }
    // Check if the struct value is valid: the header must be in range, carry
    // a non-zero size and the expected type, and the declared size must match
    // the verified payload size exactly.
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;
        return ((8 + _model.verify()) == fbe_struct_size);
    }
    // Serialize the struct value; returns the total serialized size
    // (0 when the buffer could not provide enough room).
    size_t serialize(const ::test::StructBytes& value)
    {
        size_t fbe_initial_size = this->buffer().size();
        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;
        // Write the payload first, shrink the buffer to the exact size, then
        // record the (size, type) header in front of the payload.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
        return fbe_struct_size;
    }
    // Deserialize the struct value; returns the consumed size: 0 when out of
    // range, 8 (header only) when the header is inconsistent.
    size_t deserialize(::test::StructBytes& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;
        return 8 + _model.get(value);
    }
    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }
private:
    FinalModel<TBuffer, ::test::StructBytes> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructArray field model
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructArray>
{
public:
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 4 + 4)
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
, f5(buffer, f4.fbe_offset() + f4.fbe_size())
, f6(buffer, f5.fbe_offset() + f5.fbe_size())
, f7(buffer, f6.fbe_offset() + f6.fbe_size())
, f8(buffer, f7.fbe_offset() + f7.fbe_size())
, f9(buffer, f8.fbe_offset() + f8.fbe_size())
, f10(buffer, f9.fbe_offset() + f9.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size
size_t fbe_size() const noexcept { return 4; }
// Get the field body size
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
+ f5.fbe_size()
+ f6.fbe_size()
+ f7.fbe_size()
+ f8.fbe_size()
+ f9.fbe_size()
+ f10.fbe_size()
;
return fbe_result;
}
// Get the field extra size
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
+ f5.fbe_extra()
+ f6.fbe_extra()
+ f7.fbe_extra()
+ f8.fbe_extra()
+ f9.fbe_extra()
+ f10.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type
static constexpr size_t fbe_type() noexcept { return 125; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
bool verify(bool fbe_verify_type = true) const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
return true;
if (!f5.verify())
return false;
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
return true;
if (!f6.verify())
return false;
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
return true;
if (!f7.verify())
return false;
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
return true;
if (!f8.verify())
return false;
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
return true;
if (!f9.verify())
return false;
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
return true;
if (!f10.verify())
return false;
fbe_current_size += f10.fbe_size();
return true;
}
// Get the struct value (begin phase)
// Resolves the 4-byte struct pointer stored at this field's offset, validates
// the referenced header (size + type markers), and shifts the buffer window
// onto the struct body. Returns the shift amount (to be undone by get_end),
// or 0 when there is no valid struct to read.
size_t get_begin() const noexcept
{
    // The 4-byte offset slot itself must lie inside the buffer.
    if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
        return 0;
    const uint32_t struct_offset = *reinterpret_cast<const uint32_t*>(_buffer.data() + _buffer.offset() + fbe_offset());
    assert(((struct_offset > 0) && ((_buffer.offset() + struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
    if ((struct_offset == 0) || ((_buffer.offset() + struct_offset + 4 + 4) > _buffer.size()))
        return 0;
    const uint32_t struct_size = *reinterpret_cast<const uint32_t*>(_buffer.data() + _buffer.offset() + struct_offset);
    assert((struct_size >= (4 + 4)) && "Model is broken!");
    if (struct_size < (4 + 4))
        return 0;
    _buffer.shift(struct_offset);
    return struct_offset;
}
// Get the struct value (end phase)
// Restores the buffer window that get_begin() shifted onto the struct body.
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
// Deserializes the struct into fbe_value; leaves fbe_value untouched when the
// buffer holds no valid struct reference (get_begin() returns 0).
void get(::test::StructArray& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
// After get_begin() the window starts at the struct body, so the 4-byte size
// marker sits exactly at the current buffer offset.
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values
// Reads every field that is fully contained within the serialized struct body
// (fbe_struct_size). A field lying beyond the recorded size was not written by
// the producer (older protocol version) and is skipped, leaving fbe_value's
// current contents for it.
void get_fields(::test::StructArray& fbe_value, size_t fbe_struct_size) const noexcept
{
    // 4 + 4 skips the struct size and struct type markers at the body front.
    // NOTE(fix): the running size must advance past EVERY field, read or
    // skipped; previously it advanced only in the skip branch, so after the
    // first successfully read field all later truncation checks compared
    // against a stale size (cf. the StructVector get_fields in this file,
    // which increments unconditionally).
    size_t fbe_current_size = 4 + 4;
    if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
        f1.get(fbe_value.f1);
    fbe_current_size += f1.fbe_size();
    if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
        f2.get(fbe_value.f2);
    fbe_current_size += f2.fbe_size();
    if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
        f3.get(fbe_value.f3);
    fbe_current_size += f3.fbe_size();
    if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
        f4.get(fbe_value.f4);
    fbe_current_size += f4.fbe_size();
    if ((fbe_current_size + f5.fbe_size()) <= fbe_struct_size)
        f5.get(fbe_value.f5);
    fbe_current_size += f5.fbe_size();
    if ((fbe_current_size + f6.fbe_size()) <= fbe_struct_size)
        f6.get(fbe_value.f6);
    fbe_current_size += f6.fbe_size();
    if ((fbe_current_size + f7.fbe_size()) <= fbe_struct_size)
        f7.get(fbe_value.f7);
    fbe_current_size += f7.fbe_size();
    if ((fbe_current_size + f8.fbe_size()) <= fbe_struct_size)
        f8.get(fbe_value.f8);
    fbe_current_size += f8.fbe_size();
    if ((fbe_current_size + f9.fbe_size()) <= fbe_struct_size)
        f9.get(fbe_value.f9);
    fbe_current_size += f9.fbe_size();
    if ((fbe_current_size + f10.fbe_size()) <= fbe_struct_size)
        f10.get(fbe_value.f10);
    fbe_current_size += f10.fbe_size();
}
// Set the struct value (begin phase)
// Allocates the struct body in the buffer, records its offset in this field's
// 4-byte slot, writes the size and type markers, and shifts the buffer window
// onto the body. Returns the shift amount (undone by set_end), or 0 on failure.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!")°;
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
// Link the field slot to the freshly allocated body, then stamp the header.
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase)
// Restores the buffer window that set_begin() shifted onto the struct body.
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
// Serializes fbe_value into the buffer; a failed set_begin() (allocation or
// bounds failure) aborts silently without writing any field.
void set(const ::test::StructArray& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values
// Writes all fields in declaration order; each sub-model already knows its
// fixed offset inside the struct body (assigned in the constructor).
void set_fields(const ::test::StructArray& fbe_value) noexcept
{
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
f5.set(fbe_value.f5);
f6.set(fbe_value.f6);
f7.set(fbe_value.f7);
f8.set(fbe_value.f8);
f9.set(fbe_value.f9);
f10.set(fbe_value.f10);
}
private:
// Underlying byte buffer and this field's offset within it.
TBuffer& _buffer;
size_t _offset;
public:
// Per-field sub-models: fixed-size arrays of 2 elements covering primitive,
// optional, bytes, enum, flags and nested-struct element types.
FieldModelArray<TBuffer, uint8_t, 2> f1;
FieldModelArray<TBuffer, std::optional<uint8_t>, 2> f2;
FieldModelArray<TBuffer, FBE::buffer_t, 2> f3;
FieldModelArray<TBuffer, std::optional<FBE::buffer_t>, 2> f4;
FieldModelArray<TBuffer, ::test::EnumSimple, 2> f5;
FieldModelArray<TBuffer, std::optional<::test::EnumSimple>, 2> f6;
FieldModelArray<TBuffer, ::test::FlagsSimple, 2> f7;
FieldModelArray<TBuffer, std::optional<::test::FlagsSimple>, 2> f8;
FieldModelArray<TBuffer, ::test::StructSimple, 2> f9;
FieldModelArray<TBuffer, std::optional<::test::StructSimple>, 2> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructArray model
// Top-level (non-final) serialization model: the buffer layout is a 4-byte
// full-size prefix followed by the root FieldModel (whose own offset is 4,
// leaving room for that prefix).
template <class TBuffer>
class StructArrayModel : public FBE::Model<TBuffer>
{
public:
StructArrayModel() : model(this->buffer(), 4) {}
StructArrayModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
// Get the model size
size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
// Get the model type
static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructArray>::fbe_type(); }
// Check if the struct value is valid
bool verify()
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return false;
// Full size is stored 4 bytes before the root field model.
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
if (fbe_full_size < model.fbe_size())
return false;
return model.verify();
}
// Create a new model (begin phase)
size_t create_begin()
{
size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
return fbe_begin;
}
// Create a new model (end phase)
// Computes the total serialized size and writes it into the 4-byte prefix.
size_t create_end(size_t fbe_begin)
{
size_t fbe_end = this->buffer().size();
uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
return fbe_full_size;
}
// Serialize the struct value
// Returns the full serialized size in bytes.
size_t serialize(const ::test::StructArray& value)
{
size_t fbe_begin = create_begin();
model.set(value);
size_t fbe_full_size = create_end(fbe_begin);
return fbe_full_size;
}
// Deserialize the struct value
// Returns the consumed size in bytes, or 0 when the buffer is invalid/truncated.
size_t deserialize(::test::StructArray& value) const noexcept
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return 0;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
if (fbe_full_size < model.fbe_size())
return 0;
model.get(value);
return fbe_full_size;
}
// Move to the next struct value
void next(size_t prev) noexcept
{
model.fbe_shift(prev);
}
public:
FieldModel<TBuffer, ::test::StructArray> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructArray final model
// "Final" models pack fields back-to-back with no offset/pointer indirection:
// every field's offset is computed sequentially at (de)serialization time via
// the mutable fbe_offset(offset) setter, and each operation returns the number
// of bytes consumed/produced so the caller can advance.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructArray>
{
public:
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
, f5(buffer, 0)
, f6(buffer, 0)
, f7(buffer, 0)
, f8(buffer, 0)
, f9(buffer, 0)
, f10(buffer, 0)
{}
// Get the allocation size
// Upper bound of bytes needed to serialize fbe_value (sum over all fields).
size_t fbe_allocation_size(const ::test::StructArray& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
+ f4.fbe_allocation_size(fbe_value.f4)
+ f5.fbe_allocation_size(fbe_value.f5)
+ f6.fbe_allocation_size(fbe_value.f6)
+ f7.fbe_allocation_size(fbe_value.f7)
+ f8.fbe_allocation_size(fbe_value.f8)
+ f9.fbe_allocation_size(fbe_value.f9)
+ f10.fbe_allocation_size(fbe_value.f10)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset
// const but mutates the `mutable` _offset: offsets are assigned on the fly
// while traversing the sequential layout.
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 125; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
// Returns the verified size in bytes, or SIZE_MAX on failure.
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid
// Verifies fields sequentially, assigning each its running offset; any field
// reporting SIZE_MAX aborts the whole check with SIZE_MAX.
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
// Returns the number of bytes read.
size_t get(::test::StructArray& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructArray& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.get(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.get(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.get(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.get(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.get(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.get(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.get(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
// Returns the number of bytes written.
size_t set(const ::test::StructArray& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructArray& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.set(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.set(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.set(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.set(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.set(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.set(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
// mutable: assigned from the const fbe_offset(size_t) setter during traversal.
mutable size_t _offset;
public:
FinalModelArray<TBuffer, uint8_t, 2> f1;
FinalModelArray<TBuffer, std::optional<uint8_t>, 2> f2;
FinalModelArray<TBuffer, FBE::buffer_t, 2> f3;
FinalModelArray<TBuffer, std::optional<FBE::buffer_t>, 2> f4;
FinalModelArray<TBuffer, ::test::EnumSimple, 2> f5;
FinalModelArray<TBuffer, std::optional<::test::EnumSimple>, 2> f6;
FinalModelArray<TBuffer, ::test::FlagsSimple, 2> f7;
FinalModelArray<TBuffer, std::optional<::test::FlagsSimple>, 2> f8;
FinalModelArray<TBuffer, ::test::StructSimple, 2> f9;
FinalModelArray<TBuffer, std::optional<::test::StructSimple>, 2> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructArray final model
// Top-level final-format model: an 8-byte header (4-byte struct size + 4-byte
// struct type) precedes the packed struct body, hence the root offset of 8.
template <class TBuffer>
class StructArrayFinalModel : public FBE::Model<TBuffer>
{
public:
StructArrayFinalModel() : _model(this->buffer(), 8) {}
StructArrayFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
// Get the model type
static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructArray>::fbe_type(); }
// Check if the struct value is valid
bool verify()
{
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return false;
// Size and type markers sit at offsets -8 and -4 relative to the body.
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return false;
// The final format admits no truncation: the verified size must match exactly.
return ((8 + _model.verify()) == fbe_struct_size);
}
// Serialize the struct value
// Returns the full serialized size (header + body) in bytes, or 0 on failure.
size_t serialize(const ::test::StructArray& value)
{
size_t fbe_initial_size = this->buffer().size();
uint32_t fbe_struct_type = (uint32_t)fbe_type();
// Allocate an upper-bound estimate first, then trim to the actual size.
uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
return 0;
fbe_struct_size = (uint32_t)(8 + _model.set(value));
this->buffer().resize(fbe_initial_size + fbe_struct_size);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
return fbe_struct_size;
}
// Deserialize the struct value
// Returns the consumed size in bytes; a bad header yields 8 (header only).
size_t deserialize(::test::StructArray& value) const noexcept
{
assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!")°;
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return 0;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return 8;
return 8 + _model.get(value);
}
// Move to the next struct value
void next(size_t prev) noexcept
{
_model.fbe_shift(prev);
}
private:
FinalModel<TBuffer, ::test::StructArray> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructVector field model
// Standard-format field model: the field itself is a 4-byte offset pointing at
// the struct body, which starts with a 4-byte size marker and a 4-byte type
// marker followed by the fields at fixed offsets (assigned in the constructor).
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructVector>
{
public:
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 4 + 4)
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
, f5(buffer, f4.fbe_offset() + f4.fbe_size())
, f6(buffer, f5.fbe_offset() + f5.fbe_size())
, f7(buffer, f6.fbe_offset() + f6.fbe_size())
, f8(buffer, f7.fbe_offset() + f7.fbe_size())
, f9(buffer, f8.fbe_offset() + f8.fbe_size())
, f10(buffer, f9.fbe_offset() + f9.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size
// The field stores only a 4-byte offset to the struct body.
size_t fbe_size() const noexcept { return 4; }
// Get the field body size
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
+ f5.fbe_size()
+ f6.fbe_size()
+ f7.fbe_size()
+ f8.fbe_size()
+ f9.fbe_size()
+ f10.fbe_size()
;
return fbe_result;
}
// Get the field extra size
// Size of the out-of-line body plus all fields' own extra data.
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()))°;
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
+ f5.fbe_extra()
+ f6.fbe_extra()
+ f7.fbe_extra()
+ f8.fbe_extra()
+ f9.fbe_extra()
+ f10.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type
static constexpr size_t fbe_type() noexcept { return 130; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
bool verify(bool fbe_verify_type = true) const noexcept
{
// A field slot beyond the buffer is valid (absent) for forward compatibility.
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid
// Fields beyond fbe_struct_size were written by an older protocol version and
// count as valid (forward compatibility).
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
return true;
if (!f5.verify())
return false;
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
return true;
if (!f6.verify())
return false;
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
return true;
if (!f7.verify())
return false;
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
return true;
if (!f8.verify())
return false;
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
return true;
if (!f9.verify())
return false;
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
return true;
if (!f10.verify())
return false;
fbe_current_size += f10.fbe_size();
return true;
}
// Get the struct value (begin phase)
// Shifts the buffer window to the struct body; returns the shift amount or 0.
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase)
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructVector& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values
// Fields beyond fbe_struct_size (older producer) are reset to empty vectors;
// note the running size advances unconditionally after each field.
void get_fields(::test::StructVector& fbe_value, size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
f1.get(fbe_value.f1);
else
fbe_value.f1.clear();
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
f2.get(fbe_value.f2);
else
fbe_value.f2.clear();
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
f3.get(fbe_value.f3);
else
fbe_value.f3.clear();
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
f4.get(fbe_value.f4);
else
fbe_value.f4.clear();
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) <= fbe_struct_size)
f5.get(fbe_value.f5);
else
fbe_value.f5.clear();
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) <= fbe_struct_size)
f6.get(fbe_value.f6);
else
fbe_value.f6.clear();
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) <= fbe_struct_size)
f7.get(fbe_value.f7);
else
fbe_value.f7.clear();
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) <= fbe_struct_size)
f8.get(fbe_value.f8);
else
fbe_value.f8.clear();
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) <= fbe_struct_size)
f9.get(fbe_value.f9);
else
fbe_value.f9.clear();
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) <= fbe_struct_size)
f10.get(fbe_value.f10);
else
fbe_value.f10.clear();
fbe_current_size += f10.fbe_size();
}
// Set the struct value (begin phase)
// Allocates the struct body, links the field slot to it and stamps the header.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase)
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
void set(const ::test::StructVector& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values
void set_fields(const ::test::StructVector& fbe_value) noexcept
{
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
f5.set(fbe_value.f5);
f6.set(fbe_value.f6);
f7.set(fbe_value.f7);
f8.set(fbe_value.f8);
f9.set(fbe_value.f9);
f10.set(fbe_value.f10);
}
private:
TBuffer& _buffer;
size_t _offset;
public:
// Per-field sub-models: variable-length vectors of primitive, optional,
// bytes, enum, flags and nested-struct element types.
FieldModelVector<TBuffer, uint8_t> f1;
FieldModelVector<TBuffer, std::optional<uint8_t>> f2;
FieldModelVector<TBuffer, FBE::buffer_t> f3;
FieldModelVector<TBuffer, std::optional<FBE::buffer_t>> f4;
FieldModelVector<TBuffer, ::test::EnumSimple> f5;
FieldModelVector<TBuffer, std::optional<::test::EnumSimple>> f6;
FieldModelVector<TBuffer, ::test::FlagsSimple> f7;
FieldModelVector<TBuffer, std::optional<::test::FlagsSimple>> f8;
FieldModelVector<TBuffer, ::test::StructSimple> f9;
FieldModelVector<TBuffer, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructVector model
// Top-level (non-final) serialization model: the buffer layout is a 4-byte
// full-size prefix followed by the root FieldModel (whose own offset is 4,
// leaving room for that prefix).
template <class TBuffer>
class StructVectorModel : public FBE::Model<TBuffer>
{
public:
StructVectorModel() : model(this->buffer(), 4) {}
StructVectorModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
// Get the model size
size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
// Get the model type
static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructVector>::fbe_type(); }
// Check if the struct value is valid
bool verify()
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return false;
// Full size is stored 4 bytes before the root field model.
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
if (fbe_full_size < model.fbe_size())
return false;
return model.verify();
}
// Create a new model (begin phase)
size_t create_begin()
{
size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
return fbe_begin;
}
// Create a new model (end phase)
// Computes the total serialized size and writes it into the 4-byte prefix.
size_t create_end(size_t fbe_begin)
{
size_t fbe_end = this->buffer().size();
uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
return fbe_full_size;
}
// Serialize the struct value
// Returns the full serialized size in bytes.
size_t serialize(const ::test::StructVector& value)
{
size_t fbe_begin = create_begin();
model.set(value);
size_t fbe_full_size = create_end(fbe_begin);
return fbe_full_size;
}
// Deserialize the struct value
// Returns the consumed size in bytes, or 0 when the buffer is invalid/truncated.
size_t deserialize(::test::StructVector& value) const noexcept
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return 0;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
if (fbe_full_size < model.fbe_size())
return 0;
model.get(value);
return fbe_full_size;
}
// Move to the next struct value
void next(size_t prev) noexcept
{
model.fbe_shift(prev);
}
public:
FieldModel<TBuffer, ::test::StructVector> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructVector final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructVector>
{
public:
// Construct over the given buffer; field sub-models start at offset 0 because
// final-format offsets are assigned sequentially during (de)serialization.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
, f5(buffer, 0)
, f6(buffer, 0)
, f7(buffer, 0)
, f8(buffer, 0)
, f9(buffer, 0)
, f10(buffer, 0)
{}
// Get the allocation size
// Upper bound of bytes needed to serialize fbe_value (sum over all fields).
size_t fbe_allocation_size(const ::test::StructVector& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
+ f4.fbe_allocation_size(fbe_value.f4)
+ f5.fbe_allocation_size(fbe_value.f5)
+ f6.fbe_allocation_size(fbe_value.f6)
+ f7.fbe_allocation_size(fbe_value.f7)
+ f8.fbe_allocation_size(fbe_value.f8)
+ f9.fbe_allocation_size(fbe_value.f9)
+ f10.fbe_allocation_size(fbe_value.f10)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset
// const but mutates the `mutable` _offset: final-format offsets are assigned
// on the fly while traversing the sequential layout.
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 130; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
// Returns the verified size in bytes, or SIZE_MAX on failure; the buffer
// window is shifted to this struct's offset for the duration of the check.
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
size_t get(::test::StructVector& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructVector& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.get(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.get(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.get(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.get(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.get(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.get(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.get(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructVector& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructVector& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.set(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.set(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.set(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.set(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.set(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.set(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
mutable size_t _offset;
public:
FinalModelVector<TBuffer, uint8_t> f1;
FinalModelVector<TBuffer, std::optional<uint8_t>> f2;
FinalModelVector<TBuffer, FBE::buffer_t> f3;
FinalModelVector<TBuffer, std::optional<FBE::buffer_t>> f4;
FinalModelVector<TBuffer, ::test::EnumSimple> f5;
FinalModelVector<TBuffer, std::optional<::test::EnumSimple>> f6;
FinalModelVector<TBuffer, ::test::FlagsSimple> f7;
FinalModelVector<TBuffer, std::optional<::test::FlagsSimple>> f8;
FinalModelVector<TBuffer, ::test::StructSimple> f9;
FinalModelVector<TBuffer, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructVector final model
//
// Top-level wrapper over FinalModel<TBuffer, ::test::StructVector>.
// The serialized layout is an 8-byte header (uint32 struct size at
// fbe_offset-8, uint32 struct type at fbe_offset-4) followed by the
// final-format struct body; the inner model is anchored at offset 8.
template <class TBuffer>
class StructVectorFinalModel : public FBE::Model<TBuffer>
{
public:
StructVectorFinalModel() : _model(this->buffer(), 8) {}
StructVectorFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
// Get the model type
static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructVector>::fbe_type(); }
// Check if the struct value is valid: header must carry a non-zero size
// and the expected type id, and the verified body size must match it
bool verify()
{
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return false;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return false;
return ((8 + _model.verify()) == fbe_struct_size);
}
// Serialize the struct value.
// Returns the total serialized size (header + body), or 0 on failure.
size_t serialize(const ::test::StructVector& value)
{
size_t fbe_initial_size = this->buffer().size();
uint32_t fbe_struct_type = (uint32_t)fbe_type();
uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
return 0;
// Recompute the size from what was actually written, then trim the
// buffer: fbe_allocation_size() is an upper-bound estimate
fbe_struct_size = (uint32_t)(8 + _model.set(value));
this->buffer().resize(fbe_initial_size + fbe_struct_size);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
return fbe_struct_size;
}
// Deserialize the struct value.
// Returns the number of bytes consumed (8 if the header is invalid,
// 0 if the buffer is too small).
size_t deserialize(::test::StructVector& value) const noexcept
{
assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return 0;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return 8;
return 8 + _model.get(value);
}
// Move to the next struct value (advance by the previous struct's size)
void next(size_t prev) noexcept
{
_model.fbe_shift(prev);
}
private:
FinalModel<TBuffer, ::test::StructVector> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructList field model
//
// Standard-format field model: the field itself is a 4-byte offset that
// points to the struct body. The body starts with a 4 + 4 byte header
// (uint32 body size, uint32 type id) followed by the ten field models
// laid out at fixed offsets chained off one another.
// NOTE(review): generated code - the f1..f10 offset chain in the
// constructor defines the wire layout; do not reorder the fields.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructList>
{
public:
// Bind the model to a buffer; each field starts right after the
// previous one, with f1 placed just past the 4 + 4 byte header.
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 4 + 4)
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
, f5(buffer, f4.fbe_offset() + f4.fbe_size())
, f6(buffer, f5.fbe_offset() + f5.fbe_size())
, f7(buffer, f6.fbe_offset() + f6.fbe_size())
, f8(buffer, f7.fbe_offset() + f7.fbe_size())
, f9(buffer, f8.fbe_offset() + f8.fbe_size())
, f10(buffer, f9.fbe_offset() + f9.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size (the 4-byte offset pointer itself)
size_t fbe_size() const noexcept { return 4; }
// Get the field body size: header plus the fixed parts of all fields
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
+ f5.fbe_size()
+ f6.fbe_size()
+ f7.fbe_size()
+ f8.fbe_size()
+ f9.fbe_size()
+ f10.fbe_size()
;
return fbe_result;
}
// Get the field extra size: the body plus all out-of-line data reachable
// through the struct offset (0 when the field is absent or out of range)
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
+ f5.fbe_extra()
+ f6.fbe_extra()
+ f7.fbe_extra()
+ f8.fbe_extra()
+ f9.fbe_extra()
+ f10.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type (FBE type id of ::test::StructList)
static constexpr size_t fbe_type() noexcept { return 131; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid
bool verify(bool fbe_verify_type = true) const noexcept
{
// A field lying beyond the buffer is treated as absent, which is valid
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4))
;
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid. Fields that fall outside the
// serialized struct size are skipped (forward/backward compatibility).
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size()
;
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
return true;
if (!f5.verify())
return false;
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
return true;
if (!f6.verify())
return false;
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
return true;
if (!f7.verify())
return false;
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
return true;
if (!f8.verify())
return false;
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
return true;
if (!f9.verify())
return false;
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
return true;
if (!f10.verify())
return false;
fbe_current_size += f10.fbe_size();
return true;
}
// Get the struct value (begin phase): validate the header and shift the
// buffer to the struct body. Returns the struct offset, or 0 on failure.
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase): restore the buffer position
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructList& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
// The body size is the first uint32 of the struct header
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values. Fields beyond the serialized struct size
// (e.g. added by a newer schema) are reset to their defaults.
void get_fields(::test::StructList& fbe_value, size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
f1.get(fbe_value.f1);
else
fbe_value.f1.clear();
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
f2.get(fbe_value.f2);
else
fbe_value.f2.clear();
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
f3.get(fbe_value.f3);
else
fbe_value.f3.clear();
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
f4.get(fbe_value.f4);
else
fbe_value.f4.clear();
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) <= fbe_struct_size)
f5.get(fbe_value.f5);
else
fbe_value.f5.clear();
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) <= fbe_struct_size)
f6.get(fbe_value.f6);
else
fbe_value.f6.clear();
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) <= fbe_struct_size)
f7.get(fbe_value.f7);
else
fbe_value.f7.clear();
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) <= fbe_struct_size)
f8.get(fbe_value.f8);
else
fbe_value.f8.clear();
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) <= fbe_struct_size)
f9.get(fbe_value.f9);
else
fbe_value.f9.clear();
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) <= fbe_struct_size)
f10.get(fbe_value.f10);
else
fbe_value.f10.clear();
fbe_current_size += f10.fbe_size();
}
// Set the struct value (begin phase): allocate the body, write the
// offset pointer and the size/type header, then shift to the body.
// Returns the struct offset, or 0 on failure.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase): restore the buffer position
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
void set(const ::test::StructList& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values
void set_fields(const ::test::StructList& fbe_value) noexcept
{
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
f5.set(fbe_value.f5);
f6.set(fbe_value.f6);
f7.set(fbe_value.f7);
f8.set(fbe_value.f8);
f9.set(fbe_value.f9);
f10.set(fbe_value.f10);
}
private:
TBuffer& _buffer;
size_t _offset;
public:
FieldModelVector<TBuffer, uint8_t> f1;
FieldModelVector<TBuffer, std::optional<uint8_t>> f2;
FieldModelVector<TBuffer, FBE::buffer_t> f3;
FieldModelVector<TBuffer, std::optional<FBE::buffer_t>> f4;
FieldModelVector<TBuffer, ::test::EnumSimple> f5;
FieldModelVector<TBuffer, std::optional<::test::EnumSimple>> f6;
FieldModelVector<TBuffer, ::test::FlagsSimple> f7;
FieldModelVector<TBuffer, std::optional<::test::FlagsSimple>> f8;
FieldModelVector<TBuffer, ::test::StructSimple> f9;
FieldModelVector<TBuffer, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructList model
//
// Top-level wrapper over FieldModel<TBuffer, ::test::StructList>.
// The serialized layout is a 4-byte full-size prefix (written at
// model.fbe_offset() - 4) followed by the standard-format struct data;
// the inner field model is anchored at offset 4.
template <class TBuffer>
class StructListModel : public FBE::Model<TBuffer>
{
public:
StructListModel() : model(this->buffer(), 4) {}
StructListModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
// Get the model size (fixed part plus out-of-line extra data)
size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
// Get the model type
static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructList>::fbe_type(); }
// Check if the struct value is valid
bool verify()
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return false;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
if (fbe_full_size < model.fbe_size())
return false;
return model.verify();
}
// Create a new model (begin phase): reserve space for the size prefix
// and the field model's fixed part. Returns the allocation offset.
size_t create_begin()
{
size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
return fbe_begin;
}
// Create a new model (end phase): back-patch the full-size prefix.
// Returns the total serialized size.
size_t create_end(size_t fbe_begin)
{
size_t fbe_end = this->buffer().size();
uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
return fbe_full_size;
}
// Serialize the struct value. Returns the total serialized size.
size_t serialize(const ::test::StructList& value)
{
size_t fbe_begin = create_begin();
model.set(value);
size_t fbe_full_size = create_end(fbe_begin);
return fbe_full_size;
}
// Deserialize the struct value.
// Returns the full serialized size, or 0 on failure.
size_t deserialize(::test::StructList& value) const noexcept
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return 0;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
if (fbe_full_size < model.fbe_size())
return 0;
model.get(value);
return fbe_full_size;
}
// Move to the next struct value (advance by the previous struct's size)
void next(size_t prev) noexcept
{
model.fbe_shift(prev);
}
public:
FieldModel<TBuffer, ::test::StructList> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructList final model
//
// "Final" format packs the ten fields back to back with no per-field
// offset table: verify()/get()/set() walk the fields in declaration
// order (f1..f10), assigning each child model its running offset just
// before using it.
// NOTE(review): generated code - the sequential offset bookkeeping is
// order-sensitive; do not reorder the per-field statements.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructList>
{
public:
// Bind the model to a buffer at the given offset. Child field models are
// created at offset 0; their real offsets are assigned during traversal.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
, f5(buffer, 0)
, f6(buffer, 0)
, f7(buffer, 0)
, f8(buffer, 0)
, f9(buffer, 0)
, f10(buffer, 0)
{}
// Get the allocation size: sum of the serialized sizes of all ten fields
size_t fbe_allocation_size(const ::test::StructList& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
+ f4.fbe_allocation_size(fbe_value.f4)
+ f5.fbe_allocation_size(fbe_value.f5)
+ f6.fbe_allocation_size(fbe_value.f6)
+ f7.fbe_allocation_size(fbe_value.f7)
+ f8.fbe_allocation_size(fbe_value.f8)
+ f9.fbe_allocation_size(fbe_value.f9)
+ f10.fbe_allocation_size(fbe_value.f10)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset (const-qualified on purpose: _offset is declared
// mutable so traversal can run on const models)
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type (FBE type id of ::test::StructList)
static constexpr size_t fbe_type() noexcept { return 131; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid.
// Returns the verified byte size, or size_t max on failure.
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid.
// Walks f1..f10 sequentially, accumulating the running offset.
// Returns the total verified size, or std::numeric_limits<size_t>::max()
// as the error sentinel if any field fails.
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value. Returns the number of bytes consumed.
size_t get(::test::StructList& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values, reading f1..f10 at consecutive offsets
size_t get_fields(::test::StructList& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.get(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.get(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.get(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.get(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.get(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.get(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.get(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value. Returns the number of bytes written.
size_t set(const ::test::StructList& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values, writing f1..f10 at consecutive offsets
size_t set_fields(const ::test::StructList& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.set(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.set(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.set(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.set(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.set(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.set(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
// mutable: adjusted from const traversal methods via fbe_offset(size_t)
mutable size_t _offset;
public:
FinalModelVector<TBuffer, uint8_t> f1;
FinalModelVector<TBuffer, std::optional<uint8_t>> f2;
FinalModelVector<TBuffer, FBE::buffer_t> f3;
FinalModelVector<TBuffer, std::optional<FBE::buffer_t>> f4;
FinalModelVector<TBuffer, ::test::EnumSimple> f5;
FinalModelVector<TBuffer, std::optional<::test::EnumSimple>> f6;
FinalModelVector<TBuffer, ::test::FlagsSimple> f7;
FinalModelVector<TBuffer, std::optional<::test::FlagsSimple>> f8;
FinalModelVector<TBuffer, ::test::StructSimple> f9;
FinalModelVector<TBuffer, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructList final model
template <class TBuffer>
class StructListFinalModel : public FBE::Model<TBuffer>
{
public:
StructListFinalModel() : _model(this->buffer(), 8) {}
StructListFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
// Get the model type
static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructList>::fbe_type(); }
// Check if the struct value is valid
bool verify()
{
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return false;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return false;
return ((8 + _model.verify()) == fbe_struct_size);
}
// Serialize the struct value
size_t serialize(const ::test::StructList& value)
{
size_t fbe_initial_size = this->buffer().size();
uint32_t fbe_struct_type = (uint32_t)fbe_type();
uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
return 0;
fbe_struct_size = (uint32_t)(8 + _model.set(value));
this->buffer().resize(fbe_initial_size + fbe_struct_size);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
return fbe_struct_size;
}
// Deserialize the struct value
size_t deserialize(::test::StructList& value) const noexcept
{
assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return 0;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return 8;
return 8 + _model.get(value);
}
// Move to the next struct value: shift the model offset forward by
// 'prev' — the byte count returned by the previous serialize/deserialize.
void next(size_t prev) noexcept
{
_model.fbe_shift(prev);
}
private:
FinalModel<TBuffer, ::test::StructList> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructSet field model.
// Standard (non-final) format: the field itself is a 4-byte offset pointer;
// the struct body it points to begins with a 4-byte size and a 4-byte type,
// followed by the fields f1..f4 laid out sequentially.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructSet>
{
public:
// Each child field model is positioned relative to the struct body:
// the first field starts after the 4+4 byte (size + type) header.
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 4 + 4)
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size (always 4 bytes: the offset pointer)
size_t fbe_size() const noexcept { return 4; }
// Get the field body size (header plus all fixed field sizes)
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
;
return fbe_result;
}
// Get the field extra size: the body plus all variable-length data
// referenced by the fields. Returns 0 when the offset pointer is null
// or out of range.
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
// Temporarily rebase the buffer at the struct body while measuring children.
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type (generated type id for ::test::StructSet)
static constexpr size_t fbe_type() noexcept { return 132; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid.
// A field lying beyond the buffer is treated as valid (missing/truncated),
// while a non-null pointer to a malformed body is invalid.
bool verify(bool fbe_verify_type = true) const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid.
// Fields that fall beyond the recorded struct size are skipped (returns true)
// for forward compatibility with older writers that serialized fewer fields.
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
return true;
}
// Get the struct value (begin phase).
// Validates the offset pointer and header, then shifts the buffer base
// to the struct body. Returns the shift amount (0 on failure).
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase): restore the buffer base.
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructSet& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values.
// Fields beyond the recorded struct size are reset to their defaults
// (cleared) for forward compatibility.
void get_fields(::test::StructSet& fbe_value, size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
f1.get(fbe_value.f1);
else
fbe_value.f1.clear();
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
f2.get(fbe_value.f2);
else
fbe_value.f2.clear();
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
f3.get(fbe_value.f3);
else
fbe_value.f3.clear();
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
f4.get(fbe_value.f4);
else
fbe_value.f4.clear();
fbe_current_size += f4.fbe_size();
}
// Set the struct value (begin phase).
// Allocates the struct body, writes the offset pointer and the
// size/type header, then shifts the buffer base to the body.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase): restore the buffer base.
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
void set(const ::test::StructSet& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values
void set_fields(const ::test::StructSet& fbe_value) noexcept
{
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
}
private:
TBuffer& _buffer;
size_t _offset;
public:
// Child field models (one per ::test::StructSet member, in declaration order).
FieldModelVector<TBuffer, uint8_t> f1;
FieldModelVector<TBuffer, ::test::EnumSimple> f2;
FieldModelVector<TBuffer, ::test::FlagsSimple> f3;
FieldModelVector<TBuffer, ::test::StructSimple> f4;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructSet model.
// Top-level wrapper around FieldModel<::test::StructSet>: the buffer starts
// with a 4-byte full-size prefix, followed by the root field model at offset 4.
template <class TBuffer>
class StructSetModel : public FBE::Model<TBuffer>
{
public:
StructSetModel() : model(this->buffer(), 4) {}
StructSetModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
// Get the model size (fixed part plus all variable-length extra data)
size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
// Get the model type
static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructSet>::fbe_type(); }
// Check if the struct value is valid:
// the 4-byte full-size prefix must fit and cover the root model.
bool verify()
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return false;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
if (fbe_full_size < model.fbe_size())
return false;
return model.verify();
}
// Create a new model (begin phase): reserve the size prefix plus root pointer.
size_t create_begin()
{
size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
return fbe_begin;
}
// Create a new model (end phase): patch the full-size prefix.
size_t create_end(size_t fbe_begin)
{
size_t fbe_end = this->buffer().size();
uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
return fbe_full_size;
}
// Serialize the struct value; returns the total serialized size.
size_t serialize(const ::test::StructSet& value)
{
size_t fbe_begin = create_begin();
model.set(value);
size_t fbe_full_size = create_end(fbe_begin);
return fbe_full_size;
}
// Deserialize the struct value; returns the consumed size (0 on failure).
size_t deserialize(::test::StructSet& value) const noexcept
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return 0;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
if (fbe_full_size < model.fbe_size())
return 0;
model.get(value);
return fbe_full_size;
}
// Move to the next struct value
void next(size_t prev) noexcept
{
model.fbe_shift(prev);
}
public:
// Root field model for ::test::StructSet.
FieldModel<TBuffer, ::test::StructSet> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructSet final model.
// Final format is compact: no offset pointers or per-struct headers;
// all fields are written sequentially, so every operation threads a running
// offset through the child models.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructSet>
{
public:
// Child models start at 0; their offsets are assigned dynamically
// (via fbe_offset(...)) as fields are processed in order.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
{}
// Get the allocation size: the sum of all field allocation sizes.
size_t fbe_allocation_size(const ::test::StructSet& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
+ f4.fbe_allocation_size(fbe_value.f4)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset (const: _offset is mutable so traversal can reposition)
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type
static constexpr size_t fbe_type() noexcept { return 132; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid; returns the verified byte size,
// or std::numeric_limits<std::size_t>::max() when malformed.
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid; walks fields sequentially,
// propagating the size_t max sentinel on the first invalid field.
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value; returns the number of bytes read.
size_t get(::test::StructSet& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values, reading each field at its running offset.
size_t get_fields(::test::StructSet& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.get(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value; returns the number of bytes written.
size_t set(const ::test::StructSet& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values, writing each field at its running offset.
size_t set_fields(const ::test::StructSet& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
// Mutable so const traversal methods can reposition the model.
mutable size_t _offset;
public:
// Child final models (one per ::test::StructSet member, in declaration order).
FinalModelVector<TBuffer, uint8_t> f1;
FinalModelVector<TBuffer, ::test::EnumSimple> f2;
FinalModelVector<TBuffer, ::test::FlagsSimple> f3;
FinalModelVector<TBuffer, ::test::StructSimple> f4;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructSet final model.
// Top-level wrapper around FinalModel<::test::StructSet>.
// Wire layout: [4-byte struct size][4-byte struct type][body...],
// with the stored size including the 8-byte header itself; the root
// final model sits at offset 8.
template <class TBuffer>
class StructSetFinalModel : public FBE::Model<TBuffer>
{
public:
StructSetFinalModel() : _model(this->buffer(), 8) {}
StructSetFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
// Get the model type
static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructSet>::fbe_type(); }
// Check if the struct value is valid: the header must match and the
// verified body size must equal the recorded struct size.
bool verify()
{
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return false;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return false;
return ((8 + _model.verify()) == fbe_struct_size);
}
// Serialize the struct value; returns the total serialized size (0 on failure).
size_t serialize(const ::test::StructSet& value)
{
size_t fbe_initial_size = this->buffer().size();
uint32_t fbe_struct_type = (uint32_t)fbe_type();
// Pre-allocate using the worst-case estimate (8-byte header + body allocation size).
uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
return 0;
// Write the body, then shrink the buffer to the exact serialized size.
fbe_struct_size = (uint32_t)(8 + _model.set(value));
this->buffer().resize(fbe_initial_size + fbe_struct_size);
// Patch the 8-byte header (size, then type) just before the model offset.
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
return fbe_struct_size;
}
// Deserialize the struct value; returns bytes consumed,
// 8 on a header mismatch, or 0 when the buffer is too small.
size_t deserialize(::test::StructSet& value) const noexcept
{
assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
return 0;
size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
return 8;
return 8 + _model.get(value);
}
// Move to the next struct value
void next(size_t prev) noexcept
{
_model.fbe_shift(prev);
}
private:
FinalModel<TBuffer, ::test::StructSet> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructMap field model.
// Standard (non-final) format: the field itself is a 4-byte offset pointer;
// the struct body it points to begins with a 4-byte size and a 4-byte type,
// followed by the fields f1..f10 laid out sequentially.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructMap>
{
public:
// Each child field model is positioned relative to the struct body:
// the first field starts after the 4+4 byte (size + type) header.
FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 4 + 4)
, f2(buffer, f1.fbe_offset() + f1.fbe_size())
, f3(buffer, f2.fbe_offset() + f2.fbe_size())
, f4(buffer, f3.fbe_offset() + f3.fbe_size())
, f5(buffer, f4.fbe_offset() + f4.fbe_size())
, f6(buffer, f5.fbe_offset() + f5.fbe_size())
, f7(buffer, f6.fbe_offset() + f6.fbe_size())
, f8(buffer, f7.fbe_offset() + f7.fbe_size())
, f9(buffer, f8.fbe_offset() + f8.fbe_size())
, f10(buffer, f9.fbe_offset() + f9.fbe_size())
{}
// Get the field offset
size_t fbe_offset() const noexcept { return _offset; }
// Get the field size (always 4 bytes: the offset pointer)
size_t fbe_size() const noexcept { return 4; }
// Get the field body size (header plus all fixed field sizes)
size_t fbe_body() const noexcept
{
size_t fbe_result = 4 + 4
+ f1.fbe_size()
+ f2.fbe_size()
+ f3.fbe_size()
+ f4.fbe_size()
+ f5.fbe_size()
+ f6.fbe_size()
+ f7.fbe_size()
+ f8.fbe_size()
+ f9.fbe_size()
+ f10.fbe_size()
;
return fbe_result;
}
// Get the field extra size: the body plus all variable-length data
// referenced by the fields. Returns 0 when the offset pointer is null
// or out of range.
size_t fbe_extra() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
return 0;
// Temporarily rebase the buffer at the struct body while measuring children.
_buffer.shift(fbe_struct_offset);
size_t fbe_result = fbe_body()
+ f1.fbe_extra()
+ f2.fbe_extra()
+ f3.fbe_extra()
+ f4.fbe_extra()
+ f5.fbe_extra()
+ f6.fbe_extra()
+ f7.fbe_extra()
+ f8.fbe_extra()
+ f9.fbe_extra()
+ f10.fbe_extra()
;
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Get the field type (generated type id for ::test::StructMap)
static constexpr size_t fbe_type() noexcept { return 140; }
// Shift the current field offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current field offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid.
// A field lying beyond the buffer is treated as valid (missing/truncated),
// while a non-null pointer to a malformed body is invalid.
bool verify(bool fbe_verify_type = true) const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return true;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return false;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
if (fbe_struct_size < (4 + 4))
return false;
uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
if (fbe_verify_type && (fbe_struct_type != fbe_type()))
return false;
_buffer.shift(fbe_struct_offset);
bool fbe_result = verify_fields(fbe_struct_size);
_buffer.unshift(fbe_struct_offset);
return fbe_result;
}
// Check if the struct fields are valid.
// Fields that fall beyond the recorded struct size are skipped (returns true)
// for forward compatibility with older writers that serialized fewer fields.
bool verify_fields(size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
return true;
if (!f1.verify())
return false;
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
return true;
if (!f2.verify())
return false;
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
return true;
if (!f3.verify())
return false;
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
return true;
if (!f4.verify())
return false;
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
return true;
if (!f5.verify())
return false;
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
return true;
if (!f6.verify())
return false;
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
return true;
if (!f7.verify())
return false;
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
return true;
if (!f8.verify())
return false;
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
return true;
if (!f9.verify())
return false;
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
return true;
if (!f10.verify())
return false;
fbe_current_size += f10.fbe_size();
return true;
}
// Get the struct value (begin phase).
// Validates the offset pointer and header, then shifts the buffer base
// to the struct body. Returns the shift amount (0 on failure).
size_t get_begin() const noexcept
{
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
return 0;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
if (fbe_struct_size < (4 + 4))
return 0;
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Get the struct value (end phase): restore the buffer base.
void get_end(size_t fbe_begin) const noexcept
{
_buffer.unshift(fbe_begin);
}
// Get the struct value
void get(::test::StructMap& fbe_value) const noexcept
{
size_t fbe_begin = get_begin();
if (fbe_begin == 0)
return;
uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
get_fields(fbe_value, fbe_struct_size);
get_end(fbe_begin);
}
// Get the struct fields values.
// Fields beyond the recorded struct size are reset to their defaults
// (cleared) for forward compatibility.
void get_fields(::test::StructMap& fbe_value, size_t fbe_struct_size) const noexcept
{
size_t fbe_current_size = 4 + 4;
if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
f1.get(fbe_value.f1);
else
fbe_value.f1.clear();
fbe_current_size += f1.fbe_size();
if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
f2.get(fbe_value.f2);
else
fbe_value.f2.clear();
fbe_current_size += f2.fbe_size();
if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
f3.get(fbe_value.f3);
else
fbe_value.f3.clear();
fbe_current_size += f3.fbe_size();
if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
f4.get(fbe_value.f4);
else
fbe_value.f4.clear();
fbe_current_size += f4.fbe_size();
if ((fbe_current_size + f5.fbe_size()) <= fbe_struct_size)
f5.get(fbe_value.f5);
else
fbe_value.f5.clear();
fbe_current_size += f5.fbe_size();
if ((fbe_current_size + f6.fbe_size()) <= fbe_struct_size)
f6.get(fbe_value.f6);
else
fbe_value.f6.clear();
fbe_current_size += f6.fbe_size();
if ((fbe_current_size + f7.fbe_size()) <= fbe_struct_size)
f7.get(fbe_value.f7);
else
fbe_value.f7.clear();
fbe_current_size += f7.fbe_size();
if ((fbe_current_size + f8.fbe_size()) <= fbe_struct_size)
f8.get(fbe_value.f8);
else
fbe_value.f8.clear();
fbe_current_size += f8.fbe_size();
if ((fbe_current_size + f9.fbe_size()) <= fbe_struct_size)
f9.get(fbe_value.f9);
else
fbe_value.f9.clear();
fbe_current_size += f9.fbe_size();
if ((fbe_current_size + f10.fbe_size()) <= fbe_struct_size)
f10.get(fbe_value.f10);
else
fbe_value.f10.clear();
fbe_current_size += f10.fbe_size();
}
// Set the struct value (begin phase).
// Allocates the struct body, writes the offset pointer and the
// size/type header, then shifts the buffer base to the body.
size_t set_begin()
{
assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
return 0;
uint32_t fbe_struct_size = (uint32_t)fbe_body();
uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
return 0;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
*((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
_buffer.shift(fbe_struct_offset);
return fbe_struct_offset;
}
// Set the struct value (end phase): restore the buffer base.
void set_end(size_t fbe_begin)
{
_buffer.unshift(fbe_begin);
}
// Set the struct value
void set(const ::test::StructMap& fbe_value) noexcept
{
size_t fbe_begin = set_begin();
if (fbe_begin == 0)
return;
set_fields(fbe_value);
set_end(fbe_begin);
}
// Set the struct fields values
void set_fields(const ::test::StructMap& fbe_value) noexcept
{
f1.set(fbe_value.f1);
f2.set(fbe_value.f2);
f3.set(fbe_value.f3);
f4.set(fbe_value.f4);
f5.set(fbe_value.f5);
f6.set(fbe_value.f6);
f7.set(fbe_value.f7);
f8.set(fbe_value.f8);
f9.set(fbe_value.f9);
f10.set(fbe_value.f10);
}
private:
TBuffer& _buffer;
size_t _offset;
public:
// Child field models (one per ::test::StructMap member, in declaration order).
FieldModelMap<TBuffer, int32_t, uint8_t> f1;
FieldModelMap<TBuffer, int32_t, std::optional<uint8_t>> f2;
FieldModelMap<TBuffer, int32_t, FBE::buffer_t> f3;
FieldModelMap<TBuffer, int32_t, std::optional<FBE::buffer_t>> f4;
FieldModelMap<TBuffer, int32_t, ::test::EnumSimple> f5;
FieldModelMap<TBuffer, int32_t, std::optional<::test::EnumSimple>> f6;
FieldModelMap<TBuffer, int32_t, ::test::FlagsSimple> f7;
FieldModelMap<TBuffer, int32_t, std::optional<::test::FlagsSimple>> f8;
FieldModelMap<TBuffer, int32_t, ::test::StructSimple> f9;
FieldModelMap<TBuffer, int32_t, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructMap model.
// Top-level wrapper around FieldModel<::test::StructMap>: the buffer starts
// with a 4-byte full-size prefix, followed by the root field model at offset 4.
template <class TBuffer>
class StructMapModel : public FBE::Model<TBuffer>
{
public:
StructMapModel() : model(this->buffer(), 4) {}
StructMapModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
// Get the model size (fixed part plus all variable-length extra data)
size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
// Get the model type
static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructMap>::fbe_type(); }
// Check if the struct value is valid:
// the 4-byte full-size prefix must fit and cover the root model.
bool verify()
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return false;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
if (fbe_full_size < model.fbe_size())
return false;
return model.verify();
}
// Create a new model (begin phase): reserve the size prefix plus root pointer.
size_t create_begin()
{
size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
return fbe_begin;
}
// Create a new model (end phase): patch the full-size prefix.
size_t create_end(size_t fbe_begin)
{
size_t fbe_end = this->buffer().size();
uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
*((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
return fbe_full_size;
}
// Serialize the struct value; returns the total serialized size.
size_t serialize(const ::test::StructMap& value)
{
size_t fbe_begin = create_begin();
model.set(value);
size_t fbe_full_size = create_end(fbe_begin);
return fbe_full_size;
}
// Deserialize the struct value; returns the consumed size (0 on failure).
size_t deserialize(::test::StructMap& value) const noexcept
{
if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
return 0;
uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
if (fbe_full_size < model.fbe_size())
return 0;
model.get(value);
return fbe_full_size;
}
// Move to the next struct value
void next(size_t prev) noexcept
{
model.fbe_shift(prev);
}
public:
// Root field model for ::test::StructMap.
FieldModel<TBuffer, ::test::StructMap> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructMap final model
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructMap>
{
public:
// Construct the final model over 'buffer' at 'offset'.
// Child models start at 0; their offsets are assigned dynamically
// (via fbe_offset(...)) as fields are processed in order.
FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
, f1(buffer, 0)
, f2(buffer, 0)
, f3(buffer, 0)
, f4(buffer, 0)
, f5(buffer, 0)
, f6(buffer, 0)
, f7(buffer, 0)
, f8(buffer, 0)
, f9(buffer, 0)
, f10(buffer, 0)
{}
// Get the allocation size: the sum of all field allocation sizes
// (final format has no per-struct header or offset pointers).
size_t fbe_allocation_size(const ::test::StructMap& fbe_value) const noexcept
{
size_t fbe_result = 0
+ f1.fbe_allocation_size(fbe_value.f1)
+ f2.fbe_allocation_size(fbe_value.f2)
+ f3.fbe_allocation_size(fbe_value.f3)
+ f4.fbe_allocation_size(fbe_value.f4)
+ f5.fbe_allocation_size(fbe_value.f5)
+ f6.fbe_allocation_size(fbe_value.f6)
+ f7.fbe_allocation_size(fbe_value.f7)
+ f8.fbe_allocation_size(fbe_value.f8)
+ f9.fbe_allocation_size(fbe_value.f9)
+ f10.fbe_allocation_size(fbe_value.f10)
;
return fbe_result;
}
// Get the final offset
size_t fbe_offset() const noexcept { return _offset; }
// Set the final offset (const: _offset is declared mutable so const
// traversal methods can reposition the model)
size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
// Get the final type (generated type id for ::test::StructMap)
static constexpr size_t fbe_type() noexcept { return 140; }
// Shift the current final offset
void fbe_shift(size_t size) noexcept { _offset += size; }
// Unshift the current final offset
void fbe_unshift(size_t size) noexcept { _offset -= size; }
// Check if the struct value is valid; returns the verified byte size,
// or std::numeric_limits<std::size_t>::max() when malformed.
size_t verify() const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = verify_fields();
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Check if the struct fields are valid; walks fields sequentially,
// threading a running offset, and propagates the size_t max sentinel
// on the first invalid field.
size_t verify_fields() const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.verify();
if (fbe_field_size == std::numeric_limits<std::size_t>::max())
return std::numeric_limits<std::size_t>::max();
fbe_current_offset += fbe_field_size;
return fbe_current_offset;
}
// Get the struct value
size_t get(::test::StructMap& fbe_value) const noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = get_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Get the struct fields values
size_t get_fields(::test::StructMap& fbe_value) const noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.get(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.get(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.get(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.get(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.get(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.get(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.get(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.get(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.get(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.get(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
// Set the struct value
size_t set(const ::test::StructMap& fbe_value) noexcept
{
_buffer.shift(fbe_offset());
size_t fbe_result = set_fields(fbe_value);
_buffer.unshift(fbe_offset());
return fbe_result;
}
// Set the struct fields values
size_t set_fields(const ::test::StructMap& fbe_value) noexcept
{
size_t fbe_current_offset = 0;
size_t fbe_current_size = 0;
size_t fbe_field_size;
f1.fbe_offset(fbe_current_offset);
fbe_field_size = f1.set(fbe_value.f1);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f2.fbe_offset(fbe_current_offset);
fbe_field_size = f2.set(fbe_value.f2);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f3.fbe_offset(fbe_current_offset);
fbe_field_size = f3.set(fbe_value.f3);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f4.fbe_offset(fbe_current_offset);
fbe_field_size = f4.set(fbe_value.f4);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f5.fbe_offset(fbe_current_offset);
fbe_field_size = f5.set(fbe_value.f5);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f6.fbe_offset(fbe_current_offset);
fbe_field_size = f6.set(fbe_value.f6);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f7.fbe_offset(fbe_current_offset);
fbe_field_size = f7.set(fbe_value.f7);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f8.fbe_offset(fbe_current_offset);
fbe_field_size = f8.set(fbe_value.f8);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f9.fbe_offset(fbe_current_offset);
fbe_field_size = f9.set(fbe_value.f9);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
f10.fbe_offset(fbe_current_offset);
fbe_field_size = f10.set(fbe_value.f10);
fbe_current_offset += fbe_field_size;
fbe_current_size += fbe_field_size;
return fbe_current_size;
}
private:
TBuffer& _buffer;
mutable size_t _offset;
public:
FinalModelMap<TBuffer, int32_t, uint8_t> f1;
FinalModelMap<TBuffer, int32_t, std::optional<uint8_t>> f2;
FinalModelMap<TBuffer, int32_t, FBE::buffer_t> f3;
FinalModelMap<TBuffer, int32_t, std::optional<FBE::buffer_t>> f4;
FinalModelMap<TBuffer, int32_t, ::test::EnumSimple> f5;
FinalModelMap<TBuffer, int32_t, std::optional<::test::EnumSimple>> f6;
FinalModelMap<TBuffer, int32_t, ::test::FlagsSimple> f7;
FinalModelMap<TBuffer, int32_t, std::optional<::test::FlagsSimple>> f8;
FinalModelMap<TBuffer, int32_t, ::test::StructSimple> f9;
FinalModelMap<TBuffer, int32_t, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructMap final model
// Final-format model: the serialized value is an 8-byte header (4-byte
// struct size followed by 4-byte struct type) immediately followed by the
// compact, offset-free field data managed by the inner FinalModel.
template <class TBuffer>
class StructMapFinalModel : public FBE::Model<TBuffer>
{
public:
    // The inner final model starts at offset 8, right after the header.
    StructMapFinalModel() : _model(this->buffer(), 8) {}
    StructMapFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructMap>::fbe_type(); }
    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;
        // Read the size/type header stored just before the model data.
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;
        // The verified field bytes plus the 8-byte header must match the
        // recorded struct size exactly.
        return ((8 + _model.verify()) == fbe_struct_size);
    }
    // Serialize the struct value
    size_t serialize(const ::test::StructMap& value)
    {
        size_t fbe_initial_size = this->buffer().size();
        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;
        // The allocation size may differ from the bytes actually written by
        // set(); shrink the buffer to the real size, then write the header.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
        return fbe_struct_size;
    }
    // Deserialize the struct value
    size_t deserialize(::test::StructMap& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        // On a bad header only the 8-byte header is skipped and the value
        // is left untouched.
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;
        return 8 + _model.get(value);
    }
    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }
private:
    FinalModel<TBuffer, ::test::StructMap> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructHash field model
// Standard-format layout: this field occupies a 4-byte slot holding the
// offset of the struct body; the body starts with a 4-byte size and a
// 4-byte type marker, followed by the field slots back-to-back.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructHash>
{
public:
    // Each field model is chained right after the previous one; the first
    // field starts after the 4-byte size + 4-byte type body header.
    FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        , f1(buffer, 4 + 4)
        , f2(buffer, f1.fbe_offset() + f1.fbe_size())
        , f3(buffer, f2.fbe_offset() + f2.fbe_size())
        , f4(buffer, f3.fbe_offset() + f3.fbe_size())
        , f5(buffer, f4.fbe_offset() + f4.fbe_size())
        , f6(buffer, f5.fbe_offset() + f5.fbe_size())
        , f7(buffer, f6.fbe_offset() + f6.fbe_size())
        , f8(buffer, f7.fbe_offset() + f7.fbe_size())
        , f9(buffer, f8.fbe_offset() + f8.fbe_size())
        , f10(buffer, f9.fbe_offset() + f9.fbe_size())
    {}
    // Get the field offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Get the field size (the 4-byte offset slot, not the struct body)
    size_t fbe_size() const noexcept { return 4; }
    // Get the field body size (body header plus all field slots)
    size_t fbe_body() const noexcept
    {
        size_t fbe_result = 4 + 4
            + f1.fbe_size()
            + f2.fbe_size()
            + f3.fbe_size()
            + f4.fbe_size()
            + f5.fbe_size()
            + f6.fbe_size()
            + f7.fbe_size()
            + f8.fbe_size()
            + f9.fbe_size()
            + f10.fbe_size()
            ;
        return fbe_result;
    }
    // Get the field extra size (the body plus everything the fields point
    // to out-of-line); 0 if the field slot or offset is out of range.
    size_t fbe_extra() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
            return 0;
        // Temporarily rebase the buffer at the struct body so the nested
        // field models resolve their own offsets correctly.
        _buffer.shift(fbe_struct_offset);
        size_t fbe_result = fbe_body()
            + f1.fbe_extra()
            + f2.fbe_extra()
            + f3.fbe_extra()
            + f4.fbe_extra()
            + f5.fbe_extra()
            + f6.fbe_extra()
            + f7.fbe_extra()
            + f8.fbe_extra()
            + f9.fbe_extra()
            + f10.fbe_extra()
            ;
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Get the field type
    static constexpr size_t fbe_type() noexcept { return 141; }
    // Shift the current field offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current field offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }
    // Check if the struct value is valid
    bool verify(bool fbe_verify_type = true) const noexcept
    {
        // A field slot that lies outside the buffer is treated as valid:
        // the value is simply absent (truncated message).
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return true;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return false;
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        // The recorded body size must cover at least its own header.
        if (fbe_struct_size < (4 + 4))
            return false;
        uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
        if (fbe_verify_type && (fbe_struct_type != fbe_type()))
            return false;
        // Rebase the buffer at the struct body while verifying the fields.
        _buffer.shift(fbe_struct_offset);
        bool fbe_result = verify_fields(fbe_struct_size);
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Check if the struct fields are valid
    bool verify_fields(size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;
        // A field that does not fit inside the recorded struct size was not
        // serialized (e.g. an older schema) and is accepted as valid.
        if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
            return true;
        if (!f1.verify())
            return false;
        fbe_current_size += f1.fbe_size();
        if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
            return true;
        if (!f2.verify())
            return false;
        fbe_current_size += f2.fbe_size();
        if ((fbe_current_size + f3.fbe_size()) > fbe_struct_size)
            return true;
        if (!f3.verify())
            return false;
        fbe_current_size += f3.fbe_size();
        if ((fbe_current_size + f4.fbe_size()) > fbe_struct_size)
            return true;
        if (!f4.verify())
            return false;
        fbe_current_size += f4.fbe_size();
        if ((fbe_current_size + f5.fbe_size()) > fbe_struct_size)
            return true;
        if (!f5.verify())
            return false;
        fbe_current_size += f5.fbe_size();
        if ((fbe_current_size + f6.fbe_size()) > fbe_struct_size)
            return true;
        if (!f6.verify())
            return false;
        fbe_current_size += f6.fbe_size();
        if ((fbe_current_size + f7.fbe_size()) > fbe_struct_size)
            return true;
        if (!f7.verify())
            return false;
        fbe_current_size += f7.fbe_size();
        if ((fbe_current_size + f8.fbe_size()) > fbe_struct_size)
            return true;
        if (!f8.verify())
            return false;
        fbe_current_size += f8.fbe_size();
        if ((fbe_current_size + f9.fbe_size()) > fbe_struct_size)
            return true;
        if (!f9.verify())
            return false;
        fbe_current_size += f9.fbe_size();
        if ((fbe_current_size + f10.fbe_size()) > fbe_struct_size)
            return true;
        if (!f10.verify())
            return false;
        fbe_current_size += f10.fbe_size();
        return true;
    }
    // Get the struct value (begin phase)
    // Validates the slot and body header, rebases the buffer at the body,
    // and returns the shift amount (0 on failure, nothing shifted).
    size_t get_begin() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return 0;
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
        if (fbe_struct_size < (4 + 4))
            return 0;
        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }
    // Get the struct value (end phase): undo the shift done by get_begin()
    void get_end(size_t fbe_begin) const noexcept
    {
        _buffer.unshift(fbe_begin);
    }
    // Get the struct value
    void get(::test::StructHash& fbe_value) const noexcept
    {
        size_t fbe_begin = get_begin();
        if (fbe_begin == 0)
            return;
        // The body size is re-read at the (now rebased) buffer origin.
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
        get_fields(fbe_value, fbe_struct_size);
        get_end(fbe_begin);
    }
    // Get the struct fields values
    // Fields missing from the serialized struct (older schema) are reset
    // to their default (empty) state via clear().
    void get_fields(::test::StructHash& fbe_value, size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;
        if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
            f1.get(fbe_value.f1);
        else
            fbe_value.f1.clear();
        fbe_current_size += f1.fbe_size();
        if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
            f2.get(fbe_value.f2);
        else
            fbe_value.f2.clear();
        fbe_current_size += f2.fbe_size();
        if ((fbe_current_size + f3.fbe_size()) <= fbe_struct_size)
            f3.get(fbe_value.f3);
        else
            fbe_value.f3.clear();
        fbe_current_size += f3.fbe_size();
        if ((fbe_current_size + f4.fbe_size()) <= fbe_struct_size)
            f4.get(fbe_value.f4);
        else
            fbe_value.f4.clear();
        fbe_current_size += f4.fbe_size();
        if ((fbe_current_size + f5.fbe_size()) <= fbe_struct_size)
            f5.get(fbe_value.f5);
        else
            fbe_value.f5.clear();
        fbe_current_size += f5.fbe_size();
        if ((fbe_current_size + f6.fbe_size()) <= fbe_struct_size)
            f6.get(fbe_value.f6);
        else
            fbe_value.f6.clear();
        fbe_current_size += f6.fbe_size();
        if ((fbe_current_size + f7.fbe_size()) <= fbe_struct_size)
            f7.get(fbe_value.f7);
        else
            fbe_value.f7.clear();
        fbe_current_size += f7.fbe_size();
        if ((fbe_current_size + f8.fbe_size()) <= fbe_struct_size)
            f8.get(fbe_value.f8);
        else
            fbe_value.f8.clear();
        fbe_current_size += f8.fbe_size();
        if ((fbe_current_size + f9.fbe_size()) <= fbe_struct_size)
            f9.get(fbe_value.f9);
        else
            fbe_value.f9.clear();
        fbe_current_size += f9.fbe_size();
        if ((fbe_current_size + f10.fbe_size()) <= fbe_struct_size)
            f10.get(fbe_value.f10);
        else
            fbe_value.f10.clear();
        fbe_current_size += f10.fbe_size();
    }
    // Set the struct value (begin phase)
    // Allocates the struct body, stores its offset in the field slot,
    // writes the size/type header, rebases the buffer at the body and
    // returns the shift amount (0 on failure, nothing shifted).
    size_t set_begin()
    {
        assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;
        uint32_t fbe_struct_size = (uint32_t)fbe_body();
        uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
            return 0;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();
        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }
    // Set the struct value (end phase): undo the shift done by set_begin()
    void set_end(size_t fbe_begin)
    {
        _buffer.unshift(fbe_begin);
    }
    // Set the struct value
    void set(const ::test::StructHash& fbe_value) noexcept
    {
        size_t fbe_begin = set_begin();
        if (fbe_begin == 0)
            return;
        set_fields(fbe_value);
        set_end(fbe_begin);
    }
    // Set the struct fields values
    void set_fields(const ::test::StructHash& fbe_value) noexcept
    {
        f1.set(fbe_value.f1);
        f2.set(fbe_value.f2);
        f3.set(fbe_value.f3);
        f4.set(fbe_value.f4);
        f5.set(fbe_value.f5);
        f6.set(fbe_value.f6);
        f7.set(fbe_value.f7);
        f8.set(fbe_value.f8);
        f9.set(fbe_value.f9);
        f10.set(fbe_value.f10);
    }
private:
    TBuffer& _buffer;
    size_t _offset;
public:
    FieldModelMap<TBuffer, std::string, uint8_t> f1;
    FieldModelMap<TBuffer, std::string, std::optional<uint8_t>> f2;
    FieldModelMap<TBuffer, std::string, FBE::buffer_t> f3;
    FieldModelMap<TBuffer, std::string, std::optional<FBE::buffer_t>> f4;
    FieldModelMap<TBuffer, std::string, ::test::EnumSimple> f5;
    FieldModelMap<TBuffer, std::string, std::optional<::test::EnumSimple>> f6;
    FieldModelMap<TBuffer, std::string, ::test::FlagsSimple> f7;
    FieldModelMap<TBuffer, std::string, std::optional<::test::FlagsSimple>> f8;
    FieldModelMap<TBuffer, std::string, ::test::StructSimple> f9;
    FieldModelMap<TBuffer, std::string, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructHash model
// Standard-format model: a 4-byte total-size prefix followed by the root
// FieldModel (whose 4-byte slot points at the struct body).
template <class TBuffer>
class StructHashModel : public FBE::Model<TBuffer>
{
public:
    // The root field model starts at offset 4, after the size prefix.
    StructHashModel() : model(this->buffer(), 4) {}
    StructHashModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}
    // Get the model size
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructHash>::fbe_type(); }
    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;
        // The full size stored just before the root field model must cover
        // at least the root model itself.
        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        if (fbe_full_size < model.fbe_size())
            return false;
        return model.verify();
    }
    // Create a new model (begin phase): reserve the size prefix + root slot
    size_t create_begin()
    {
        size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
        return fbe_begin;
    }
    // Create a new model (end phase): record the total serialized size
    size_t create_end(size_t fbe_begin)
    {
        size_t fbe_end = this->buffer().size();
        uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
        return fbe_full_size;
    }
    // Serialize the struct value
    size_t serialize(const ::test::StructHash& value)
    {
        size_t fbe_begin = create_begin();
        model.set(value);
        size_t fbe_full_size = create_end(fbe_begin);
        return fbe_full_size;
    }
    // Deserialize the struct value; returns the full size consumed or 0
    size_t deserialize(::test::StructHash& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;
        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
        if (fbe_full_size < model.fbe_size())
            return 0;
        model.get(value);
        return fbe_full_size;
    }
    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        model.fbe_shift(prev);
    }
public:
    FieldModel<TBuffer, ::test::StructHash> model;
};
} // namespace test
} // namespace FBE
namespace FBE {
// Fast Binary Encoding ::test::StructHash final model
// Final models store the fields back-to-back with no offset table, so
// every operation walks the fields in declaration order while keeping a
// running byte offset.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructHash>
{
public:
    FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        , f1(buffer, 0)
        , f2(buffer, 0)
        , f3(buffer, 0)
        , f4(buffer, 0)
        , f5(buffer, 0)
        , f6(buffer, 0)
        , f7(buffer, 0)
        , f8(buffer, 0)
        , f9(buffer, 0)
        , f10(buffer, 0)
    {}
    // Get the allocation size needed to serialize the given value
    size_t fbe_allocation_size(const ::test::StructHash& fbe_value) const noexcept
    {
        size_t total = 0;
        total += f1.fbe_allocation_size(fbe_value.f1);
        total += f2.fbe_allocation_size(fbe_value.f2);
        total += f3.fbe_allocation_size(fbe_value.f3);
        total += f4.fbe_allocation_size(fbe_value.f4);
        total += f5.fbe_allocation_size(fbe_value.f5);
        total += f6.fbe_allocation_size(fbe_value.f6);
        total += f7.fbe_allocation_size(fbe_value.f7);
        total += f8.fbe_allocation_size(fbe_value.f8);
        total += f9.fbe_allocation_size(fbe_value.f9);
        total += f10.fbe_allocation_size(fbe_value.f10);
        return total;
    }
    // Get the final offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Set the final offset
    size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }
    // Get the final type
    static constexpr size_t fbe_type() noexcept { return 141; }
    // Shift the current final offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current final offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }
    // Check if the struct value is valid; returns the verified size, or
    // std::numeric_limits<std::size_t>::max() on failure
    size_t verify() const noexcept
    {
        _buffer.shift(fbe_offset());
        const size_t result = verify_fields();
        _buffer.unshift(fbe_offset());
        return result;
    }
    // Check if the struct fields are valid
    size_t verify_fields() const noexcept
    {
        const size_t invalid = std::numeric_limits<std::size_t>::max();
        size_t running_offset = 0;
        bool valid = true;
        // Verify one field at the running offset; stop processing (and
        // stop positioning further field models) after the first failure.
        auto check_field = [&](auto& field_model) {
            if (!valid)
                return;
            field_model.fbe_offset(running_offset);
            const size_t field_size = field_model.verify();
            if (field_size == invalid)
                valid = false;
            else
                running_offset += field_size;
        };
        check_field(f1);
        check_field(f2);
        check_field(f3);
        check_field(f4);
        check_field(f5);
        check_field(f6);
        check_field(f7);
        check_field(f8);
        check_field(f9);
        check_field(f10);
        return valid ? running_offset : invalid;
    }
    // Get the struct value
    size_t get(::test::StructHash& fbe_value) const noexcept
    {
        _buffer.shift(fbe_offset());
        const size_t bytes_read = get_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return bytes_read;
    }
    // Get the struct fields values
    size_t get_fields(::test::StructHash& fbe_value) const noexcept
    {
        size_t running_offset = 0;
        size_t bytes_total = 0;
        // Position a field model at the running offset, deserialize it and
        // advance by the number of bytes it actually consumed.
        auto read_field = [&](auto& field_model, auto& field_value) {
            field_model.fbe_offset(running_offset);
            const size_t field_size = field_model.get(field_value);
            running_offset += field_size;
            bytes_total += field_size;
        };
        read_field(f1, fbe_value.f1);
        read_field(f2, fbe_value.f2);
        read_field(f3, fbe_value.f3);
        read_field(f4, fbe_value.f4);
        read_field(f5, fbe_value.f5);
        read_field(f6, fbe_value.f6);
        read_field(f7, fbe_value.f7);
        read_field(f8, fbe_value.f8);
        read_field(f9, fbe_value.f9);
        read_field(f10, fbe_value.f10);
        return bytes_total;
    }
    // Set the struct value
    size_t set(const ::test::StructHash& fbe_value) noexcept
    {
        _buffer.shift(fbe_offset());
        const size_t bytes_written = set_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return bytes_written;
    }
    // Set the struct fields values
    size_t set_fields(const ::test::StructHash& fbe_value) noexcept
    {
        size_t running_offset = 0;
        size_t bytes_total = 0;
        // Serialize one field at the running offset and advance by the
        // number of bytes it produced.
        auto write_field = [&](auto& field_model, const auto& field_value) {
            field_model.fbe_offset(running_offset);
            const size_t field_size = field_model.set(field_value);
            running_offset += field_size;
            bytes_total += field_size;
        };
        write_field(f1, fbe_value.f1);
        write_field(f2, fbe_value.f2);
        write_field(f3, fbe_value.f3);
        write_field(f4, fbe_value.f4);
        write_field(f5, fbe_value.f5);
        write_field(f6, fbe_value.f6);
        write_field(f7, fbe_value.f7);
        write_field(f8, fbe_value.f8);
        write_field(f9, fbe_value.f9);
        write_field(f10, fbe_value.f10);
        return bytes_total;
    }
private:
    TBuffer& _buffer;
    mutable size_t _offset;
public:
    FinalModelMap<TBuffer, std::string, uint8_t> f1;
    FinalModelMap<TBuffer, std::string, std::optional<uint8_t>> f2;
    FinalModelMap<TBuffer, std::string, FBE::buffer_t> f3;
    FinalModelMap<TBuffer, std::string, std::optional<FBE::buffer_t>> f4;
    FinalModelMap<TBuffer, std::string, ::test::EnumSimple> f5;
    FinalModelMap<TBuffer, std::string, std::optional<::test::EnumSimple>> f6;
    FinalModelMap<TBuffer, std::string, ::test::FlagsSimple> f7;
    FinalModelMap<TBuffer, std::string, std::optional<::test::FlagsSimple>> f8;
    FinalModelMap<TBuffer, std::string, ::test::StructSimple> f9;
    FinalModelMap<TBuffer, std::string, std::optional<::test::StructSimple>> f10;
};
} // namespace FBE
namespace FBE {
namespace test {
// Fast Binary Encoding StructHash final model
// Final-format model: the serialized value is an 8-byte header (4-byte
// struct size followed by 4-byte struct type) immediately followed by the
// compact, offset-free field data managed by the inner FinalModel.
template <class TBuffer>
class StructHashFinalModel : public FBE::Model<TBuffer>
{
public:
    // The inner final model starts at offset 8, right after the header.
    StructHashFinalModel() : _model(this->buffer(), 8) {}
    StructHashFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructHash>::fbe_type(); }
    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;
        // Read the size/type header stored just before the model data.
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;
        // The verified field bytes plus the 8-byte header must match the
        // recorded struct size exactly.
        return ((8 + _model.verify()) == fbe_struct_size);
    }
    // Serialize the struct value
    size_t serialize(const ::test::StructHash& value)
    {
        size_t fbe_initial_size = this->buffer().size();
        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;
        // The allocation size may differ from the bytes actually written by
        // set(); shrink the buffer to the real size, then write the header.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;
        return fbe_struct_size;
    }
    // Deserialize the struct value
    size_t deserialize(::test::StructHash& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        // On a bad header only the 8-byte header is skipped and the value
        // is left untouched.
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;
        return 8 + _model.get(value);
    }
    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }
private:
    FinalModel<TBuffer, ::test::StructHash> _model;
};
} // namespace test
} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::StructHashEx field model
//
// NOTE(review): generator-style code — keep edits in the generator, not here.
// Wire layout: the field slot itself holds a 4-byte offset into the buffer;
// at that offset lives the struct body: [uint32 size][uint32 type][fields...].
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructHashEx>
{
public:
    FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        // Child field offsets are relative to the struct body: first field
        // starts right after the 8-byte [size][type] header.
        , f1(buffer, 4 + 4)
        , f2(buffer, f1.fbe_offset() + f1.fbe_size())
    {}

    // Get the field offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Get the field size (the 4-byte offset pointer stored in the parent)
    size_t fbe_size() const noexcept { return 4; }
    // Get the field body size (header + fixed parts of all fields)
    size_t fbe_body() const noexcept
    {
        size_t fbe_result = 4 + 4
            + f1.fbe_size()
            + f2.fbe_size()
            ;
        return fbe_result;
    }
    // Get the field extra size (body plus all variable-length data it points to)
    size_t fbe_extra() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
            return 0;

        // Temporarily rebase the buffer onto the struct body so child models
        // can use their body-relative offsets; unshift restores it below.
        _buffer.shift(fbe_struct_offset);
        size_t fbe_result = fbe_body()
            + f1.fbe_extra()
            + f2.fbe_extra()
            ;
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Get the field type (unique id assigned by the schema compiler)
    static constexpr size_t fbe_type() noexcept { return 142; }

    // Shift the current field offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current field offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }

    // Check if the struct value is valid.
    // A slot lying past the end of the buffer counts as "not present" and is
    // treated as valid (returns true); a present but malformed body is invalid.
    bool verify(bool fbe_verify_type = true) const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return true;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return false;

        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        if (fbe_struct_size < (4 + 4))
            return false;

        uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
        if (fbe_verify_type && (fbe_struct_type != fbe_type()))
            return false;

        _buffer.shift(fbe_struct_offset);
        bool fbe_result = verify_fields(fbe_struct_size);
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }

    // Check if the struct fields are valid.
    // Fields beyond the recorded struct size were added by a newer schema and
    // are skipped (forward compatibility), hence the early `return true`.
    bool verify_fields(size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;

        if ((fbe_current_size + f1.fbe_size()) > fbe_struct_size)
            return true;
        if (!f1.verify())
            return false;
        fbe_current_size += f1.fbe_size();

        if ((fbe_current_size + f2.fbe_size()) > fbe_struct_size)
            return true;
        if (!f2.verify())
            return false;
        fbe_current_size += f2.fbe_size();

        return true;
    }

    // Get the struct value (begin phase): validate the header and rebase the
    // buffer onto the struct body. Returns 0 on failure, otherwise the shift
    // amount that must later be passed to get_end().
    size_t get_begin() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return 0;

        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
        if (fbe_struct_size < (4 + 4))
            return 0;

        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }

    // Get the struct value (end phase): undo the shift performed by get_begin()
    void get_end(size_t fbe_begin) const noexcept
    {
        _buffer.unshift(fbe_begin);
    }

    // Get the struct value
    void get(::test::StructHashEx& fbe_value) const noexcept
    {
        size_t fbe_begin = get_begin();
        if (fbe_begin == 0)
            return;

        // After get_begin() the buffer is rebased, so the size prefix is at
        // relative offset 0.
        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
        get_fields(fbe_value, fbe_struct_size);
        get_end(fbe_begin);
    }

    // Get the struct fields values.
    // A field missing from an older-schema payload is cleared instead of read.
    void get_fields(::test::StructHashEx& fbe_value, size_t fbe_struct_size) const noexcept
    {
        size_t fbe_current_size = 4 + 4;

        if ((fbe_current_size + f1.fbe_size()) <= fbe_struct_size)
            f1.get(fbe_value.f1);
        else
            fbe_value.f1.clear();
        fbe_current_size += f1.fbe_size();

        if ((fbe_current_size + f2.fbe_size()) <= fbe_struct_size)
            f2.get(fbe_value.f2);
        else
            fbe_value.f2.clear();
        fbe_current_size += f2.fbe_size();
    }

    // Set the struct value (begin phase): allocate the struct body, write the
    // offset pointer into the parent slot and the [size][type] header, then
    // rebase the buffer onto the body. Returns 0 on failure.
    size_t set_begin()
    {
        assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_size = (uint32_t)fbe_body();
        uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
            return 0;

        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();

        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }

    // Set the struct value (end phase): undo the shift performed by set_begin()
    void set_end(size_t fbe_begin)
    {
        _buffer.unshift(fbe_begin);
    }

    // Set the struct value
    void set(const ::test::StructHashEx& fbe_value) noexcept
    {
        size_t fbe_begin = set_begin();
        if (fbe_begin == 0)
            return;

        set_fields(fbe_value);
        set_end(fbe_begin);
    }

    // Set the struct fields values
    void set_fields(const ::test::StructHashEx& fbe_value) noexcept
    {
        f1.set(fbe_value.f1);
        f2.set(fbe_value.f2);
    }

private:
    TBuffer& _buffer;
    size_t _offset;

public:
    FieldModelMap<TBuffer, ::test::StructSimple, ::test::StructNested> f1;
    FieldModelMap<TBuffer, ::test::StructSimple, std::optional<::test::StructNested>> f2;
};

} // namespace FBE
namespace FBE {
namespace test {

// Fast Binary Encoding StructHashEx model
//
// Top-level (offset-based) serialization model. The first 4 bytes of the
// buffer hold the full message size; the root FieldModel starts at offset 4.
template <class TBuffer>
class StructHashExModel : public FBE::Model<TBuffer>
{
public:
    StructHashExModel() : model(this->buffer(), 4) {}
    StructHashExModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructHashEx>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;

        // Full message size is stored 4 bytes before the root field model.
        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        if (fbe_full_size < model.fbe_size())
            return false;

        return model.verify();
    }

    // Create a new model (begin phase): reserve the size prefix + root slot
    size_t create_begin()
    {
        size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
        return fbe_begin;
    }

    // Create a new model (end phase): patch the full-size prefix
    size_t create_end(size_t fbe_begin)
    {
        size_t fbe_end = this->buffer().size();
        uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
        return fbe_full_size;
    }

    // Serialize the struct value; returns the full serialized size
    size_t serialize(const ::test::StructHashEx& value)
    {
        size_t fbe_begin = create_begin();
        model.set(value);
        size_t fbe_full_size = create_end(fbe_begin);
        return fbe_full_size;
    }

    // Deserialize the struct value; returns the consumed size (0 on failure)
    size_t deserialize(::test::StructHashEx& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;

        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
        if (fbe_full_size < model.fbe_size())
            return 0;

        model.get(value);
        return fbe_full_size;
    }

    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        model.fbe_shift(prev);
    }

public:
    FieldModel<TBuffer, ::test::StructHashEx> model;
};

} // namespace test
} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::StructHashEx final model
//
// "Final" format: fields are packed back-to-back with no offset table and no
// per-struct header, so every accessor returns the number of bytes consumed
// and the caller advances by that amount.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructHashEx>
{
public:
    FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
        // Child offsets are assigned dynamically during get/set/verify, so
        // they start at 0 here.
        , f1(buffer, 0)
        , f2(buffer, 0)
    {}

    // Get the allocation size required to serialize the given value
    size_t fbe_allocation_size(const ::test::StructHashEx& fbe_value) const noexcept
    {
        size_t fbe_result = 0
            + f1.fbe_allocation_size(fbe_value.f1)
            + f2.fbe_allocation_size(fbe_value.f2)
            ;
        return fbe_result;
    }

    // Get the final offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Set the final offset (const because _offset is mutable; models are
    // repositioned even through const references)
    size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }

    // Get the final type
    static constexpr size_t fbe_type() noexcept { return 142; }

    // Shift the current final offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current final offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }

    // Check if the struct value is valid; returns the verified size or
    // SIZE_MAX on failure
    size_t verify() const noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = verify_fields();
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Check if the struct fields are valid; SIZE_MAX propagates any failure
    size_t verify_fields() const noexcept
    {
        size_t fbe_current_offset = 0;
        size_t fbe_field_size;

        f1.fbe_offset(fbe_current_offset);
        fbe_field_size = f1.verify();
        if (fbe_field_size == std::numeric_limits<std::size_t>::max())
            return std::numeric_limits<std::size_t>::max();
        fbe_current_offset += fbe_field_size;

        f2.fbe_offset(fbe_current_offset);
        fbe_field_size = f2.verify();
        if (fbe_field_size == std::numeric_limits<std::size_t>::max())
            return std::numeric_limits<std::size_t>::max();
        fbe_current_offset += fbe_field_size;

        return fbe_current_offset;
    }

    // Get the struct value; returns the number of bytes read
    size_t get(::test::StructHashEx& fbe_value) const noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = get_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Get the struct fields values, advancing field offsets sequentially
    size_t get_fields(::test::StructHashEx& fbe_value) const noexcept
    {
        size_t fbe_current_offset = 0;
        size_t fbe_current_size = 0;
        size_t fbe_field_size;

        f1.fbe_offset(fbe_current_offset);
        fbe_field_size = f1.get(fbe_value.f1);
        fbe_current_offset += fbe_field_size;
        fbe_current_size += fbe_field_size;

        f2.fbe_offset(fbe_current_offset);
        fbe_field_size = f2.get(fbe_value.f2);
        fbe_current_offset += fbe_field_size;
        fbe_current_size += fbe_field_size;

        return fbe_current_size;
    }

    // Set the struct value; returns the number of bytes written
    size_t set(const ::test::StructHashEx& fbe_value) noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = set_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Set the struct fields values, advancing field offsets sequentially
    size_t set_fields(const ::test::StructHashEx& fbe_value) noexcept
    {
        size_t fbe_current_offset = 0;
        size_t fbe_current_size = 0;
        size_t fbe_field_size;

        f1.fbe_offset(fbe_current_offset);
        fbe_field_size = f1.set(fbe_value.f1);
        fbe_current_offset += fbe_field_size;
        fbe_current_size += fbe_field_size;

        f2.fbe_offset(fbe_current_offset);
        fbe_field_size = f2.set(fbe_value.f2);
        fbe_current_offset += fbe_field_size;
        fbe_current_size += fbe_field_size;

        return fbe_current_size;
    }

private:
    TBuffer& _buffer;
    // mutable: repositioned by the const fbe_offset(size_t) setter above
    mutable size_t _offset;

public:
    FinalModelMap<TBuffer, ::test::StructSimple, ::test::StructNested> f1;
    FinalModelMap<TBuffer, ::test::StructSimple, std::optional<::test::StructNested>> f2;
};

} // namespace FBE
namespace FBE {
namespace test {

// Fast Binary Encoding StructHashEx final model
//
// Top-level wrapper for the packed "final" format. Each message is prefixed
// by an 8-byte header: [uint32 size][uint32 type]; the payload starts at
// offset 8.
template <class TBuffer>
class StructHashExFinalModel : public FBE::Model<TBuffer>
{
public:
    StructHashExFinalModel() : _model(this->buffer(), 8) {}
    StructHashExFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}

    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructHashEx>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;

        // Header lives immediately before the payload (offsets -8 and -4).
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;

        return ((8 + _model.verify()) == fbe_struct_size);
    }

    // Serialize the struct value; returns the full serialized size (0 on failure)
    size_t serialize(const ::test::StructHashEx& value)
    {
        size_t fbe_initial_size = this->buffer().size();

        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;

        // The actual packed size can be smaller than the allocation estimate;
        // trim the buffer and record the real size in the header.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);

        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;

        return fbe_struct_size;
    }

    // Deserialize the struct value; returns the consumed size.
    // On a size/type mismatch only the 8-byte header is consumed.
    size_t deserialize(::test::StructHashEx& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;

        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;

        return 8 + _model.get(value);
    }

    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }

private:
    FinalModel<TBuffer, ::test::StructHashEx> _model;
};

} // namespace test
} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::StructEmpty field model
//
// NOTE(review): generator-style code for a struct with no fields; the body
// consists solely of the [uint32 size][uint32 type] header. Wire layout: the
// field slot holds a 4-byte offset pointing at that body.
template <class TBuffer>
class FieldModel<TBuffer, ::test::StructEmpty>
{
public:
    FieldModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
    {}

    // Get the field offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Get the field size (the 4-byte offset pointer stored in the parent)
    size_t fbe_size() const noexcept { return 4; }
    // Get the field body size (header only — no fields)
    size_t fbe_body() const noexcept
    {
        size_t fbe_result = 4 + 4
            ;
        return fbe_result;
    }
    // Get the field extra size
    size_t fbe_extra() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4) > _buffer.size()))
            return 0;

        _buffer.shift(fbe_struct_offset);
        size_t fbe_result = fbe_body()
            ;
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }
    // Get the field type (unique id assigned by the schema compiler)
    static constexpr size_t fbe_type() noexcept { return 143; }

    // Shift the current field offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current field offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }

    // Check if the struct value is valid.
    // A slot lying past the end of the buffer counts as "not present" and is
    // treated as valid (returns true).
    bool verify(bool fbe_verify_type = true) const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return true;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return false;

        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        if (fbe_struct_size < (4 + 4))
            return false;

        uint32_t fbe_struct_type = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4));
        if (fbe_verify_type && (fbe_struct_type != fbe_type()))
            return false;

        _buffer.shift(fbe_struct_offset);
        bool fbe_result = verify_fields(fbe_struct_size);
        _buffer.unshift(fbe_struct_offset);
        return fbe_result;
    }

    // Check if the struct fields are valid (no fields — always valid)
    bool verify_fields(size_t fbe_struct_size) const noexcept
    {
        return true;
    }

    // Get the struct value (begin phase): validate the header and rebase the
    // buffer onto the struct body. Returns 0 on failure.
    size_t get_begin() const noexcept
    {
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_offset = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset()));
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + 4 + 4) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + 4 + 4) > _buffer.size()))
            return 0;

        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset));
        assert((fbe_struct_size >= (4 + 4)) && "Model is broken!");
        if (fbe_struct_size < (4 + 4))
            return 0;

        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }

    // Get the struct value (end phase): undo the shift from get_begin()
    void get_end(size_t fbe_begin) const noexcept
    {
        _buffer.unshift(fbe_begin);
    }

    // Get the struct value
    void get(::test::StructEmpty& fbe_value) const noexcept
    {
        size_t fbe_begin = get_begin();
        if (fbe_begin == 0)
            return;

        uint32_t fbe_struct_size = *((const uint32_t*)(_buffer.data() + _buffer.offset()));
        get_fields(fbe_value, fbe_struct_size);
        get_end(fbe_begin);
    }

    // Get the struct fields values (no fields — nothing to read)
    void get_fields(::test::StructEmpty& fbe_value, size_t fbe_struct_size) const noexcept
    {
    }

    // Set the struct value (begin phase): allocate the body, write the offset
    // pointer and the [size][type] header, then rebase the buffer. 0 on failure.
    size_t set_begin()
    {
        assert(((_buffer.offset() + fbe_offset() + fbe_size()) <= _buffer.size()) && "Model is broken!");
        if ((_buffer.offset() + fbe_offset() + fbe_size()) > _buffer.size())
            return 0;

        uint32_t fbe_struct_size = (uint32_t)fbe_body();
        uint32_t fbe_struct_offset = (uint32_t)(_buffer.allocate(fbe_struct_size) - _buffer.offset());
        assert(((fbe_struct_offset > 0) && ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) <= _buffer.size())) && "Model is broken!");
        if ((fbe_struct_offset == 0) || ((_buffer.offset() + fbe_struct_offset + fbe_struct_size) > _buffer.size()))
            return 0;

        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_offset())) = fbe_struct_offset;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset)) = fbe_struct_size;
        *((uint32_t*)(_buffer.data() + _buffer.offset() + fbe_struct_offset + 4)) = (uint32_t)fbe_type();

        _buffer.shift(fbe_struct_offset);
        return fbe_struct_offset;
    }

    // Set the struct value (end phase): undo the shift from set_begin()
    void set_end(size_t fbe_begin)
    {
        _buffer.unshift(fbe_begin);
    }

    // Set the struct value
    void set(const ::test::StructEmpty& fbe_value) noexcept
    {
        size_t fbe_begin = set_begin();
        if (fbe_begin == 0)
            return;

        set_fields(fbe_value);
        set_end(fbe_begin);
    }

    // Set the struct fields values (no fields — nothing to write)
    void set_fields(const ::test::StructEmpty& fbe_value) noexcept
    {
    }

private:
    TBuffer& _buffer;
    size_t _offset;

public:
};

} // namespace FBE
namespace FBE {
namespace test {

// Fast Binary Encoding StructEmpty model
//
// Top-level (offset-based) serialization model. The first 4 bytes of the
// buffer hold the full message size; the root FieldModel starts at offset 4.
template <class TBuffer>
class StructEmptyModel : public FBE::Model<TBuffer>
{
public:
    StructEmptyModel() : model(this->buffer(), 4) {}
    StructEmptyModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), model(this->buffer(), 4) {}

    // Get the model size
    size_t fbe_size() const noexcept { return model.fbe_size() + model.fbe_extra(); }
    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FieldModel<TBuffer, ::test::StructEmpty>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return false;

        // Full message size is stored 4 bytes before the root field model.
        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        if (fbe_full_size < model.fbe_size())
            return false;

        return model.verify();
    }

    // Create a new model (begin phase): reserve the size prefix + root slot
    size_t create_begin()
    {
        size_t fbe_begin = this->buffer().allocate(4 + model.fbe_size());
        return fbe_begin;
    }

    // Create a new model (end phase): patch the full-size prefix
    size_t create_end(size_t fbe_begin)
    {
        size_t fbe_end = this->buffer().size();
        uint32_t fbe_full_size = (uint32_t)(fbe_end - fbe_begin);
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4)) = fbe_full_size;
        return fbe_full_size;
    }

    // Serialize the struct value; returns the full serialized size
    size_t serialize(const ::test::StructEmpty& value)
    {
        size_t fbe_begin = create_begin();
        model.set(value);
        size_t fbe_full_size = create_end(fbe_begin);
        return fbe_full_size;
    }

    // Deserialize the struct value; returns the consumed size (0 on failure)
    size_t deserialize(::test::StructEmpty& value) const noexcept
    {
        if ((this->buffer().offset() + model.fbe_offset() - 4) > this->buffer().size())
            return 0;

        uint32_t fbe_full_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + model.fbe_offset() - 4));
        assert((fbe_full_size >= model.fbe_size()) && "Model is broken!");
        if (fbe_full_size < model.fbe_size())
            return 0;

        model.get(value);
        return fbe_full_size;
    }

    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        model.fbe_shift(prev);
    }

public:
    FieldModel<TBuffer, ::test::StructEmpty> model;
};

} // namespace test
} // namespace FBE
namespace FBE {

// Fast Binary Encoding ::test::StructEmpty final model
//
// "Final" (packed) format model for a struct with no fields: its payload
// occupies zero bytes, so every accessor reads/writes nothing and returns 0.
template <class TBuffer>
class FinalModel<TBuffer, ::test::StructEmpty>
{
public:
    FinalModel(TBuffer& buffer, size_t offset) noexcept : _buffer(buffer), _offset(offset)
    {}

    // Get the allocation size (zero — no fields)
    size_t fbe_allocation_size(const ::test::StructEmpty& fbe_value) const noexcept
    {
        size_t fbe_result = 0
            ;
        return fbe_result;
    }

    // Get the final offset
    size_t fbe_offset() const noexcept { return _offset; }
    // Set the final offset (const because _offset is mutable; models are
    // repositioned even through const references)
    size_t fbe_offset(size_t offset) const noexcept { return _offset = offset; }

    // Get the final type
    static constexpr size_t fbe_type() noexcept { return 143; }

    // Shift the current final offset
    void fbe_shift(size_t size) noexcept { _offset += size; }
    // Unshift the current final offset
    void fbe_unshift(size_t size) noexcept { _offset -= size; }

    // Check if the struct value is valid; returns the verified size
    size_t verify() const noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = verify_fields();
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Check if the struct fields are valid (no fields — zero bytes, always ok)
    size_t verify_fields() const noexcept
    {
        return 0;
    }

    // Get the struct value; returns the number of bytes read
    size_t get(::test::StructEmpty& fbe_value) const noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = get_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Get the struct fields values (nothing to read)
    size_t get_fields(::test::StructEmpty& fbe_value) const noexcept
    {
        return 0;
    }

    // Set the struct value; returns the number of bytes written
    size_t set(const ::test::StructEmpty& fbe_value) noexcept
    {
        _buffer.shift(fbe_offset());
        size_t fbe_result = set_fields(fbe_value);
        _buffer.unshift(fbe_offset());
        return fbe_result;
    }

    // Set the struct fields values (nothing to write)
    size_t set_fields(const ::test::StructEmpty& fbe_value) noexcept
    {
        return 0;
    }

private:
    TBuffer& _buffer;
    // mutable: repositioned by the const fbe_offset(size_t) setter above
    mutable size_t _offset;

public:
};

} // namespace FBE
namespace FBE {
namespace test {

// Fast Binary Encoding StructEmpty final model
//
// Top-level wrapper for the packed "final" format. Each message is prefixed
// by an 8-byte header: [uint32 size][uint32 type]; the payload starts at
// offset 8.
template <class TBuffer>
class StructEmptyFinalModel : public FBE::Model<TBuffer>
{
public:
    StructEmptyFinalModel() : _model(this->buffer(), 8) {}
    StructEmptyFinalModel(const std::shared_ptr<TBuffer>& buffer) : FBE::Model<TBuffer>(buffer), _model(this->buffer(), 8) {}

    // Get the model type
    static constexpr size_t fbe_type() noexcept { return FinalModel<TBuffer, ::test::StructEmpty>::fbe_type(); }

    // Check if the struct value is valid
    bool verify()
    {
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return false;

        // Header lives immediately before the payload (offsets -8 and -4).
        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return false;

        return ((8 + _model.verify()) == fbe_struct_size);
    }

    // Serialize the struct value; returns the full serialized size (0 on failure)
    size_t serialize(const ::test::StructEmpty& value)
    {
        size_t fbe_initial_size = this->buffer().size();

        uint32_t fbe_struct_type = (uint32_t)fbe_type();
        uint32_t fbe_struct_size = (uint32_t)(8 + _model.fbe_allocation_size(value));
        uint32_t fbe_struct_offset = (uint32_t)(this->buffer().allocate(fbe_struct_size) - this->buffer().offset());
        assert(((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + fbe_struct_offset + fbe_struct_size) > this->buffer().size())
            return 0;

        // The actual packed size can differ from the allocation estimate;
        // trim the buffer and record the real size in the header.
        fbe_struct_size = (uint32_t)(8 + _model.set(value));
        this->buffer().resize(fbe_initial_size + fbe_struct_size);

        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8)) = fbe_struct_size;
        *((uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4)) = fbe_struct_type;

        return fbe_struct_size;
    }

    // Deserialize the struct value; returns the consumed size.
    // On a size/type mismatch only the 8-byte header is consumed.
    size_t deserialize(::test::StructEmpty& value) const noexcept
    {
        assert(((this->buffer().offset() + _model.fbe_offset()) <= this->buffer().size()) && "Model is broken!");
        if ((this->buffer().offset() + _model.fbe_offset()) > this->buffer().size())
            return 0;

        size_t fbe_struct_size = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 8));
        size_t fbe_struct_type = *((const uint32_t*)(this->buffer().data() + this->buffer().offset() + _model.fbe_offset() - 4));
        assert(((fbe_struct_size > 0) && (fbe_struct_type == fbe_type())) && "Model is broken!");
        if ((fbe_struct_size == 0) || (fbe_struct_type != fbe_type()))
            return 8;

        return 8 + _model.get(value);
    }

    // Move to the next struct value
    void next(size_t prev) noexcept
    {
        _model.fbe_shift(prev);
    }

private:
    FinalModel<TBuffer, ::test::StructEmpty> _model;
};

} // namespace test
} // namespace FBE
| 35.664907
| 144
| 0.618517
|
[
"model"
] |
b55828ddf7a75c8fca6b68e819644bb4f9ad2211
| 1,742
|
h
|
C
|
program/engine.h
|
pondodev/custom-project
|
cc6a2de54aa7ea256fa62dc6fba4806ce8532792
|
[
"Unlicense"
] | null | null | null |
program/engine.h
|
pondodev/custom-project
|
cc6a2de54aa7ea256fa62dc6fba4806ce8532792
|
[
"Unlicense"
] | null | null | null |
program/engine.h
|
pondodev/custom-project
|
cc6a2de54aa7ea256fa62dc6fba4806ce8532792
|
[
"Unlicense"
] | null | null | null |
#ifndef ENGINE_H
#define ENGINE_H
// Fixed window dimensions in pixels.
#define WINDOW_WIDTH 1024
#define WINDOW_HEIGHT 512
// Total pixel count of the framebuffer.
// Parenthesized so the macro expands safely inside larger expressions:
// the unparenthesized form made `x % FRAMEBUFFER_LENGTH` expand to
// `x % WINDOW_WIDTH * WINDOW_HEIGHT`, which mis-associates.
#define FRAMEBUFFER_LENGTH (WINDOW_WIDTH * WINDOW_HEIGHT)
#include <iostream>
#include <fstream>
#include <string>
#include <cstdint>
#include <vector>
#include <cmath>
#include <array>
#include <algorithm>
#include <mutex>
#include "color.h"
#include "player.h"
#include "texture.h"
#include "entity_engine.h"
// Tile kinds stored in the level map.
// Non-negative values double as wall-texture indices (Wall1..Wall4 map to
// texture atlas slots 0..3 — see get_map_tile()/wall_textures usage);
// Floor (-1) marks a walkable cell with no wall texture.
// NOTE(review): left as an unscoped enum because callers rely on the bare
// enumerator names and the implicit int conversion for texture indexing.
enum MapTile {
    Floor = -1,
    Wall1 = 0,
    Wall2 = 1,
    Wall3 = 2,
    Wall4 = 3
};
// Software-rendered raycasting engine: owns the level map, textures, enemy
// entities and a CPU framebuffer of WINDOW_WIDTH x WINDOW_HEIGHT pixels.
// Mutexes guard the framebuffer and player input state, so update()/render()
// and the input setters are presumably called from different threads —
// TODO confirm against the call sites.
class Engine {
public:
    // Loads the map and the wall/enemy texture atlases from the given paths.
    // NOTE(review): the `const std::string` value parameters would normally be
    // `const std::string&`, but changing them here alone would break linkage
    // with the out-of-line definitions in engine.cpp — change both together.
    Engine( const std::string map_path, const std::string wall_tex_path, const std::string enemy_tex_path );

    // Advances the simulation by delta_time seconds (units assumed — confirm).
    void update( const float delta_time );
    // Rasterizes the current scene into the internal framebuffer.
    void render();
    // Copies the framebuffer out; `target` must hold at least
    // FRAMEBUFFER_LENGTH pixels worth of bytes — TODO confirm expected layout.
    void get_framebuffer( uint8_t* target );
    // Rotates the player view by `delta` (guarded by player_view_lock).
    void move_view( const float delta );
    // Sets the player's movement direction (guarded by player_move_dir_lock).
    void set_player_move_dir( const Vec2 dir );

private:
    // CPU-side pixel storage; written by render(), read by get_framebuffer().
    Color framebuffer[ FRAMEBUFFER_LENGTH ];

    // Level data: row-major tile grid of map_width x map_height (presumed
    // layout — verify against the map loader).
    std::vector<MapTile> map;
    unsigned int map_width;
    unsigned int map_height;

    Player player;
    Texture wall_textures;
    Texture enemy_textures;
    EntityEngine enemy_manager;
    std::vector<Entity> active_enemies;
    // Per-screen-column wall distance, used to occlude sprites behind walls.
    std::array<float, WINDOW_WIDTH> depth_buffer;

    // Locks for cross-thread access to the framebuffer and player input.
    std::mutex framebuffer_lock;
    std::mutex player_view_lock;
    std::mutex player_move_dir_lock;

    // Fills the whole framebuffer with a single color.
    void clear_framebuffer( const Color color );
    // Draws an axis-aligned filled rectangle in framebuffer coordinates.
    void draw_rect( const int x, const int y, const int w, const int h, const Color color );
    // Projects and draws one enemy sprite (depth-tested via depth_buffer).
    void draw_sprite( const Entity enemy );
    // Writes a single pixel; bounds handling lives in the implementation.
    void draw_pixel( const int x, const int y, const Color color );
    // Returns the tile at map coordinates (x, y).
    MapTile get_map_tile( const int x, const int y ) const;
    // Spawns an enemy at world position (x, y).
    void add_enemy( const float x, const float y, const float speed, const EnemyType type );
    // Moves all active enemies for this frame.
    void enemy_movement_system( const float delta_time );
};
#endif
| 25.617647
| 108
| 0.711825
|
[
"render",
"vector"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.