diff options
author | Peter Schiffer <pschiffe@redhat.com> | 2013-04-04 19:02:29 +0200
---|---|---
committer | Peter Schiffer <pschiffe@redhat.com> | 2013-04-04 19:02:29 +0200
commit | 381f0c0a5cd98a48dc6d5a5e0b98443707d8ea81 (patch) |
tree | bdad2e739aac00eb02e71dfe7d4074268c6f69a4 /src/hardware |
parent | c47c5c19c5857439db30e40d4a691f5b700adf5f (diff) |
download | openlmi-providers-381f0c0a5cd98a48dc6d5a5e0b98443707d8ea81.tar.gz openlmi-providers-381f0c0a5cd98a48dc6d5a5e0b98443707d8ea81.tar.xz openlmi-providers-381f0c0a5cd98a48dc6d5a5e0b98443707d8ea81.zip |
Hardware: Added Processor Cache Memory Provider
New Providers:
* LMI_ProcessorCacheMemoryProvider
* LMI_AssociatedProcessorCacheMemoryProvider
Other Changes:
* Optimized usage of string constants (see the first sketch after this list)
* Fixed incorrect pointer usage in dmidecode.c (see the second sketch after this list)
* Filled unknown mandatory fields in providers with the "Unknown" value
* Replaced hard-coded numbers with LMI constants
* Minor optimization: don't gather data that won't be used
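
The string-constant optimization visible in the diff replaces run-time "%s" formatting of compile-time constants with adjacent string literals, which the compiler concatenates into a single format string. A minimal sketch of the before/after pattern; the values given here for the ORGID and CPU_CAP_CLASS_NAME macros are assumptions for illustration only (the real definitions live in globals.h and LMI_Hardware.h):

```c
/* Minimal sketch of the string-constant optimization, with assumed macro values. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

#define ORGID "LMI"                          /* assumed value, for illustration */
#define CPU_CAP_CLASS_NAME "ProcessorCapabilities"

int main(void)
{
    char *instance_id = NULL;
    const char *cpu_id = "CPU0";             /* hypothetical device ID */

    /* Before: every constant goes through "%s" at run time:
     *   asprintf(&instance_id, "%s:%s:%s", ORGID, CPU_CAP_CLASS_NAME, cpu_id)
     * After: adjacent literals are concatenated by the compiler, so only the
     * variable part is formatted at run time. */
    if (asprintf(&instance_id, ORGID ":" CPU_CAP_CLASS_NAME ":%s", cpu_id) < 0) {
        return EXIT_FAILURE;
    }

    printf("%s\n", instance_id);             /* LMI:ProcessorCapabilities:CPU0 */
    free(instance_id);
    return EXIT_SUCCESS;
}
```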
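
The pointer fix concerns out-parameters such as `DmiProcessor **cpus` in dmidecode.c: `*cpus` points at a single calloc'd array, so elements must be reached as `(*cpus)[i]`, not `cpus[i]->field`, which indexes the double pointer itself. A trimmed-down sketch under that assumption, with a hypothetical one-field struct and hard-wired counts standing in for the real dmidecode parsing:

```c
/* Minimal sketch of the dmidecode.c pointer fix: one contiguous array behind
 * a double pointer, accessed as (*cpus)[i]. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned cores;                          /* trimmed-down stand-in for DmiProcessor */
} DmiProcessor;

/* Fills *cpus with one contiguous calloc'd array of *cpus_nb elements
 * (hard-wired here; the real code parses dmidecode output). */
static int get_processors(DmiProcessor **cpus, unsigned *cpus_nb)
{
    *cpus_nb = 2;
    *cpus = calloc(*cpus_nb, sizeof(DmiProcessor));
    if (!*cpus) {
        return -1;
    }

    for (unsigned i = 0; i < *cpus_nb; i++) {
        /* Wrong (pre-fix): cpus[i]->cores = 4;  -- indexes the double pointer
         * itself, so anything past i == 0 dereferences garbage. */
        (*cpus)[i].cores = 4;                /* correct (post-fix) */
    }
    return 0;
}

int main(void)
{
    DmiProcessor *cpus = NULL;
    unsigned cpus_nb = 0;

    if (get_processors(&cpus, &cpus_nb) == 0) {
        for (unsigned i = 0; i < cpus_nb; i++) {
            printf("cpu %u: %u cores\n", i, cpus[i].cores);
        }
        free(cpus);
    }
    return 0;
}
```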
Diffstat (limited to 'src/hardware')
-rw-r--r-- | src/hardware/CMakeLists.txt | 3
-rw-r--r-- | src/hardware/LMI_AssociatedProcessorCacheMemoryProvider.c | 652
-rw-r--r-- | src/hardware/LMI_Hardware.h | 2
-rw-r--r-- | src/hardware/LMI_ProcessorCacheMemoryProvider.c | 405
-rw-r--r-- | src/hardware/LMI_ProcessorCapabilitiesProvider.c | 28
-rw-r--r-- | src/hardware/LMI_ProcessorElementCapabilitiesProvider.c | 28
-rw-r--r-- | src/hardware/LMI_ProcessorProvider.c | 51
-rw-r--r-- | src/hardware/dmidecode.c | 427
-rw-r--r-- | src/hardware/dmidecode.h | 31
-rw-r--r-- | src/hardware/sysfs.c | 295
-rw-r--r-- | src/hardware/sysfs.h | 64
11 files changed, 1891 insertions, 95 deletions
diff --git a/src/hardware/CMakeLists.txt b/src/hardware/CMakeLists.txt index 352cebe..3949a8a 100644 --- a/src/hardware/CMakeLists.txt +++ b/src/hardware/CMakeLists.txt @@ -7,9 +7,12 @@ set(provider_SRCS dmidecode.c lscpu.c cpuinfo.c + sysfs.c LMI_ProcessorProvider.c LMI_ProcessorCapabilitiesProvider.c LMI_ProcessorElementCapabilitiesProvider.c + LMI_ProcessorCacheMemoryProvider.c + LMI_AssociatedProcessorCacheMemoryProvider.c ) konkretcmpi_generate(${MOF} diff --git a/src/hardware/LMI_AssociatedProcessorCacheMemoryProvider.c b/src/hardware/LMI_AssociatedProcessorCacheMemoryProvider.c new file mode 100644 index 0000000..b06aa22 --- /dev/null +++ b/src/hardware/LMI_AssociatedProcessorCacheMemoryProvider.c @@ -0,0 +1,652 @@ +/* + * Copyright (C) 2013 Red Hat, Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Authors: Peter Schiffer <pschiffe@redhat.com> + */ + +#include <konkret/konkret.h> +#include "LMI_AssociatedProcessorCacheMemory.h" +#include "LMI_Processor.h" +#include "LMI_Hardware.h" +#include "globals.h" +#include "dmidecode.h" +#include "lscpu.h" +#include "sysfs.h" + +CMPIUint16 get_cache_level(const unsigned level); +CMPIUint16 get_write_policy(const char *op_mode); +CMPIUint16 get_cache_type(const char *type); +CMPIUint16 get_cache_associativity_dmi(const char *assoc); +CMPIUint16 get_cache_associativity_sysfs(const unsigned ways_of_assoc); + +static const CMPIBroker* _cb; + +static void LMI_AssociatedProcessorCacheMemoryInitialize() +{ +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryCleanup( + CMPIInstanceMI* mi, + const CMPIContext* cc, + CMPIBoolean term) +{ + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryEnumInstanceNames( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop) +{ + return KDefaultEnumerateInstanceNames( + _cb, mi, cc, cr, cop); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryEnumInstances( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char** properties) +{ + LMI_AssociatedProcessorCacheMemory lmi_assoc_cache; + LMI_ProcessorCacheMemoryRef lmi_cpu_cache; + LMI_ProcessorRef lmi_cpu; + CMPIUint16 cache_level, write_policy, cache_type, associativity; + const char *ns = KNameSpace(cop); + char *error_msg = NULL; + unsigned i, j, cpus_nb = 0; + DmiProcessor *dmi_cpus = NULL; + unsigned dmi_cpus_nb = 0; + LscpuProcessor lscpu; + DmiCpuCache *dmi_cpu_caches = NULL; + unsigned dmi_cpu_caches_nb = 0; + SysfsCpuCache *sysfs_cpu_caches = NULL; + unsigned sysfs_cpu_caches_nb = 0; + + /* get processors and caches */ + if (dmi_get_processors(&dmi_cpus, &dmi_cpus_nb) != 0 || dmi_cpus_nb < 1) { + dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); + } + + if (dmi_get_cpu_caches(&dmi_cpu_caches, 
&dmi_cpu_caches_nb) != 0 + || dmi_cpu_caches_nb < 1) { + dmi_free_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb); + } + + /* if we don't have dmidecode data */ + if (dmi_cpus_nb < 1 || dmi_cpu_caches_nb < 1) { + dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); + dmi_free_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb); + + if (lscpu_get_processor(&lscpu) != 0) { + error_msg = "Unable to get processor information."; + goto done; + } + + if (sysfs_get_cpu_caches(&sysfs_cpu_caches, &sysfs_cpu_caches_nb) != 0 + || sysfs_cpu_caches_nb < 1) { + error_msg = "Unable to get processor cache information."; + goto done; + } + } + + if (dmi_cpus_nb > 0) { + cpus_nb = dmi_cpus_nb; + } else if (lscpu.processors > 0) { + cpus_nb = lscpu.processors; + } else { + error_msg = "Unable to get processor information."; + goto done; + } + + /* if we have cpus and caches from dmidecode */ + /* in this case, we can match exactly cpus and caches */ + if (dmi_cpus_nb > 0 && dmi_cpu_caches_nb > 0) { + /* loop cpus */ + for (i = 0; i < dmi_cpus_nb; i++) { + LMI_AssociatedProcessorCacheMemory_Init(&lmi_assoc_cache, _cb, ns); + + LMI_ProcessorRef_Init(&lmi_cpu, _cb, ns); + LMI_ProcessorRef_Set_SystemCreationClassName(&lmi_cpu, + get_system_creation_class_name()); + LMI_ProcessorRef_Set_SystemName(&lmi_cpu, get_system_name()); + LMI_ProcessorRef_Set_CreationClassName(&lmi_cpu, + ORGID "_" CPU_CLASS_NAME); + LMI_ProcessorRef_Set_DeviceID(&lmi_cpu, dmi_cpus[i].id); + + /* loop caches */ + for (j = 0; j < dmi_cpu_caches_nb; j++) { + /* if this cpu contains this cache */ + if (strcmp(dmi_cpu_caches[j].id,dmi_cpus[i].l1_cache_handle) == 0 + || strcmp(dmi_cpu_caches[j].id, dmi_cpus[i].l2_cache_handle) == 0 + || strcmp(dmi_cpu_caches[j].id, dmi_cpus[i].l3_cache_handle) == 0) { + LMI_AssociatedProcessorCacheMemory_Init( + &lmi_assoc_cache, _cb, ns); + + LMI_ProcessorCacheMemoryRef_Init(&lmi_cpu_cache, _cb, ns); + LMI_ProcessorCacheMemoryRef_Set_SystemCreationClassName( + &lmi_cpu_cache, get_system_creation_class_name()); + LMI_ProcessorCacheMemoryRef_Set_SystemName(&lmi_cpu_cache, + get_system_name()); + LMI_ProcessorCacheMemoryRef_Set_CreationClassName( + &lmi_cpu_cache, ORGID "_" CPU_CACHE_CLASS_NAME); + LMI_ProcessorCacheMemoryRef_Set_DeviceID( + &lmi_cpu_cache, dmi_cpu_caches[j].id); + + LMI_AssociatedProcessorCacheMemory_Set_Dependent( + &lmi_assoc_cache, &lmi_cpu); + LMI_AssociatedProcessorCacheMemory_Set_Antecedent( + &lmi_assoc_cache, &lmi_cpu_cache); + + cache_level = get_cache_level(dmi_cpu_caches[j].level); + if (cache_level == 1) { + char *other_level; + if (asprintf(&other_level, "%u", + dmi_cpu_caches[j].level) < 0) { + other_level = NULL; + error_msg = "Not enough available memory."; + goto done; + } + LMI_AssociatedProcessorCacheMemory_Set_OtherLevelDescription( + &lmi_assoc_cache, other_level); + free(other_level); + other_level = NULL; + } + write_policy = get_write_policy(dmi_cpu_caches[j].op_mode); + if (write_policy == 1) { + LMI_AssociatedProcessorCacheMemory_Set_OtherWritePolicyDescription( + &lmi_assoc_cache, dmi_cpu_caches[j].op_mode); + } + cache_type = get_cache_type(dmi_cpu_caches[j].type); + if (cache_type == 1) { + LMI_AssociatedProcessorCacheMemory_Set_OtherCacheTypeDescription( + &lmi_assoc_cache, dmi_cpu_caches[j].type); + } + associativity = get_cache_associativity_dmi( + dmi_cpu_caches[j].associativity); + if (associativity == 1) { + LMI_AssociatedProcessorCacheMemory_Set_OtherAssociativityDescription( + &lmi_assoc_cache, + dmi_cpu_caches[j].associativity); + } + + 
LMI_AssociatedProcessorCacheMemory_Set_Level( + &lmi_assoc_cache, cache_level); + LMI_AssociatedProcessorCacheMemory_Set_WritePolicy( + &lmi_assoc_cache, write_policy); + LMI_AssociatedProcessorCacheMemory_Set_CacheType( + &lmi_assoc_cache, cache_type); + LMI_AssociatedProcessorCacheMemory_Set_Associativity( + &lmi_assoc_cache, associativity); + + LMI_AssociatedProcessorCacheMemory_Set_ReadPolicy( + &lmi_assoc_cache, + LMI_AssociatedProcessorCacheMemory_ReadPolicy_Unknown); + + KReturnInstance(cr, lmi_assoc_cache); + } + } + } + } else { + /* in this case, we match every cache to every cpu, assuming all the */ + /* cpus are the same */ + /* loop caches */ + for (i = 0; i < sysfs_cpu_caches_nb; i++) { + LMI_AssociatedProcessorCacheMemory_Init(&lmi_assoc_cache, _cb, ns); + + LMI_ProcessorCacheMemoryRef_Init(&lmi_cpu_cache, _cb, ns); + LMI_ProcessorCacheMemoryRef_Set_SystemCreationClassName( + &lmi_cpu_cache, get_system_creation_class_name()); + LMI_ProcessorCacheMemoryRef_Set_SystemName(&lmi_cpu_cache, + get_system_name()); + LMI_ProcessorCacheMemoryRef_Set_CreationClassName(&lmi_cpu_cache, + ORGID "_" CPU_CACHE_CLASS_NAME); + LMI_ProcessorCacheMemoryRef_Set_DeviceID(&lmi_cpu_cache, + sysfs_cpu_caches[i].id); + + /* loop cpus */ + for (j = 0; j < cpus_nb; j++) { + LMI_ProcessorRef_Init(&lmi_cpu, _cb, ns); + LMI_ProcessorRef_Set_SystemCreationClassName(&lmi_cpu, + get_system_creation_class_name()); + LMI_ProcessorRef_Set_SystemName(&lmi_cpu, get_system_name()); + LMI_ProcessorRef_Set_CreationClassName(&lmi_cpu, + ORGID "_" CPU_CLASS_NAME); + + if (dmi_cpus_nb > 0) { + LMI_ProcessorRef_Set_DeviceID(&lmi_cpu, dmi_cpus[j].id); + } else { + char *cpu_id; + if (asprintf(&cpu_id, "%u", j) < 0) { + cpu_id = NULL; + error_msg = "Not enough available memory."; + goto done; + } + LMI_ProcessorRef_Set_DeviceID(&lmi_cpu, cpu_id); + free(cpu_id); + cpu_id = NULL; + } + + LMI_AssociatedProcessorCacheMemory_Set_Dependent( + &lmi_assoc_cache, &lmi_cpu); + LMI_AssociatedProcessorCacheMemory_Set_Antecedent( + &lmi_assoc_cache, &lmi_cpu_cache); + + cache_level = get_cache_level(sysfs_cpu_caches[i].level); + if (cache_level == 1) { + char *other_level; + if (asprintf(&other_level, "%u", + sysfs_cpu_caches[i].level) < 0) { + other_level = NULL; + error_msg = "Not enough available memory."; + goto done; + } + LMI_AssociatedProcessorCacheMemory_Set_OtherLevelDescription( + &lmi_assoc_cache, other_level); + free(other_level); + other_level = NULL; + } + cache_type = get_cache_type(sysfs_cpu_caches[i].type); + if (cache_type == 1) { + LMI_AssociatedProcessorCacheMemory_Set_OtherCacheTypeDescription( + &lmi_assoc_cache, sysfs_cpu_caches[i].type); + } + + LMI_AssociatedProcessorCacheMemory_Set_Level( + &lmi_assoc_cache, cache_level); + LMI_AssociatedProcessorCacheMemory_Set_CacheType( + &lmi_assoc_cache, cache_type); + LMI_AssociatedProcessorCacheMemory_Set_Associativity( + &lmi_assoc_cache, + get_cache_associativity_sysfs( + sysfs_cpu_caches[i].ways_of_assoc)); + + LMI_AssociatedProcessorCacheMemory_Set_WritePolicy( + &lmi_assoc_cache, + LMI_AssociatedProcessorCacheMemory_WritePolicy_Unknown); + LMI_AssociatedProcessorCacheMemory_Set_ReadPolicy( + &lmi_assoc_cache, + LMI_AssociatedProcessorCacheMemory_ReadPolicy_Unknown); + + if (sysfs_cpu_caches[i].line_size) { + LMI_AssociatedProcessorCacheMemory_Set_LineSize( + &lmi_assoc_cache, sysfs_cpu_caches[i].line_size); + } + + KReturnInstance(cr, lmi_assoc_cache); + } + } + } + +done: + /* free lscpu only if it was used */ + if (dmi_cpus_nb < 1 || dmi_cpu_caches_nb < 1) { + 
lscpu_free_processor(&lscpu); + } + dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); + dmi_free_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb); + sysfs_free_cpu_caches(&sysfs_cpu_caches, &sysfs_cpu_caches_nb); + + if (error_msg) { + KReturn2(_cb, ERR_FAILED, error_msg); + } + + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryGetInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char** properties) +{ + return KDefaultGetInstance( + _cb, mi, cc, cr, cop, properties); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryCreateInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const CMPIInstance* ci) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryModifyInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const CMPIInstance* ci, + const char**properties) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryDeleteInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryExecQuery( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* lang, + const char* query) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryAssociationCleanup( + CMPIAssociationMI* mi, + const CMPIContext* cc, + CMPIBoolean term) +{ + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryAssociators( + CMPIAssociationMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* assocClass, + const char* resultClass, + const char* role, + const char* resultRole, + const char** properties) +{ + return KDefaultAssociators( + _cb, + mi, + cc, + cr, + cop, + LMI_AssociatedProcessorCacheMemory_ClassName, + assocClass, + resultClass, + role, + resultRole, + properties); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryAssociatorNames( + CMPIAssociationMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* assocClass, + const char* resultClass, + const char* role, + const char* resultRole) +{ + return KDefaultAssociatorNames( + _cb, + mi, + cc, + cr, + cop, + LMI_AssociatedProcessorCacheMemory_ClassName, + assocClass, + resultClass, + role, + resultRole); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryReferences( + CMPIAssociationMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* assocClass, + const char* role, + const char** properties) +{ + return KDefaultReferences( + _cb, + mi, + cc, + cr, + cop, + LMI_AssociatedProcessorCacheMemory_ClassName, + assocClass, + role, + properties); +} + +static CMPIStatus LMI_AssociatedProcessorCacheMemoryReferenceNames( + CMPIAssociationMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* assocClass, + const char* role) +{ + return KDefaultReferenceNames( + _cb, + mi, + cc, + cr, + cop, + LMI_AssociatedProcessorCacheMemory_ClassName, + assocClass, + role); +} + +/* + * Get CIM Cache Level. 
+ * @param level + * @return CIM Cache Level + */ +CMPIUint16 get_cache_level(const unsigned level) +{ + static struct { + CMPIUint16 cim_level; /* CIM cache level */ + unsigned level; /* cache level */ + } levels[] = { + {0, 0}, + /* + {1, }, + {2, }, + */ + {3, 1}, + {4, 2}, + {5, 3}, + }; + + size_t i, lvl_length = sizeof(levels) / sizeof(levels[0]); + + for (i = 0; i < lvl_length; i++) { + if (level == levels[i].level) { + return levels[i].cim_level; + } + } + + return 1; /* Other */ +} + +/* + * Get CIM Write Policy according to dmidecode. + * @param op_mode operational mode from dmidecode + * @return CIM Write Policy + */ +CMPIUint16 get_write_policy(const char *op_mode) +{ + static struct { + CMPIUint16 write_policy; /* CIM write policy */ + char *op_mode; /* op mode from dmidecode */ + } modes[] = { + {0, "Unknown"}, + /* + {1, }, + */ + {2, "Write Back"}, + {3, "Write Through"}, + {4, "Varies With Memory Address"}, + /* + {5, }, + */ + }; + + size_t i, mode_length = sizeof(modes) / sizeof(modes[0]); + + for (i = 0; i < mode_length; i++) { + if (strcmp(op_mode, modes[i].op_mode) == 0) { + return modes[i].write_policy; + } + } + + return 1; /* Other */ +} + +/* + * Get CIM Cache Type according to dmidecode and sysfs. + * @param type cache type from dmidecode and sysfs + * @return CIM Cache Type + */ +CMPIUint16 get_cache_type(const char *type) +{ + static struct { + CMPIUint16 cache_type; /* CIM cache type */ + char *type; /* type from dmidecode and sysfs */ + } types[] = { + {0, "Unknown"}, + {1, "Other"}, + {2, "Instruction"}, + {3, "Data"}, + {4, "Unified"}, + }; + + size_t i, types_length = sizeof(types) / sizeof(types[0]); + + for (i = 0; i < types_length; i++) { + if (strcmp(type, types[i].type) == 0) { + return types[i].cache_type; + } + } + + return 1; /* Other */ +} + +/* + * Get CIM Cache Associativity according to dmidecode. + * @param assoc of cache from dmidecode + * @return CIM Cache Associativity + */ +CMPIUint16 get_cache_associativity_dmi(const char *assoc) +{ + static struct { + CMPIUint16 cache_assoc; /* CIM cache associativity */ + char *assoc; /* associativity from dmidecode */ + } assocs[] = { + {0, "Unknown"}, + {1, "Other"}, + {2, "Direct Mapped"}, + {3, "2-way Set-associative"}, + {4, "4-way Set-associative"}, + {5, "Fully Associative"}, + {6, "8-way Set-associative"}, + {7, "16-way Set-associative"}, + {8, "12-way Set-associative"}, + {9, "24-way Set-associative"}, + {10, "32-way Set-associative"}, + {11, "48-way Set-associative"}, + {12, "64-way Set-associative"}, + {13, "20-way Set-associative"}, + }; + + size_t i, assocs_length = sizeof(assocs) / sizeof(assocs[0]); + + for (i = 0; i < assocs_length; i++) { + if (strcmp(assoc, assocs[i].assoc) == 0) { + return assocs[i].cache_assoc; + } + } + + return 1; /* Other */ +} + +/* + * Get CIM Cache Associativity according to sysfs. 
+ * @param ways_of_assoc from sysfs + * @return CIM Cache Associativity + */ +CMPIUint16 get_cache_associativity_sysfs(const unsigned ways_of_assoc) +{ + static struct { + CMPIUint16 cache_assoc; /* CIM cache associativity */ + unsigned ways; /* ways of associativity from sysfs */ + } assocs[] = { + {0, 0}, + /* + {1, "Other"}, + {2, "Direct Mapped"}, + */ + {3, 2}, + {4, 4}, + /* + {5, "Fully Associative"}, + */ + {6, 8}, + {7, 16}, + {8, 12}, + {9, 24}, + {10, 32}, + {11, 48}, + {12, 64}, + {13, 20}, + }; + + size_t i, assocs_length = sizeof(assocs) / sizeof(assocs[0]); + + for (i = 0; i < assocs_length; i++) { + if (ways_of_assoc == assocs[i].ways) { + return assocs[i].cache_assoc; + } + } + + return 1; /* Other */ +} + +CMInstanceMIStub( + LMI_AssociatedProcessorCacheMemory, + LMI_AssociatedProcessorCacheMemory, + _cb, + LMI_AssociatedProcessorCacheMemoryInitialize()) + +CMAssociationMIStub( + LMI_AssociatedProcessorCacheMemory, + LMI_AssociatedProcessorCacheMemory, + _cb, + LMI_AssociatedProcessorCacheMemoryInitialize()) + +KONKRET_REGISTRATION( + "root/cimv2", + "LMI_AssociatedProcessorCacheMemory", + "LMI_AssociatedProcessorCacheMemory", + "instance association") diff --git a/src/hardware/LMI_Hardware.h b/src/hardware/LMI_Hardware.h index e93f9fe..16bdf12 100644 --- a/src/hardware/LMI_Hardware.h +++ b/src/hardware/LMI_Hardware.h @@ -21,6 +21,8 @@ #ifndef LMI_HARDWARE_H_ #define LMI_HARDWARE_H_ +#define CPU_CLASS_NAME "Processor" #define CPU_CAP_CLASS_NAME "ProcessorCapabilities" +#define CPU_CACHE_CLASS_NAME "ProcessorCacheMemory" #endif /* LMI_HARDWARE_H_ */ diff --git a/src/hardware/LMI_ProcessorCacheMemoryProvider.c b/src/hardware/LMI_ProcessorCacheMemoryProvider.c new file mode 100644 index 0000000..d4c73f6 --- /dev/null +++ b/src/hardware/LMI_ProcessorCacheMemoryProvider.c @@ -0,0 +1,405 @@ +/* + * Copyright (C) 2013 Red Hat, Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Authors: Peter Schiffer <pschiffe@redhat.com> + */ + +#include <konkret/konkret.h> +#include "LMI_ProcessorCacheMemory.h" +#include "LMI_Hardware.h" +#include "globals.h" +#include "dmidecode.h" +#include "sysfs.h" + +CMPIUint16 get_cachestatus(const char *status); + +static const CMPIBroker* _cb = NULL; + +static void LMI_ProcessorCacheMemoryInitialize() +{ +} + +static CMPIStatus LMI_ProcessorCacheMemoryCleanup( + CMPIInstanceMI* mi, + const CMPIContext* cc, + CMPIBoolean term) +{ + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_ProcessorCacheMemoryEnumInstanceNames( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop) +{ + return KDefaultEnumerateInstanceNames( + _cb, mi, cc, cr, cop); +} + +static CMPIStatus LMI_ProcessorCacheMemoryEnumInstances( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char** properties) +{ + LMI_ProcessorCacheMemory lmi_cpu_cache; + const char *ns = KNameSpace(cop); + char *error_msg = NULL, *instance_id = NULL; + unsigned i, caches = 0; + DmiCpuCache *dmi_cpu_caches = NULL; + unsigned dmi_cpu_caches_nb = 0; + SysfsCpuCache *sysfs_cpu_caches = NULL; + unsigned sysfs_cpu_caches_nb = 0; + + if (dmi_get_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb) != 0 + || dmi_cpu_caches_nb < 1) { + dmi_free_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb); + + if (sysfs_get_cpu_caches(&sysfs_cpu_caches, &sysfs_cpu_caches_nb) != 0 + || sysfs_cpu_caches_nb < 1) { + error_msg = "Unable to get processor cache information."; + goto done; + } + } + + if (dmi_cpu_caches_nb > 0) { + caches = dmi_cpu_caches_nb; + } else if (sysfs_cpu_caches_nb > 0) { + caches = sysfs_cpu_caches_nb; + } else { + error_msg = "Unable to get processor cache information."; + goto done; + } + + for (i = 0; i < caches; i++) { + LMI_ProcessorCacheMemory_Init(&lmi_cpu_cache, _cb, ns); + + LMI_ProcessorCacheMemory_Set_SystemCreationClassName(&lmi_cpu_cache, + get_system_creation_class_name()); + LMI_ProcessorCacheMemory_Set_SystemName(&lmi_cpu_cache, + get_system_name()); + LMI_ProcessorCacheMemory_Set_CreationClassName(&lmi_cpu_cache, + ORGID "_" CPU_CACHE_CLASS_NAME); + + LMI_ProcessorCacheMemory_Set_BlockSize(&lmi_cpu_cache, 1); + LMI_ProcessorCacheMemory_Set_HealthState(&lmi_cpu_cache, + LMI_ProcessorCacheMemory_HealthState_Unknown); + LMI_ProcessorCacheMemory_Init_OperationalStatus(&lmi_cpu_cache, 1); + LMI_ProcessorCacheMemory_Set_OperationalStatus(&lmi_cpu_cache, 0, + LMI_ProcessorCacheMemory_OperationalStatus_Unknown); + LMI_ProcessorCacheMemory_Set_Access(&lmi_cpu_cache, + LMI_ProcessorCacheMemory_Access_Read_Write_Supported); + LMI_ProcessorCacheMemory_Set_Caption(&lmi_cpu_cache, + "Processor Cache Memory"); + LMI_ProcessorCacheMemory_Set_Description(&lmi_cpu_cache, + "This object represents one cache memory of processor in system."); + LMI_ProcessorCacheMemory_Set_IsCompressed(&lmi_cpu_cache, 0); + LMI_ProcessorCacheMemory_Set_Purpose(&lmi_cpu_cache, + "Processor cache is used to reduce the average time to " + "access memory. The cache is a smaller, faster memory which " + "stores copies of the data from the most frequently used main " + "memory locations."); + + /* do we have dmidecode output? 
*/ + if (dmi_cpu_caches_nb > 0) { + if (asprintf(&instance_id, ORGID ":" CPU_CACHE_CLASS_NAME ":%s", + dmi_cpu_caches[i].id) < 0) { + instance_id = NULL; + error_msg = "Not enough available memory."; + goto done; + } + + LMI_ProcessorCacheMemory_Set_DeviceID(&lmi_cpu_cache, + dmi_cpu_caches[i].id); + + LMI_ProcessorCacheMemory_Set_NumberOfBlocks(&lmi_cpu_cache, + dmi_cpu_caches[i].size); + LMI_ProcessorCacheMemory_Set_ElementName(&lmi_cpu_cache, + dmi_cpu_caches[i].name); + LMI_ProcessorCacheMemory_Set_Name(&lmi_cpu_cache, + dmi_cpu_caches[i].name); + LMI_ProcessorCacheMemory_Set_EnabledState(&lmi_cpu_cache, + get_cachestatus(dmi_cpu_caches[i].status)); + } else { + if (asprintf(&instance_id, ORGID ":" CPU_CACHE_CLASS_NAME ":%s", + sysfs_cpu_caches[i].id) < 0) { + instance_id = NULL; + error_msg = "Not enough available memory."; + goto done; + } + + LMI_ProcessorCacheMemory_Set_DeviceID(&lmi_cpu_cache, + sysfs_cpu_caches[i].id); + + LMI_ProcessorCacheMemory_Set_NumberOfBlocks(&lmi_cpu_cache, + sysfs_cpu_caches[i].size); + LMI_ProcessorCacheMemory_Set_ElementName(&lmi_cpu_cache, + sysfs_cpu_caches[i].name); + LMI_ProcessorCacheMemory_Set_Name(&lmi_cpu_cache, + sysfs_cpu_caches[i].name); + } + + LMI_ProcessorCacheMemory_Set_InstanceID(&lmi_cpu_cache, instance_id); + + KReturnInstance(cr, lmi_cpu_cache); + } + +done: + dmi_free_cpu_caches(&dmi_cpu_caches, &dmi_cpu_caches_nb); + sysfs_free_cpu_caches(&sysfs_cpu_caches, &sysfs_cpu_caches_nb); + + if (error_msg) { + KReturn2(_cb, ERR_FAILED, error_msg); + } + + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_ProcessorCacheMemoryGetInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char** properties) +{ + return KDefaultGetInstance( + _cb, mi, cc, cr, cop, properties); +} + +static CMPIStatus LMI_ProcessorCacheMemoryCreateInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const CMPIInstance* ci) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_ProcessorCacheMemoryModifyInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const CMPIInstance* ci, + const char** properties) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_ProcessorCacheMemoryDeleteInstance( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +static CMPIStatus LMI_ProcessorCacheMemoryExecQuery( + CMPIInstanceMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* lang, + const char* query) +{ + CMReturn(CMPI_RC_ERR_NOT_SUPPORTED); +} + +CMInstanceMIStub( + LMI_ProcessorCacheMemory, + LMI_ProcessorCacheMemory, + _cb, + LMI_ProcessorCacheMemoryInitialize()) + +static CMPIStatus LMI_ProcessorCacheMemoryMethodCleanup( + CMPIMethodMI* mi, + const CMPIContext* cc, + CMPIBoolean term) +{ + CMReturn(CMPI_RC_OK); +} + +static CMPIStatus LMI_ProcessorCacheMemoryInvokeMethod( + CMPIMethodMI* mi, + const CMPIContext* cc, + const CMPIResult* cr, + const CMPIObjectPath* cop, + const char* meth, + const CMPIArgs* in, + CMPIArgs* out) +{ + return LMI_ProcessorCacheMemory_DispatchMethod( + _cb, mi, cc, cr, cop, meth, in, out); +} + +CMMethodMIStub( + LMI_ProcessorCacheMemory, + LMI_ProcessorCacheMemory, + _cb, + LMI_ProcessorCacheMemoryInitialize()) + +KUint32 LMI_ProcessorCacheMemory_RequestStateChange( + const CMPIBroker* cb, + 
CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + const KUint16* RequestedState, + KRef* Job, + const KDateTime* TimeoutPeriod, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_SetPowerState( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + const KUint16* PowerState, + const KDateTime* Time, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_Reset( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_EnableDevice( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + const KBoolean* Enabled, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_OnlineDevice( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + const KBoolean* Online, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_QuiesceDevice( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + const KBoolean* Quiesce, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_SaveProperties( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +KUint32 LMI_ProcessorCacheMemory_RestoreProperties( + const CMPIBroker* cb, + CMPIMethodMI* mi, + const CMPIContext* context, + const LMI_ProcessorCacheMemoryRef* self, + CMPIStatus* status) +{ + KUint32 result = KUINT32_INIT; + + KSetStatus(status, ERR_NOT_SUPPORTED); + return result; +} + +/* + * Get CPU Cache status according to the dmidecode. 
+ * @param status from dmidecode + * @return CIM id of CPU Cache status + */ +CMPIUint16 get_cachestatus(const char *status) +{ + if (!status || strlen(status) < 1) { + return 5; /* Not Applicable */ + } + + static struct { + CMPIUint16 val; /* CIM value */ + char *stat; /* dmidecode status */ + } statuses[] = { + {2, "Enabled"}, + {3, "Disabled"}, + }; + + size_t i, st_length = sizeof(statuses) / sizeof(statuses[0]); + + for (i = 0; i < st_length; i++) { + if (strcmp(status, statuses[i].stat) == 0) { + return statuses[i].val; + } + } + + return 5; /* Not Applicable */ +} + +KONKRET_REGISTRATION( + "root/cimv2", + "LMI_ProcessorCacheMemory", + "LMI_ProcessorCacheMemory", + "instance method") diff --git a/src/hardware/LMI_ProcessorCapabilitiesProvider.c b/src/hardware/LMI_ProcessorCapabilitiesProvider.c index c9f7e14..c7c4ff2 100644 --- a/src/hardware/LMI_ProcessorCapabilitiesProvider.c +++ b/src/hardware/LMI_ProcessorCapabilitiesProvider.c @@ -62,17 +62,17 @@ static CMPIStatus LMI_ProcessorCapabilitiesEnumInstances( *element_name_string = "Capabilities of processor "; char *error_msg = NULL, *instance_id = NULL, *element_name = NULL; unsigned i, cpus_nb = 0; - DmiProcessor *dmi_cpus; - unsigned dmi_cpus_nb; + DmiProcessor *dmi_cpus = NULL; + unsigned dmi_cpus_nb = 0; LscpuProcessor lscpu; if (dmi_get_processors(&dmi_cpus, &dmi_cpus_nb) != 0 || dmi_cpus_nb < 1) { dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); - } - if (lscpu_get_processor(&lscpu) != 0) { - error_msg = "Unable to get processor information."; - goto done; + if (lscpu_get_processor(&lscpu) != 0) { + error_msg = "Unable to get processor information."; + goto done; + } } if (dmi_cpus_nb > 0) { @@ -89,8 +89,8 @@ static CMPIStatus LMI_ProcessorCapabilitiesEnumInstances( /* do we have output from dmidecode program? 
*/ if (dmi_cpus_nb > 0) { - if (asprintf(&instance_id, "%s:%s:%s", ORGID, - CPU_CAP_CLASS_NAME, dmi_cpus[i].id) < 0) { + if (asprintf(&instance_id, ORGID ":" CPU_CAP_CLASS_NAME ":%s", + dmi_cpus[i].id) < 0) { instance_id = NULL; error_msg = "Not enough available memory."; goto done; @@ -106,8 +106,8 @@ static CMPIStatus LMI_ProcessorCapabilitiesEnumInstances( goto done; } } else { - if (asprintf(&instance_id, "%s:%s:%u", ORGID, - CPU_CAP_CLASS_NAME, i) < 0) { + if (asprintf(&instance_id, ORGID ":" CPU_CAP_CLASS_NAME ":%u", + i) < 0) { instance_id = NULL; error_msg = "Not enough available memory."; goto done; @@ -133,7 +133,8 @@ static CMPIStatus LMI_ProcessorCapabilitiesEnumInstances( LMI_ProcessorCapabilities_Set_Caption(&lmi_cpu_cap, "Processor Capabilities"); LMI_ProcessorCapabilities_Set_Description(&lmi_cpu_cap, - "This object represents (mainly multi-core and multi-thread) capabilities of processor in system."); + "This object represents (mainly multi-core and multi-thread) " + "capabilities of processor in system."); LMI_ProcessorCapabilities_Set_ElementName(&lmi_cpu_cap, element_name); KReturnInstance(cr, lmi_cpu_cap); @@ -158,8 +159,11 @@ done: } element_name = NULL; + /* free lscpu only if it was used */ + if (dmi_cpus_nb < 1) { + lscpu_free_processor(&lscpu); + } dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); - lscpu_free_processor(&lscpu); if (error_msg) { KReturn2(_cb, ERR_FAILED, error_msg); diff --git a/src/hardware/LMI_ProcessorElementCapabilitiesProvider.c b/src/hardware/LMI_ProcessorElementCapabilitiesProvider.c index a477717..ee9cf18 100644 --- a/src/hardware/LMI_ProcessorElementCapabilitiesProvider.c +++ b/src/hardware/LMI_ProcessorElementCapabilitiesProvider.c @@ -64,17 +64,17 @@ static CMPIStatus LMI_ProcessorElementCapabilitiesEnumInstances( const char *ns = KNameSpace(cop); char *error_msg = NULL, *instance_id = NULL; unsigned i, cpus_nb = 0; - DmiProcessor *dmi_cpus; - unsigned dmi_cpus_nb; + DmiProcessor *dmi_cpus = NULL; + unsigned dmi_cpus_nb = 0; LscpuProcessor lscpu; if (dmi_get_processors(&dmi_cpus, &dmi_cpus_nb) != 0 || dmi_cpus_nb < 1) { dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); - } - if (lscpu_get_processor(&lscpu) != 0) { - error_msg = "Unable to get processor information."; - goto done; + if (lscpu_get_processor(&lscpu) != 0) { + error_msg = "Unable to get processor information."; + goto done; + } } if (dmi_cpus_nb > 0) { @@ -93,7 +93,8 @@ static CMPIStatus LMI_ProcessorElementCapabilitiesEnumInstances( LMI_ProcessorRef_Set_SystemCreationClassName(&lmi_cpu, get_system_creation_class_name()); LMI_ProcessorRef_Set_SystemName(&lmi_cpu, get_system_name()); - LMI_ProcessorRef_Set_CreationClassName(&lmi_cpu, "LMI_Processor"); + LMI_ProcessorRef_Set_CreationClassName(&lmi_cpu, + ORGID "_" CPU_CLASS_NAME); LMI_ProcessorCapabilitiesRef_Init(&lmi_cpu_cap, _cb, ns); @@ -101,8 +102,8 @@ static CMPIStatus LMI_ProcessorElementCapabilitiesEnumInstances( if (dmi_cpus_nb > 0) { LMI_ProcessorRef_Set_DeviceID(&lmi_cpu, dmi_cpus[i].id); - if (asprintf(&instance_id, "%s:%s:%s", ORGID, - CPU_CAP_CLASS_NAME, dmi_cpus[i].id) < 0) { + if (asprintf(&instance_id, ORGID ":" CPU_CAP_CLASS_NAME ":%s", + dmi_cpus[i].id) < 0) { instance_id = NULL; error_msg = "Not enough available memory."; goto done; @@ -118,8 +119,8 @@ static CMPIStatus LMI_ProcessorElementCapabilitiesEnumInstances( free(cpu_id); cpu_id = NULL; - if (asprintf(&instance_id, "%s:%s:%u", ORGID, - CPU_CAP_CLASS_NAME, i) < 0) { + if (asprintf(&instance_id, ORGID ":" CPU_CAP_CLASS_NAME ":%u", + i) < 0) { instance_id = 
NULL; error_msg = "Not enough available memory."; goto done; @@ -151,8 +152,11 @@ done: } instance_id = NULL; + /* free lscpu only if it was used */ + if (dmi_cpus_nb < 1) { + lscpu_free_processor(&lscpu); + } dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); - lscpu_free_processor(&lscpu); if (error_msg) { KReturn2(_cb, ERR_FAILED, error_msg); diff --git a/src/hardware/LMI_ProcessorProvider.c b/src/hardware/LMI_ProcessorProvider.c index c3b78e2..6280bbd 100644 --- a/src/hardware/LMI_ProcessorProvider.c +++ b/src/hardware/LMI_ProcessorProvider.c @@ -19,9 +19,11 @@ */ #include <konkret/konkret.h> -#include <sys/utsname.h> +#include <stdio.h> #include <string.h> +#include <sys/utsname.h> #include "LMI_Processor.h" +#include "LMI_Hardware.h" #include "globals.h" #include "dmidecode.h" #include "lscpu.h" @@ -72,10 +74,10 @@ static CMPIStatus LMI_ProcessorEnumInstances( CMPIUint32 current_speed = 0, max_speed = 0, external_clock = 0; unsigned i, j, cpus_nb = 0; char *other_family = NULL, *architecture = NULL, *cpu_name = NULL, - *stepping = NULL, *error_msg = NULL; + *stepping = NULL, *error_msg = NULL, *instance_id = NULL; struct utsname utsname_buf; - DmiProcessor *dmi_cpus; - unsigned dmi_cpus_nb; + DmiProcessor *dmi_cpus = NULL; + unsigned dmi_cpus_nb = 0; LscpuProcessor lscpu; CpuinfoProcessor proc_cpu; @@ -116,8 +118,8 @@ static CMPIStatus LMI_ProcessorEnumInstances( LMI_Processor_Set_SystemCreationClassName(&lmi_cpu, get_system_creation_class_name()); LMI_Processor_Set_SystemName(&lmi_cpu, get_system_name()); - LMI_Processor_Set_CreationClassName(&lmi_cpu, "LMI_Processor"); - LMI_Processor_Set_Caption(&lmi_cpu, "Processor"); + LMI_Processor_Set_CreationClassName(&lmi_cpu, ORGID "_" CPU_CLASS_NAME); + LMI_Processor_Set_Caption(&lmi_cpu, CPU_CLASS_NAME); LMI_Processor_Set_Description(&lmi_cpu, "This object represents one processor in system."); @@ -137,6 +139,12 @@ static CMPIStatus LMI_ProcessorEnumInstances( cpu_name = dmi_cpus[i].name; enabled_cores = dmi_cpus[i].enabled_cores; stepping = dmi_cpus[i].stepping; + if (asprintf(&instance_id, ORGID ":" CPU_CLASS_NAME ":%s", + dmi_cpus[i].id) < 0) { + instance_id = NULL; + error_msg = "Not enough available memory."; + goto done; + } LMI_Processor_Set_DeviceID(&lmi_cpu, dmi_cpus[i].id); LMI_Processor_Set_Family(&lmi_cpu, family); @@ -160,7 +168,8 @@ static CMPIStatus LMI_ProcessorEnumInstances( LMI_Processor_Set_Characteristics(&lmi_cpu, count, charact); LMI_Processor_Set_EnabledProcessorCharacteristics( - &lmi_cpu, count, 0); + &lmi_cpu, count, + LMI_Processor_EnabledProcessorCharacteristics_Unknown); count++; } } @@ -172,6 +181,12 @@ static CMPIStatus LMI_ProcessorEnumInstances( error_msg = "Not enough available memory."; goto done; } + if (asprintf(&instance_id, ORGID ":" CPU_CLASS_NAME ":%s", + cpu_id) < 0) { + instance_id = NULL; + error_msg = "Not enough available memory."; + goto done; + } LMI_Processor_Set_DeviceID(&lmi_cpu, cpu_id); free(cpu_id); cpu_id = NULL; @@ -187,18 +202,18 @@ static CMPIStatus LMI_ProcessorEnumInstances( stepping = lscpu.stepping; } + LMI_Processor_Set_InstanceID(&lmi_cpu, instance_id); LMI_Processor_Set_CPUStatus(&lmi_cpu, cpustatus); LMI_Processor_Set_EnabledState(&lmi_cpu, enabledstate); LMI_Processor_Set_NumberOfEnabledCores(&lmi_cpu, enabled_cores); - if (current_speed) { - LMI_Processor_Set_CurrentClockSpeed(&lmi_cpu, current_speed); - } - if (max_speed) { - LMI_Processor_Set_MaxClockSpeed(&lmi_cpu, max_speed); - } - if (external_clock) { - LMI_Processor_Set_ExternalBusClockSpeed(&lmi_cpu, external_clock); - 
} + LMI_Processor_Set_CurrentClockSpeed(&lmi_cpu, current_speed); + LMI_Processor_Set_MaxClockSpeed(&lmi_cpu, max_speed); + LMI_Processor_Set_ExternalBusClockSpeed(&lmi_cpu, external_clock); + LMI_Processor_Init_OperationalStatus(&lmi_cpu, 1); + LMI_Processor_Set_OperationalStatus(&lmi_cpu, 0, + LMI_Processor_OperationalStatus_Unknown); + LMI_Processor_Set_HealthState(&lmi_cpu, + LMI_Processor_HealthState_Unknown); if (cpu_name && strlen(cpu_name)) { LMI_Processor_Set_Name(&lmi_cpu, cpu_name); LMI_Processor_Set_UniqueID(&lmi_cpu, cpu_name); @@ -239,6 +254,10 @@ done: free(architecture); } architecture = NULL; + if (instance_id) { + free(instance_id); + } + instance_id = NULL; dmi_free_processors(&dmi_cpus, &dmi_cpus_nb); lscpu_free_processor(&lscpu); diff --git a/src/hardware/dmidecode.c b/src/hardware/dmidecode.c index 14e4ac9..a50aa8a 100644 --- a/src/hardware/dmidecode.c +++ b/src/hardware/dmidecode.c @@ -21,6 +21,10 @@ #include "dmidecode.h" +/****************************************************************************** + * DmiProcessor + */ + /* * Initialize DmiProcessor attributes. * @param cpu @@ -42,6 +46,9 @@ void init_dmiprocessor_struct(DmiProcessor *cpu) cpu->upgrade = NULL; cpu->charact_nb = 0; cpu->characteristics = NULL; + cpu->l1_cache_handle = NULL; + cpu->l2_cache_handle = NULL; + cpu->l3_cache_handle = NULL; } /* @@ -95,6 +102,24 @@ short check_dmiprocessor_attributes(DmiProcessor *cpu) goto done; } } + if (!cpu->l1_cache_handle) { + if (!(cpu->l1_cache_handle = strdup(""))) { + ret = -9; + goto done; + } + } + if (!cpu->l2_cache_handle) { + if (!(cpu->l2_cache_handle = strdup(""))) { + ret = -10; + goto done; + } + } + if (!cpu->l3_cache_handle) { + if (!(cpu->l3_cache_handle = strdup(""))) { + ret = -11; + goto done; + } + } ret = 0; @@ -147,7 +172,7 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) for (i = 0; i < buffer_size; i++) { if (strncmp(buffer[i], "Handle 0x", 9) == 0) { curr_cpu++; - init_dmiprocessor_struct(cpus[curr_cpu]); + init_dmiprocessor_struct(&(*cpus)[curr_cpu]); continue; } /* ignore first useless lines */ @@ -157,28 +182,28 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* ID */ buf = copy_string_part_after_delim(buffer[i], "ID: "); if (buf) { - cpus[curr_cpu]->id = buf; + (*cpus)[curr_cpu].id = buf; buf = NULL; continue; } /* Family */ buf = copy_string_part_after_delim(buffer[i], "Family: "); if (buf) { - cpus[curr_cpu]->family = buf; + (*cpus)[curr_cpu].family = buf; buf = NULL; continue; } /* Status */ buf = copy_string_part_after_delim(buffer[i], "Status: Populated, "); if (buf) { - cpus[curr_cpu]->status = buf; + (*cpus)[curr_cpu].status = buf; buf = NULL; continue; } /* Current Speed */ buf = copy_string_part_after_delim(buffer[i], "Current Speed: "); if (buf && strcmp(buf, "Unknown") != 0) { - sscanf(buf, "%u", &cpus[curr_cpu]->current_speed); + sscanf(buf, "%u", &(*cpus)[curr_cpu].current_speed); free(buf); buf = NULL; continue; @@ -186,7 +211,7 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* Max Speed */ buf = copy_string_part_after_delim(buffer[i], "Max Speed: "); if (buf && strcmp(buf, "Unknown") != 0) { - sscanf(buf, "%u", &cpus[curr_cpu]->max_speed); + sscanf(buf, "%u", &(*cpus)[curr_cpu].max_speed); free(buf); buf = NULL; continue; @@ -194,7 +219,7 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* External Clock Speed */ buf = copy_string_part_after_delim(buffer[i], "External Clock: "); if (buf && strcmp(buf, "Unknown") != 0) { - sscanf(buf, "%u", 
&cpus[curr_cpu]->external_clock); + sscanf(buf, "%u", &(*cpus)[curr_cpu].external_clock); free(buf); buf = NULL; continue; @@ -202,14 +227,14 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* CPU Name */ buf = copy_string_part_after_delim(buffer[i], "Version: "); if (buf) { - cpus[curr_cpu]->name = buf; + (*cpus)[curr_cpu].name = buf; buf = NULL; continue; } /* Cores */ buf = copy_string_part_after_delim(buffer[i], "Core Count: "); if (buf) { - sscanf(buf, "%u", &cpus[curr_cpu]->cores); + sscanf(buf, "%u", &(*cpus)[curr_cpu].cores); free(buf); buf = NULL; continue; @@ -217,7 +242,7 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* Enabled Cores */ buf = copy_string_part_after_delim(buffer[i], "Core Enabled: "); if (buf) { - sscanf(buf, "%u", &cpus[curr_cpu]->enabled_cores); + sscanf(buf, "%u", &(*cpus)[curr_cpu].enabled_cores); free(buf); buf = NULL; continue; @@ -225,7 +250,7 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* Threads */ buf = copy_string_part_after_delim(buffer[i], "Thread Count: "); if (buf) { - sscanf(buf, "%u", &cpus[curr_cpu]->threads); + sscanf(buf, "%u", &(*cpus)[curr_cpu].threads); free(buf); buf = NULL; continue; @@ -233,21 +258,42 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) /* CPU Type/Role */ buf = copy_string_part_after_delim(buffer[i], "Type: "); if (buf) { - cpus[curr_cpu]->type = buf; + (*cpus)[curr_cpu].type = buf; buf = NULL; continue; } /* Stepping */ buf = copy_string_part_after_delim(buffer[i], ", Stepping "); if (buf) { - cpus[curr_cpu]->stepping = buf; + (*cpus)[curr_cpu].stepping = buf; buf = NULL; continue; } /* Upgrade */ buf = copy_string_part_after_delim(buffer[i], "Upgrade: "); if (buf) { - cpus[curr_cpu]->upgrade = buf; + (*cpus)[curr_cpu].upgrade = buf; + buf = NULL; + continue; + } + /* Level 1 Cache Handle */ + buf = copy_string_part_after_delim(buffer[i], "L1 Cache Handle: "); + if (buf) { + (*cpus)[curr_cpu].l1_cache_handle = buf; + buf = NULL; + continue; + } + /* Level 2 Cache Handle */ + buf = copy_string_part_after_delim(buffer[i], "L2 Cache Handle: "); + if (buf) { + (*cpus)[curr_cpu].l2_cache_handle = buf; + buf = NULL; + continue; + } + /* Level 3 Cache Handle */ + buf = copy_string_part_after_delim(buffer[i], "L3 Cache Handle: "); + if (buf) { + (*cpus)[curr_cpu].l3_cache_handle = buf; buf = NULL; continue; } @@ -255,27 +301,27 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) if (strstr(buffer[i], "Characteristics:") && !strstr(buffer[i], "Characteristics: ")) { /* count characteristics */ - cpus[curr_cpu]->charact_nb = 0; - while (strlen(buffer[i + cpus[curr_cpu]->charact_nb + 1])) { - cpus[curr_cpu]->charact_nb += 1; + (*cpus)[curr_cpu].charact_nb = 0; + while (strlen(buffer[i + (*cpus)[curr_cpu].charact_nb + 1])) { + (*cpus)[curr_cpu].charact_nb += 1; } /* allocate memory */ - cpus[curr_cpu]->characteristics = - (char **)calloc(cpus[curr_cpu]->charact_nb, sizeof(char *)); - if (!cpus[curr_cpu]->characteristics) { + (*cpus)[curr_cpu].characteristics = + (char **)calloc((*cpus)[curr_cpu].charact_nb, sizeof(char *)); + if (!(*cpus)[curr_cpu].characteristics) { warn("Failed to allocate memory."); ret = -5; goto done; } unsigned j; char *tmp_line; - for (j = 0; j < cpus[curr_cpu]->charact_nb; j++) { + for (j = 0; j < (*cpus)[curr_cpu].charact_nb; j++) { tmp_line = trim(buffer[i + j + 1], NULL); if (tmp_line) { - cpus[curr_cpu]->characteristics[j] = tmp_line; + (*cpus)[curr_cpu].characteristics[j] = tmp_line; } else { - 
cpus[curr_cpu]->characteristics[j] = strdup(""); - if (!cpus[curr_cpu]->characteristics[j]) { + (*cpus)[curr_cpu].characteristics[j] = strdup(""); + if (!(*cpus)[curr_cpu].characteristics[j]) { warn("Failed to allocate memory."); ret = -6; goto done; @@ -283,13 +329,13 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb) } } /* skip characteristics and newline after them */ - i += cpus[curr_cpu]->charact_nb + 1; + i += (*cpus)[curr_cpu].charact_nb + 1; } } /* fill in default attributes if needed */ for (i = 0; i < *cpus_nb; i++) { - if (check_dmiprocessor_attributes(cpus[i]) != 0) { + if (check_dmiprocessor_attributes(&(*cpus)[i]) != 0) { ret = -7; goto done; } @@ -313,45 +359,57 @@ void dmi_free_processors(DmiProcessor **cpus, unsigned *cpus_nb) if (*cpus_nb > 0) { for (i = 0; i < *cpus_nb; i++) { - if (cpus[i]->id) { - free(cpus[i]->id); + if ((*cpus)[i].id) { + free((*cpus)[i].id); } - cpus[i]->id = NULL; - if (cpus[i]->family) { - free(cpus[i]->family); + (*cpus)[i].id = NULL; + if ((*cpus)[i].family) { + free((*cpus)[i].family); } - cpus[i]->family = NULL; - if (cpus[i]->status) { - free(cpus[i]->status); + (*cpus)[i].family = NULL; + if ((*cpus)[i].status) { + free((*cpus)[i].status); } - cpus[i]->status = NULL; - if (cpus[i]->name) { - free(cpus[i]->name); + (*cpus)[i].status = NULL; + if ((*cpus)[i].name) { + free((*cpus)[i].name); } - cpus[i]->name = NULL; - if (cpus[i]->type) { - free(cpus[i]->type); + (*cpus)[i].name = NULL; + if ((*cpus)[i].type) { + free((*cpus)[i].type); } - cpus[i]->type = NULL; - if (cpus[i]->stepping) { - free(cpus[i]->stepping); + (*cpus)[i].type = NULL; + if ((*cpus)[i].stepping) { + free((*cpus)[i].stepping); } - cpus[i]->stepping = NULL; - if (cpus[i]->upgrade) { - free(cpus[i]->upgrade); + (*cpus)[i].stepping = NULL; + if ((*cpus)[i].upgrade) { + free((*cpus)[i].upgrade); } - cpus[i]->upgrade = NULL; - if (cpus[i]->charact_nb > 0) { - for (j = 0; j < cpus[i]->charact_nb; j++) { - if (cpus[i]->characteristics[j]) { - free(cpus[i]->characteristics[j]); + (*cpus)[i].upgrade = NULL; + if ((*cpus)[i].charact_nb > 0) { + for (j = 0; j < (*cpus)[i].charact_nb; j++) { + if ((*cpus)[i].characteristics[j]) { + free((*cpus)[i].characteristics[j]); } - cpus[i]->characteristics[j] = NULL; + (*cpus)[i].characteristics[j] = NULL; } - free(cpus[i]->characteristics); + free((*cpus)[i].characteristics); + } + (*cpus)[i].charact_nb = 0; + (*cpus)[i].characteristics = NULL; + if ((*cpus)[i].l1_cache_handle) { + free((*cpus)[i].l1_cache_handle); + } + (*cpus)[i].l1_cache_handle = NULL; + if ((*cpus)[i].l2_cache_handle) { + free((*cpus)[i].l2_cache_handle); } - cpus[i]->charact_nb = 0; - cpus[i]->characteristics = NULL; + (*cpus)[i].l2_cache_handle = NULL; + if ((*cpus)[i].l3_cache_handle) { + free((*cpus)[i].l3_cache_handle); + } + (*cpus)[i].l3_cache_handle = NULL; } free (*cpus); } @@ -359,3 +417,262 @@ void dmi_free_processors(DmiProcessor **cpus, unsigned *cpus_nb) *cpus_nb = 0; *cpus = NULL; } + + +/****************************************************************************** + * DmiCpuCache + */ + +/* + * Initialize DmiCpuCache attributes. + * @param cache + */ +void init_dmi_cpu_cache_struct(DmiCpuCache *cache) +{ + cache->id = NULL; + cache->size = 0; + cache->name = NULL; + cache->status = NULL; + cache->level = 0; + cache->op_mode = NULL; + cache->type = NULL; + cache->associativity = NULL; +} + +/* + * Check attributes of cache structure and fill in defaults if needed. 
+ * @param cache + * @return 0 if success, negative value otherwise + */ +short check_dmi_cpu_cache_attributes(DmiCpuCache *cache) +{ + short ret = -1; + + if (!cache->id) { + if (!(cache->id = strdup(""))) { + ret = -2; + goto done; + } + } + if (!cache->name) { + if (!(cache->name = strdup(""))) { + ret = -3; + goto done; + } + } + if (!cache->status) { + if (!(cache->status = strdup(""))) { + ret = -4; + goto done; + } + } + if (!cache->op_mode) { + if (!(cache->op_mode = strdup("Unknown"))) { + ret = -5; + goto done; + } + } + if (!cache->type) { + if (!(cache->type = strdup("Unknown"))) { + ret = -6; + goto done; + } + } + if (!cache->associativity) { + if (!(cache->associativity = strdup("Unknown"))) { + ret = -7; + goto done; + } + } + + ret = 0; + +done: + if (ret != 0) { + warn("Failed to allocate memory."); + } + + return ret; +} + +short dmi_get_cpu_caches(DmiCpuCache **caches, unsigned *caches_nb) +{ + short ret = -1; + int curr_cache = -1; + unsigned i, buffer_size = 0; + char **buffer = NULL, *buf; + + *caches_nb = 0; + + /* get dmidecode output */ + if (run_command("dmidecode -t 7", &buffer, &buffer_size) != 0) { + ret = -2; + goto done; + } + + /* count caches */ + for (i = 0; i < buffer_size; i++) { + if (strncmp(buffer[i], "Handle 0x", 9) == 0) { + (*caches_nb)++; + } + } + + /* if no cache was found */ + if (*caches_nb < 1) { + warn("Dmidecode didn't recognize any processor cache memory."); + ret = -3; + goto done; + } + + /* allocate memory for caches */ + *caches = (DmiCpuCache *)calloc(*caches_nb, sizeof(DmiCpuCache)); + if (!(*caches)) { + warn("Failed to allocate memory."); + ret = -4; + goto done; + } + + /* parse information about cache */ + for (i = 0; i < buffer_size; i++) { + if (strncmp(buffer[i], "Handle 0x", 9) == 0) { + curr_cache++; + init_dmi_cpu_cache_struct(&(*caches)[curr_cache]); + + /* Cache ID is it's handle */ + char *id_start = buffer[i] + 7; + char *id_end = strchr(buffer[i], ','); + if (!id_end) { + warn("Unrecognized output from dmidecode program."); + ret = -5; + goto done; + } + (*caches)[curr_cache].id = strndup(id_start, id_end - id_start); + if (!(*caches)[curr_cache].id) { + warn("Failed to allocate memory."); + ret = -6; + goto done; + } + + continue; + } + /* ignore first useless lines */ + if (curr_cache < 0) { + continue; + } + /* Cache Name */ + buf = copy_string_part_after_delim(buffer[i], "Socket Designation: "); + if (buf) { + (*caches)[curr_cache].name = buf; + buf = NULL; + continue; + } + /* Cache Status and Level */ + buf = copy_string_part_after_delim(buffer[i], "Configuration: "); + if (buf) { + char **confs = NULL; + unsigned confs_len = 0; + if (explode(buf, ",", &confs, &confs_len) != 0 || + confs_len < 3) { + free_2d_buffer(&confs, &confs_len); + free(buf); + buf = NULL; + continue; + } + + (*caches)[curr_cache].status = trim(confs[0], NULL); + sscanf(confs[2], "%*s %u", &(*caches)[curr_cache].level); + + free_2d_buffer(&confs, &confs_len); + free(buf); + buf = NULL; + continue; + } + /* Cache Operational Mode */ + buf = copy_string_part_after_delim(buffer[i], "Operational Mode: "); + if (buf) { + (*caches)[curr_cache].op_mode = buf; + buf = NULL; + continue; + } + /* Cache Size */ + buf = copy_string_part_after_delim(buffer[i], "Installed Size: "); + if (buf) { + sscanf(buf, "%u", &(*caches)[curr_cache].size); + (*caches)[curr_cache].size *= 1024; /* It's in kB, we want B */ + free(buf); + buf = NULL; + continue; + } + /* Cache Type */ + buf = copy_string_part_after_delim(buffer[i], "System Type: "); + if (buf) { + 
(*caches)[curr_cache].type = buf; + buf = NULL; + continue; + } + /* Cache Associativity */ + buf = copy_string_part_after_delim(buffer[i], "Associativity: "); + if (buf) { + (*caches)[curr_cache].associativity = buf; + buf = NULL; + continue; + } + } + + /* fill in default attributes if needed */ + for (i = 0; i < *caches_nb; i++) { + if (check_dmi_cpu_cache_attributes(&(*caches)[i]) != 0) { + ret = -7; + goto done; + } + } + + ret = 0; + +done: + free_2d_buffer(&buffer, &buffer_size); + + if (ret != 0) { + dmi_free_cpu_caches(caches, caches_nb); + } + + return ret; +} + +void dmi_free_cpu_caches(DmiCpuCache **caches, unsigned *caches_nb) +{ + unsigned i; + + if (*caches_nb > 0) { + for (i = 0; i < *caches_nb; i++) { + if ((*caches)[i].id) { + free((*caches)[i].id); + } + (*caches)[i].id = NULL; + if ((*caches)[i].name) { + free((*caches)[i].name); + } + (*caches)[i].name = NULL; + if ((*caches)[i].status) { + free((*caches)[i].status); + } + (*caches)[i].status = NULL; + if ((*caches)[i].op_mode) { + free((*caches)[i].op_mode); + } + (*caches)[i].op_mode = NULL; + if ((*caches)[i].type) { + free((*caches)[i].type); + } + (*caches)[i].type = NULL; + if ((*caches)[i].associativity) { + free((*caches)[i].associativity); + } + (*caches)[i].associativity = NULL; + } + free (*caches); + } + + *caches_nb = 0; + *caches = NULL; +} diff --git a/src/hardware/dmidecode.h b/src/hardware/dmidecode.h index 9a8c6c8..1b4eca8 100644 --- a/src/hardware/dmidecode.h +++ b/src/hardware/dmidecode.h @@ -45,8 +45,23 @@ typedef struct _DmiProcessor { char *upgrade; /* CPU upgrade method - socket */ unsigned charact_nb; /* Number of CPU Characteristics */ char **characteristics; /* CPU Characteristics */ + char *l1_cache_handle; /* Level 1 Cache Handle */ + char *l2_cache_handle; /* Level 2 Cache Handle */ + char *l3_cache_handle; /* Level 3 Cache Handle */ } DmiProcessor; +/* Processor cache from dmidecode. */ +typedef struct _DmiCpuCache { + char *id; /* ID */ + unsigned size; /* Cache Size */ + char *name; /* Cache Name */ + char *status; /* Cache Status (Enabled or Disabled) */ + unsigned level; /* Cache Level */ + char *op_mode; /* Cache Operational Mode (Write Back, ..) */ + char *type; /* Cache Type (Data, Instruction, Unified..) */ + char *associativity; /* Cache Associativity */ +} DmiCpuCache; + /* * Get array of processors according to the dmidecode program. @@ -64,5 +79,21 @@ short dmi_get_processors(DmiProcessor **cpus, unsigned *cpus_nb); */ void dmi_free_processors(DmiProcessor **cpus, unsigned *cpus_nb); +/* + * Get array of processor caches according to the dmidecode program. + * @param caches array of cpu caches, this function will allocate necessary + * memory, but caller is responsible for freeing it + * @param caches_nb number of caches in caches + * @return 0 if success, negative value otherwise + */ +short dmi_get_cpu_caches(DmiCpuCache **caches, unsigned *caches_nb); + +/* + * Free array of cpu cache structures. + * @param caches array of caches + * @param caches_nb number of caches + */ +void dmi_free_cpu_caches(DmiCpuCache **caches, unsigned *caches_nb); + #endif /* DMIDECODE_H_ */ diff --git a/src/hardware/sysfs.c b/src/hardware/sysfs.c new file mode 100644 index 0000000..c2cc1af --- /dev/null +++ b/src/hardware/sysfs.c @@ -0,0 +1,295 @@ +/* + * Copyright (C) 2013 Red Hat, Inc. All rights reserved. 
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Peter Schiffer <pschiffe@redhat.com>
+ */
+
+#include "sysfs.h"
+
+
+/*
+ * Read unsigned value from file.
+ * @param path of file
+ * @param result
+ * @return 0 if success, negative value otherwise
+ */
+short path_get_unsigned(const char *path, unsigned *result)
+{
+    short ret = -1;
+    unsigned buffer_size = 0;
+    char **buffer = NULL;
+
+    if (read_file(path, &buffer, &buffer_size) != 0 || buffer_size < 1) {
+        ret = -2;
+        goto done;
+    }
+    if (sscanf(buffer[0], "%u", result) != 1) {
+        warn("Failed to parse file: \"%s\"; Error: %s",
+                path, strerror(errno));
+        ret = -3;
+        goto done;
+    }
+
+    ret = 0;
+
+done:
+    free_2d_buffer(&buffer, &buffer_size);
+
+    if (ret != 0) {
+        *result = 0;
+    }
+
+    return ret;
+}
+
+/*
+ * Read string value from file.
+ * @param path of file
+ * @param result
+ * @return 0 if success, negative value otherwise
+ */
+short path_get_string(const char *path, char **result)
+{
+    short ret = -1;
+    unsigned buffer_size = 0;
+    char **buffer = NULL;
+
+    if (read_file(path, &buffer, &buffer_size) != 0 || buffer_size < 1) {
+        ret = -2;
+        goto done;
+    }
+    *result = trim(buffer[0], NULL);
+    if (!(*result)) {
+        warn("Failed to parse file: \"%s\"", path);
+        ret = -3;
+        goto done;
+    }
+
+    ret = 0;
+
+done:
+    free_2d_buffer(&buffer, &buffer_size);
+
+    if (ret != 0) {
+        *result = NULL;
+    }
+
+    return ret;
+}
+
+/*
+ * Initialize SysfsCpuCache attributes.
+ * @param cache
+ */
+void init_sysfs_cpu_cache_struct(SysfsCpuCache *cache)
+{
+    cache->id = NULL;
+    cache->size = 0;
+    cache->name = NULL;
+    cache->level = 0;
+    cache->type = NULL;
+    cache->ways_of_assoc = 0;
+    cache->line_size = 0;
+}
+
+/*
+ * Check attributes of cache structure and fill in defaults if needed.
+ * @param cache
+ * @return 0 if success, negative value otherwise
+ */
+short check_sysfs_cpu_cache_attributes(SysfsCpuCache *cache)
+{
+    short ret = -1;
+
+    if (!cache->id) {
+        if (!(cache->id = strdup(""))) {
+            ret = -2;
+            goto done;
+        }
+    }
+    if (!cache->name) {
+        if (!(cache->name = strdup(""))) {
+            ret = -3;
+            goto done;
+        }
+    }
+    if (!cache->type) {
+        if (!(cache->type = strdup("Unknown"))) {
+            ret = -4;
+            goto done;
+        }
+    }
+
+    ret = 0;
+
+done:
+    if (ret != 0) {
+        warn("Failed to allocate memory.");
+    }
+
+    return ret;
+}
+
+short sysfs_get_cpu_caches(SysfsCpuCache **caches, unsigned *caches_nb)
+{
+    short ret = -1;
+    unsigned i, level;
+    char *buf = NULL, *format_str, path[PATH_MAX];  /* buf freed at done */
+    DIR *dir;
+
+    *caches_nb = 0;
+
+    /* count caches */
+    char *cache_dir = SYSFS_CPU_PATH "/cpu0/cache";
+    dir = opendir(cache_dir);
+    if (!dir) {
+        warn("Failed to read directory: \"%s\"; Error: %s",
+                cache_dir, strerror(errno));
+        ret = -2;
+        goto done;
+    }
+    while (readdir(dir)) {
+        (*caches_nb)++;
+    }
+    closedir(dir);
+
+    /* do not count . and .. */
+    *caches_nb -= 2;
+
+    /* if no cache was found */
+    if (*caches_nb < 1) {
+        warn("No processor cache was found in sysfs.");
+        ret = -3;
+        goto done;
+    }
+
+    /* allocate memory for caches */
+    *caches = (SysfsCpuCache *)calloc(*caches_nb, sizeof(SysfsCpuCache));
+    if (!(*caches)) {
+        warn("Failed to allocate memory.");
+        ret = -4;
+        goto done;
+    }
+
+    for (i = 0; i < *caches_nb; i++) {
+        init_sysfs_cpu_cache_struct(&(*caches)[i]);
+
+        /* cache ID and name */
+        /* cache level */
+        snprintf(path, PATH_MAX, SYSFS_CPU_PATH "/cpu0/cache/index%u/level", i);
+        if (path_get_unsigned(path, &level) != 0) {
+            ret = -5;
+            goto done;
+        }
+        (*caches)[i].level = level;
+        /* cache type */
+        snprintf(path, PATH_MAX, SYSFS_CPU_PATH "/cpu0/cache/index%u/type", i);
+        if (path_get_string(path, &buf) != 0) {
+            ret = -6;
+            goto done;
+        }
+        if (strncmp(buf, "Data", 4) == 0) {
+            format_str = "L%ud-%u";
+        } else if (strncmp(buf, "Instruction", 11) == 0) {
+            format_str = "L%ui-%u";
+        } else {
+            format_str = "L%u-%u";
+        }
+        if (asprintf(&(*caches)[i].id, format_str, level, i) < 0) {
+            (*caches)[i].id = NULL;
+            warn("Failed to allocate memory.");
+            ret = -7;
+            goto done;
+        }
+        if (asprintf(&(*caches)[i].name, "Level %u %s cache",
+                level, buf) < 0) {
+            (*caches)[i].name = NULL;
+            warn("Failed to allocate memory.");
+            ret = -8;
+            goto done;
+        }
+        (*caches)[i].type = buf;
+        buf = NULL;
+
+        /* cache size */
+        snprintf(path, PATH_MAX, SYSFS_CPU_PATH "/cpu0/cache/index%u/size", i);
+        if (path_get_unsigned(path, &(*caches)[i].size) != 0) {
+            (*caches)[i].size = 0;
+        }
+        (*caches)[i].size *= 1024;    /* It's in kB, we want B */
+
+        /* ways of associativity */
+        snprintf(path, PATH_MAX,
+                SYSFS_CPU_PATH "/cpu0/cache/index%u/ways_of_associativity", i);
+        if (path_get_unsigned(path, &(*caches)[i].ways_of_assoc) != 0) {
+            (*caches)[i].ways_of_assoc = 0;
+        }
+
+        /* line size */
+        snprintf(path, PATH_MAX,
+                SYSFS_CPU_PATH "/cpu0/cache/index%u/coherency_line_size", i);
+        if (path_get_unsigned(path, &(*caches)[i].line_size) != 0) {
+            (*caches)[i].line_size = 0;
+        }
+
+        /* fill in default attributes if needed */
+        if (check_sysfs_cpu_cache_attributes(&(*caches)[i]) != 0) {
+            ret = -9;
+            goto done;
+        }
+    }
+
+    ret = 0;
+
+done:
+    if (buf) {
+        free(buf);
+    }
+    buf = NULL;
+
+    if (ret != 0) {
+        sysfs_free_cpu_caches(caches, caches_nb);
+    }
+
+    return ret;
+}
+
+void sysfs_free_cpu_caches(SysfsCpuCache **caches, unsigned *caches_nb)
+{
+    unsigned i;
+
+    if (*caches_nb > 0) {
+        for (i = 0; i < *caches_nb; i++) {
+            if ((*caches)[i].id) {
+                free((*caches)[i].id);
+            }
+            (*caches)[i].id = NULL;
+            if ((*caches)[i].name) {
+                free((*caches)[i].name);
+            }
+            (*caches)[i].name = NULL;
+            if ((*caches)[i].type) {
+                free((*caches)[i].type);
+            }
+            (*caches)[i].type = NULL;
+        }
+        free (*caches);
+    }
+
+    *caches_nb = 0;
+    *caches = NULL;
+}
diff --git a/src/hardware/sysfs.h b/src/hardware/sysfs.h
new file mode 100644
index 0000000..0e0b523
--- /dev/null
+++ b/src/hardware/sysfs.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Authors: Peter Schiffer <pschiffe@redhat.com>
+ */
+
+#ifndef SYSFS_H_
+#define SYSFS_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <errno.h>
+#include <limits.h>
+#include "globals.h"
+#include "utils.h"
+
+#define SYSFS_CPU_PATH "/sys/devices/system/cpu"
+
+/* Processor cache from sysfs. */
+typedef struct _SysfsCpuCache {
+    char *id;                   /* ID */
+    unsigned size;              /* Cache Size */
+    char *name;                 /* Cache Name */
+    unsigned level;             /* Cache Level */
+    char *type;                 /* Cache Type (Data, Instruction, Unified..) */
+    unsigned ways_of_assoc;     /* Number of ways of associativity */
+    unsigned line_size;         /* Cache Line Size */
+} SysfsCpuCache;
+
+
+/*
+ * Get array of processor caches from sysfs.
+ * @param caches array of cpu caches, this function will allocate necessary
+ *      memory, but caller is responsible for freeing it
+ * @param caches_nb number of caches in caches
+ * @return 0 if success, negative value otherwise
+ */
+short sysfs_get_cpu_caches(SysfsCpuCache **caches, unsigned *caches_nb);
+
+/*
+ * Free array of cpu cache structures.
+ * @param caches array of caches
+ * @param caches_nb number of caches
+ */
+void sysfs_free_cpu_caches(SysfsCpuCache **caches, unsigned *caches_nb);
+
+
+#endif /* SYSFS_H_ */
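For reference, a minimal caller sketch (not part of this commit) showing how the cache APIs declared above in dmidecode.h and sysfs.h might be exercised. It assumes compilation inside src/hardware together with dmidecode.c, sysfs.c and their helpers (globals.h, utils.h), and that the dmidecode binary is available and run with sufficient privileges; the main() wrapper and output formatting are purely illustrative.

#include <stdio.h>
#include "dmidecode.h"
#include "sysfs.h"

int main(void)
{
    DmiCpuCache *dmi_caches = NULL;
    SysfsCpuCache *sysfs_caches = NULL;
    unsigned i, dmi_caches_nb = 0, sysfs_caches_nb = 0;

    /* dmidecode view - usually requires root privileges */
    if (dmi_get_cpu_caches(&dmi_caches, &dmi_caches_nb) == 0) {
        for (i = 0; i < dmi_caches_nb; i++) {
            printf("dmi: %s (%s), level %u, %u B, %s, %s\n",
                    dmi_caches[i].name, dmi_caches[i].id,
                    dmi_caches[i].level, dmi_caches[i].size,
                    dmi_caches[i].type, dmi_caches[i].associativity);
        }
    }
    dmi_free_cpu_caches(&dmi_caches, &dmi_caches_nb);

    /* sysfs view - readable without extra privileges */
    if (sysfs_get_cpu_caches(&sysfs_caches, &sysfs_caches_nb) == 0) {
        for (i = 0; i < sysfs_caches_nb; i++) {
            printf("sysfs: %s (%s), level %u, %u B, %u-way, line %u B\n",
                    sysfs_caches[i].name, sysfs_caches[i].id,
                    sysfs_caches[i].level, sysfs_caches[i].size,
                    sysfs_caches[i].ways_of_assoc, sysfs_caches[i].line_size);
        }
    }
    sysfs_free_cpu_caches(&sysfs_caches, &sysfs_caches_nb);

    return 0;
}

Note that both free functions reset the array pointer and the count, and both get functions clean up after themselves on failure, so calling the free functions unconditionally as above is safe.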